| prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k) |
|---|---|
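Each row pairs a FIM-formatted prompt (prefix introduced by `<|fim▁begin|>`, a `<|fim▁hole|>` gap, suffix terminated by `<|fim▁end|>`) with the missing middle span as the completion. A minimal loading sketch, assuming the rows live in a Hugging Face dataset with exactly these two columns; the repo id below is a placeholder:

```python
from datasets import load_dataset

ds = load_dataset("org/fim-code-pairs", split="train")  # placeholder repo id
row = ds[0]
prefix, suffix = row["prompt"].split("<|fim▁hole|>", 1)  # suffix still carries <|fim▁end|>
middle = row["completion"]                               # the span the model must fill in
```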
<|file_name|>mct.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 [email protected]
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# MCT - Mini Cliente Torrent para pelisalacarta
#------------------------------------------------------------
import os
import re
import shutil
import tempfile
import urllib
import urllib2
try:
from python_libtorrent import get_libtorrent
lt = get_libtorrent()
except Exception, e:
import libtorrent as lt
import xbmc
import xbmcgui
from core import config
from core import scrapertools
from core import filetools
def play(url, xlistitem, is_view=None, subtitle=""):
# -- Necesario para algunas webs ----------------------------
if not url.endswith(".torrent") and not url.startswith("magnet"):
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
# -- Crear dos carpetas en descargas para los archivos ------
save_path_videos = os.path.join( config.get_setting("downloadpath") , "torrent-videos" )
save_path_torrents = os.path.join( config.get_setting("downloadpath") , "torrent-torrents" )
    if not os.path.exists( save_path_videos ): os.mkdir(save_path_videos)
    if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents)
    # -- Usar - archivo torrent desde web, magnet o HD ----------
if not os.path.isfile(url) and not url.startswith("magnet"):
# -- http - crear archivo torrent -----------------------
data = url_get(url)
# -- El nombre del torrent será el que contiene en los --
# -- datos. -
re_name = urllib.unquote( scrapertools.get_match(data,':name\d+:(.*?)\d+:') )
#torrent_file = os.path.join(save_path_torrents, re_name+'.torrent')
        torrent_file = filetools.join(save_path_torrents, unicode(re_name, "utf-8", errors="replace")+'.torrent')
f = open(torrent_file,'wb')
f.write(data)
f.close()
elif os.path.isfile(url):
# -- file - para usar torrens desde el HD ---------------
torrent_file = url
else:
# -- magnet ---------------------------------------------
torrent_file = url
# -----------------------------------------------------------
# -- MCT - MiniClienteTorrent -------------------------------
ses = lt.session()
print "### Init session ########"
print lt.version
print "#########################"
ses.add_dht_router("router.bittorrent.com",6881)
ses.add_dht_router("router.utorrent.com",6881)
ses.add_dht_router("router.bitcomet.com",554)
ses.add_dht_router("dht.transmissionbt.com",6881)
trackers = [
"http://exodus.desync.com:6969/announce",
"udp://tracker.publicbt.com:80/announce",
"udp://tracker.openbittorrent.com:80/announce",
"http://tracker.torrentbay.to:6969/announce",
"http://fr33dom.h33t.com:3310/announce",
"http://tracker.pow7.com/announce",
"udp://tracker.ccc.de:80/announce",
"http://tracker.bittorrent.am:80/announce",
"http://denis.stalker.h3q.com:6969/announce",
"udp://tracker.prq.to:80/announce",
"udp://tracker.istole.it:80/announce",
"udp://open.demonii.com:1337",
"http://9.rarbg.com:2710/announce",
"http://announce.torrentsmd.com:6969/announce",
"http://bt.careland.com.cn:6969/announce",
"http://explodie.org:6969/announce",
"http://mgtracker.org:2710/announce",
"http://tracker.best-torrents.net:6969/announce",
"http://tracker.tfile.me/announce",
"http://tracker.torrenty.org:6969/announce",
"http://tracker1.wasabii.com.tw:6969/announce",
"udp://9.rarbg.com:2710/announce",
"udp://9.rarbg.me:2710/announce",
"udp://coppersurfer.tk:6969/announce",
"udp://tracker.btzoo.eu:80/announce",
"http://www.spanishtracker.com:2710/announce",
"http://www.todotorrents.com:2710/announce",
]
video_file = ""
# -- magnet2torrent -----------------------------------------
if torrent_file.startswith("magnet"):
tempdir = tempfile.mkdtemp()
params = {
'save_path': tempdir,
'trackers':trackers,
'storage_mode': lt.storage_mode_t.storage_mode_allocate,
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
h = lt.add_magnet_uri(ses, torrent_file, params)
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
while not h.has_metadata():
message, porcent, msg_file, s, download = getProgress(h, "Creando torrent desde magnet")
dp.update(porcent, message, msg_file)
if s.state == 1: download = 1
if dp.iscanceled():
dp.close()
remove_files( download, torrent_file, video_file, ses, h )
return
dp.close()
info = h.get_torrent_info()
data = lt.bencode( lt.create_torrent(info).generate() )
#torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
        torrent_file = os.path.join(save_path_torrents, unicode(info.name(), "utf-8", errors="replace") + ".torrent")
f = open(torrent_file,'wb')
f.write(data)
f.close()
ses.remove_torrent(h)
shutil.rmtree(tempdir)
# -----------------------------------------------------------
# -- Archivos torrent ---------------------------------------
e = lt.bdecode(open(torrent_file, 'rb').read())
info = lt.torrent_info(e)
    # -- El más gordo o uno de los más gordos se entiende que es -
# -- el vídeo o es el vídeo que se usará como referencia -
# -- para el tipo de archivo -
print "##### Archivos ## %s ##" % len(info.files())
_index_file, _video_file, _size_file = get_video_file(info)
_video_file_ext = os.path.splitext( _video_file )[1]
if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
print "##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } )
else:
print "##### storage_mode: none ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } )
# -----------------------------------------------------------
# -- Descarga secuencial - trozo 1, trozo 2, ... ------------
h.set_sequential_download(True)
h.force_reannounce()
h.force_dht_announce()
# -- Prioritarizar/Seleccionar archivo-----------------------
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1:
_index = _index_file
video_file = _video_file
video_size = _size_file
    # -- Inicio de variables para 'pause' automático cuando -
    # -- el vídeo se acerca a una pieza sin completar -
is_greater_num_pieces = False
is_greater_num_pieces_plus = False
is_greater_num_pieces_pause = False
#porcent4first_pieces = int( video_size / 1073741824 )
porcent4first_pieces = int( video_size * 0.000000005 )
if porcent4first_pieces < 10: porcent4first_pieces = 10
if porcent4first_pieces > 100: porcent4first_pieces = 100
#num_pieces_to_resume = int( video_size / 1610612736 )
num_pieces_to_resume = int( video_size * 0.0000000025 )
if num_pieces_to_resume < 5: num_pieces_to_resume = 5
if num_pieces_to_resume > 25: num_pieces_to_resume = 25
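    # -- 0.000000005 equivale a ~1 pieza de margen por cada 200 MB -
    # -- de vídeo y 0.0000000025 a ~1 por cada 400 MB, acotadas a -
    # -- los rangos [10,100] y [5,25] respectivamente -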
print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume
# -- Prioritarizar o seleccionar las piezas del archivo que -
# -- se desea reproducir con 'file_priorities' -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
# -- Crear diálogo de progreso para el primer bucle ---------<|fim▁hole|> _pieces_info = {}
# -- Doble bucle anidado ------------------------------------
# -- Descarga - Primer bucle -
while not h.is_seed():
s = h.status()
xbmc.sleep(100)
# -- Recuperar los datos del progreso -------------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
# -- Si hace 'checking' existe descarga -----------------
# -- 'download' Se usará para saber si hay datos -
# -- descargados para el diálogo de 'remove_files' -
if s.state == 1: download = 1
# -- Player - play --------------------------------------
# -- Comprobar si se han completado las piezas para el -
# -- inicio del vídeo ............... -
first_pieces = True
_p = ""
_c = 0
for i in range( piece_set[0], piece_set[porcent4first_pieces] ):
_p+= "[%s:%s]" % ( i, h.have_piece(i) )
first_pieces&= h.have_piece(i)
if h.have_piece(i): _c+= 1
_pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c,porcent4first_pieces), 'have': h.status().num_pieces, 'len': len(piece_set)}
_p = "##### first_pieces [%s/%s][%s]: " % ( _c, porcent4first_pieces, len(piece_set) ) + _p
print _p
# -- -------------------------------------------------- -
if is_view != "Ok" and first_pieces:
print "##### porcent [%.2f%%]" % (s.progress * 100)
is_view = "Ok"
dp.close()
# -- Player - Ver el vídeo --------------------------
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
#ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
ren_video_file = os.path.join( save_path_videos, video_file )
playlist.add( ren_video_file, xlistitem )
#playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
#playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
player = play_video( xbmc.PLAYER_CORE_AUTO )
player.play(playlist)
'''
# -- Player - Ver el vídeo --------------------------
player = play_video()
#player.play( os.path.join( save_path_videos, video_file ) )
player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
'''
#player.play( os.path.join( save_path_videos, video_file ) )
# -- Contador de cancelaciones para la ventana de -
# -- 'pause' automático -
is_greater_num_pieces_canceled = 0
continuous_pieces = 0
porcent_time = 0.00
current_piece = 0
# -- Impedir que kodi haga 'resume' a un archivo ----
            # -- que se reprodujo con anterioridad y que se -
            # -- eliminó para impedir que intente la reproducción -
# -- en una pieza que aún no se ha completado y se -
# -- active 'pause' automático -
not_resume = True
# -- Bandera subTítulos
_sub = False
# -- Segundo bucle - Player - Control de eventos ----
while player.isPlaying():
xbmc.sleep(100)
# -- Añadir subTítulos
if subtitle!="" and not _sub:
_sub = True
player.setSubtitles(subtitle)
# -- Impedir que kodi haga 'resume' al inicio ---
# -- de la descarga de un archivo conocido -
if not_resume:
player.seekTime(0)
not_resume = False
#xbmc.sleep(1000)
# -- Control 'pause' automático -
continuous_pieces = count_completed_continuous_pieces(h, piece_set)
if xbmc.Player().isPlaying():
# -- Porcentage del progreso del vídeo ------
porcent_time = player.getTime() / player.getTotalTime() * 100
# -- Pieza que se está reproduciendo --------
current_piece = int( porcent_time / 100 * len(piece_set) )
# -- Banderas de control --------------------
is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume)
is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces)
is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set))
# -- Activa 'pause' automático --------------
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
is_greater_num_pieces_pause = True
player.pause()
# -- Log ------------------------------------
_TotalTime = player.getTotalTime()
_Time = player.getTime()
_print_log = "\n##### Player ##################################"
_print_log+= "\nTamaño del vídeo: %s" % video_size
_print_log+= "\nTotal piezas: %s" % len(piece_set)
_print_log+= "\nPiezas contiguas: %s" % continuous_pieces
_print_log+= "\n-----------------------------------------------"
_print_log+= "\nVídeo-Total segundos: %s" % _TotalTime
_print_log+= "\nVídeo-Progreso segundos: %s" % _Time
_print_log+= "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
_print_log+= "\n-----------------------------------------------"
_print_log+= "\ncurrent_piece: %s" % current_piece
_print_log+= "\nis_greater_num_pieces: %s" % is_greater_num_pieces
_print_log+= "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
_print_log+= "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
_print_log+= "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
_print_log+= "\nPieza que se está visionando: %.2f" % ( porcent_time / 100 * len(piece_set) )
_print_log+= "\nOffset que se está visionando: %.2f" % ( porcent_time / 100 * video_size )
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
_print_log+= "\n+++++++++++++++++++++++++++++++++++++++++++++++"
_print_log+= "\nPausa con:"
_print_log+= "\n current_piece = %s" % current_piece
_print_log+= "\n continuous_pieces = %s" % continuous_pieces
_print_log+= "\n###############################################"
print _print_log
# -------------------------------------------
_pieces_info = {'current': current_piece, 'continuous': continuous_pieces, 'have': h.status().num_pieces, 'len': len(piece_set)}
# -- Cerrar el diálogo de progreso --------------
if player.resumed:
dp.close()
# -- Mostrar el diálogo de progreso -------------
if player.paused:
# -- Crear diálogo si no existe -------------
if not player.statusDialogoProgress:
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
player.setDialogoProgress()
# -- Diálogos de estado en el visionado -----
if not h.is_seed():
# -- Recuperar los datos del progreso ---
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
dp.update(porcent, message, msg_file)
else:
dp.update(100, "Descarga completa: " + video_file)
# -- Se canceló el progreso en el visionado -
# -- Continuar -
if dp.iscanceled():
dp.close()
player.pause()
# -- Se canceló el progreso en el visionado -
# -- en la ventana de 'pause' automático. -
# -- Parar si el contador llega a 3 -
if dp.iscanceled() and is_greater_num_pieces_pause:
is_greater_num_pieces_canceled+= 1
if is_greater_num_pieces_canceled == 3:
player.stop()
# -- Desactiva 'pause' automático y ---------
# -- reinicia el contador de cancelaciones -
if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
dp.close()
player.pause()
is_greater_num_pieces_pause = False
is_greater_num_pieces_canceled = 0
                    # -- El usuario canceló el visionado --------
# -- Terminar -
if player.ended:
# -- Diálogo eliminar archivos ----------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Kodi - Se cerró el visionado -----------------------
# -- Continuar | Terminar -
if is_view == "Ok" and not xbmc.Player().isPlaying():
if info.num_files() == 1:
# -- Diálogo continuar o terminar ---------------
d = xbmcgui.Dialog()
                ok = d.yesno('pelisalacarta-MCT', 'XBMC-Kodi cerró el vídeo.', '¿Continuar con la sesión?')
else: ok = False
# -- SI ---------------------------------------------
if ok:
# -- Continuar: ---------------------------------
is_view=None
else:
# -- Terminar: ----------------------------------
# -- Comprobar si el vídeo pertenece a una ------
# -- lista de archivos -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Diálogo eliminar archivos --------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- Lista de archivos. Diálogo de opciones -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
        # -- Mostrar progreso antes del visionado ---------------
if is_view != "Ok" :
dp.update(porcent, message, msg_file)
# -- Se canceló el progreso antes del visionado ---------
# -- Terminar -
if dp.iscanceled():
dp.close()
# -- Comprobar si el vídeo pertenece a una lista de -
# -- archivos -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Diálogo eliminar archivos ------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- Lista de archivos. Diálogo de opciones -----
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
# -- Kodi - Error? - No debería llegar aquí -----------------
if is_view == "Ok" and not xbmc.Player().isPlaying():
dp.close()
# -- Diálogo eliminar archivos --------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Progreso de la descarga ------------------------------------
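# -- Devuelve (message, porcent, msg_file, s, download): línea de -
# -- estado, progreso en % (int), nombre de archivo formateado -
# -- para el diálogo, el status de libtorrent y el progreso como -
# -- float -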
def getProgress(h, video_file, _pf={}):
if len(_pf) > 0:
#_pf_msg = "[%s] [%s] [%s] [%s][CR]" % (_pf['current'], _pf['continuous'], _pf['have'], _pf['len'])
_pf_msg = "[%s] [%s] [%s] [%s]" % (_pf['current'], _pf['continuous'], _pf['have'], _pf['len'])
else: _pf_msg = ""
s = h.status()
state_str = ['queued', 'checking', 'downloading metadata', \
'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume']
message = '%.2f%% d:%.1f kb/s u:%.1f kb/s p:%d s:%d %s' % \
(s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
s.num_peers, s.num_seeds, state_str[s.state])
porcent = int( s.progress * 100 )
download = ( s.progress * 100 )
if "/" in video_file: video_file = video_file.split("/")[1]
#msg_file = "..../"+video_file + " - %.2f MB" % (s.total_wanted/1048576.0)
#msg_file = video_file + " - %.2f MB" % (s.total_wanted/1048576.0)
msg_file = video_file
#msg_file = "[%s] "%len(msg_file)+_pf_msg+msg_file
if len(msg_file) > 50:
msg_file = msg_file.replace( video_file, os.path.splitext(video_file)[0][:40] + "... " + os.path.splitext(video_file)[1] )
msg_file = msg_file + "[CR]" + "%.2f MB" % (s.total_wanted/1048576.0) + " - " + _pf_msg
return (message, porcent, msg_file, s, download)
# -- Clase play_video - Controlar eventos -----------------------
class play_video(xbmc.Player):
def __init__( self, *args, **kwargs ):
self.paused = False
self.resumed = True
self.statusDialogoProgress = False
self.ended = False
def onPlayBackPaused(self):
self.paused = True
self.resumed = False
def onPlayBackResumed(self):
self.paused = False
self.resumed = True
self.statusDialogoProgress = False
def is_paused(self):
return self.paused
def setDialogoProgress(self):
self.statusDialogoProgress = True
    def is_started(self):
        self.ended = False
    def is_ended(self):
        self.ended = True
    # -- Callbacks de kodi al detener/terminar el visionado -
    def onPlayBackStopped(self):
        self.ended = True
    def onPlayBackEnded(self):
        self.ended = True
# -- Conseguir el nombre de un archivo de vídeo del metadata ----
# -- El más gordo o uno de los más gordos se entiende que es el -
# -- vídeo o es el vídeo que se usará como referencia para el tipo -
# -- de archivo -
def get_video_file( info ):
    index_file = 0
    video_file = ""
    size_file = 0
for i, f in enumerate(info.files()):
if f.size > size_file:
video_file = f.path.replace("\\","/")
size_file = f.size
index_file = i
return index_file, video_file, size_file
# -- Listado de selección del vídeo a prioritarizar -------------
def get_video_files_sizes( info ):
opciones = []
vfile_name = {}
vfile_size = {}
for i, f in enumerate( info.files() ):
#_title = f.path
#try: _title = f.path.encode('iso-8859-1')
#except: _title = f.path.decode('utf-8')
#_title = f.path.encode('iso-8859-1')
_title = unicode(f.path, "iso-8859-1", errors="replace")
_title = unicode(f.path, "'utf-8'", errors="replace")
_title = re.sub(r'(.*? )- Temporada (\d+) Completa(.*?)',
r'\1T\2\3',
_title)
_title = re.sub(r'\s\([^\)]+\)|\s\-',
'',
_title)
info.rename_file( i, _title )
for i, f in enumerate( info.files() ):
_index = int(i)
_title = f.path.replace("\\","/")
_size = f.size
_offset = f.offset
_file_name = os.path.splitext( _title )[0]
if "/" in _file_name: _file_name = _file_name.split('/')[1]
_file_ext = os.path.splitext( _title )[1]
_caption = str(i) + \
" - " + \
_file_name + _file_ext + \
" - %.2f MB" % (_size / 1048576.0)
vfile_name[i] = _title
vfile_size[i] = _size
opciones.append(_caption)
if len(opciones) > 1:
d = xbmcgui.Dialog()
seleccion = d.select("pelisalacarta-MCT: Lista de vídeos", opciones)
else: seleccion = 0
if seleccion == -1:
vfile_name[seleccion] = ""
vfile_size[seleccion] = 0
return seleccion, vfile_name[seleccion], vfile_size[seleccion]
# -- Preguntar si se desea borrar lo descargado -----------------
def remove_files( download, torrent_file, video_file, ses, h ):
dialog_view = False
torrent = False
if os.path.isfile( torrent_file ):
dialog_view = True
torrent = True
if download > 0:
dialog_view = True
if "/" in video_file: video_file = video_file.split("/")[0]
if dialog_view:
d = xbmcgui.Dialog()
ok = d.yesno('pelisalacarta-MCT', 'Borrar las descargas del video', video_file)
# -- SI -------------------------------------------------
if ok:
# -- Borrar archivo - torrent -----------------------
if torrent:
os.remove( torrent_file )
# -- Borrar carpeta/archivos y sesión - vídeo -------
ses.remove_torrent( h, 1 )
print "### End session #########"
else:
# -- Borrar sesión ----------------------------------
ses.remove_torrent( h )
print "### End session #########"
else:
# -- Borrar sesión --------------------------------------
ses.remove_torrent( h )
print "### End session #########"
return
# -- Descargar de la web los datos para crear el torrent --------
# -- Si queremos aligerar el script mct.py se puede importar la -
# -- función del conector torrent.py -
def url_get(url, params={}, headers={}):
from contextlib import closing
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0"
if params:
import urllib
url = "%s?%s" % (url, urllib.urlencode(params))
req = urllib2.Request(url)
req.add_header("User-Agent", USER_AGENT)
for k, v in headers.items():
req.add_header(k, v)
try:
with closing(urllib2.urlopen(req)) as response:
data = response.read()
if response.headers.get("Content-Encoding", "") == "gzip":
import zlib
return zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data)
return data
except urllib2.HTTPError:
return None
# -- Procedimiento para log de have_piece en las pruebas --------
def print_have_piece_set(h, piece_set):
c = 0
_print = "\n"
for i, _set in enumerate(piece_set):
if h.have_piece(_set): _print+= "[%s]" % str(_set).zfill(5)
else: _print+= "[XXXXX]"
c+= 1
if c == 20:
c = 0
_print+= "\n"
print _print
# -- Contar las piezas contiguas completas del vídeo ------------
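# -- Ej.: have_piece = [1,1,1,0,1] devuelve 3 (solo cuenta desde -
# -- el principio hasta la primera pieza sin completar) -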
def count_completed_continuous_pieces(h, piece_set):
    count = 0
    for _set in piece_set:
        if not h.have_piece(_set): break
        count += 1
    return count
# -- Prioritarizar o seleccionar las piezas del archivo que se -
# -- desea reproducir con 'file_priorities' estableciendo a 1 -
# -- el archivo deseado y a 0 el resto de archivos almacenando -
# -- en una lista los índices de las piezas del archivo -
def set_priority_pieces(h, _index, video_file, video_size):
for i, _set in enumerate(h.file_priorities()):
if i != _index: h.file_priority(i,0)
else: h.file_priority(i,1)
piece_set = []
for i, _set in enumerate(h.piece_priorities()):
if _set == 1: piece_set.append(i)
return piece_set<|fim▁end|> | dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
|
<|file_name|>api_db_spec.js<|end_file_name|><|fim▁begin|>/*globals describe, before, beforeEach, afterEach, it */
/*jshint expr:true*/
var testUtils = require('../../utils'),
should = require('should'),
// Stuff we are testing
dbAPI = require('../../../server/api/db'),
ModelTag = require('../../../server/models/tag'),
ModelPost = require('../../../server/models/post');
describe('DB API', function () {
// Keep the DB clean
before(testUtils.teardown);
afterEach(testUtils.teardown);
beforeEach(testUtils.setup('users:roles', 'posts', 'perms:db', 'perms:init'));
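    // teardown runs once before the suite and again after every test, and
    // beforeEach reloads the fixtures, so no state leaks between cases.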
should.exist(dbAPI);
it('delete all content (owner)', function (done) {
return dbAPI.deleteAllContent(testUtils.context.owner).then(function (result) {
should.exist(result.db);
result.db.should.be.instanceof(Array);
result.db.should.be.empty;
}).then(function () {
return ModelTag.Tag.findAll(testUtils.context.owner).then(function (results) {
should.exist(results);
results.length.should.equal(0);
});
}).then(function () {
return ModelPost.Post.findAll(testUtils.context.owner).then(function (results) {
should.exist(results);
results.length.should.equal(0);
done();
});
}).catch(done);
});
it('delete all content (admin)', function (done) {
return dbAPI.deleteAllContent(testUtils.context.admin).then(function (result) {
should.exist(result.db);
result.db.should.be.instanceof(Array);
result.db.should.be.empty;
}).then(function () {
return ModelTag.Tag.findAll(testUtils.context.admin).then(function (results) {
should.exist(results);
results.length.should.equal(0);
});
}).then(function () {
return ModelPost.Post.findAll(testUtils.context.admin).then(function (results) {
should.exist(results);
results.length.should.equal(0);
done();
});
}).catch(done);
});
it('delete all content is denied (editor & author)', function (done) {
return dbAPI.deleteAllContent(testUtils.context.editor).then(function () {
done(new Error('Delete all content is not denied for editor.'));
}, function (error) {
error.type.should.eql('NoPermissionError');
return dbAPI.deleteAllContent(testUtils.context.author);
}).then(function () {
done(new Error('Delete all content is not denied for author.'));
}, function (error) {
error.type.should.eql('NoPermissionError');
return dbAPI.deleteAllContent();
}).then(function () {
done(new Error('Delete all content is not denied without authentication.'));
}).catch(function (error) {
error.type.should.eql('NoPermissionError');
done();
}).catch(done);
});
it('export content is denied (editor & author)', function (done) {
return dbAPI.exportContent(testUtils.context.editor).then(function () {
done(new Error('Export content is not denied for editor.'));
}, function (error) {
error.type.should.eql('NoPermissionError');
return dbAPI.exportContent(testUtils.context.author);
}).then(function () {
done(new Error('Export content is not denied for author.'));
}, function (error) {
error.type.should.eql('NoPermissionError');<|fim▁hole|> }).catch(function (error) {
error.type.should.eql('NoPermissionError');
done();
}).catch(done);
});
it('import content is denied (editor & author)', function (done) {
return dbAPI.importContent(testUtils.context.editor).then(function () {
done(new Error('Import content is not denied for editor.'));
}, function (error) {
error.type.should.eql('NoPermissionError');
return dbAPI.importContent(testUtils.context.author);
}).then(function () {
done(new Error('Import content is not denied for author.'));
}, function (error) {
error.type.should.eql('NoPermissionError');
return dbAPI.importContent();
}).then(function () {
done(new Error('Import content is not denied without authentication.'));
}).catch(function (error) {
error.type.should.eql('NoPermissionError');
done();
}).catch(done);
});
});<|fim▁end|> | return dbAPI.exportContent();
}).then(function () {
done(new Error('Export content is not denied without authentication.')); |
<|file_name|>SpeakPythonMakeDB.py<|end_file_name|><|fim▁begin|>import antlr3;
import sqlite3;
import pickle;
import sys, os;
import re;
from SpeakPython.SpeakPython import SpeakPython;
from SpeakPython.SpeakPythonLexer import SpeakPythonLexer;
from SpeakPython.SpeakPythonParser import SpeakPythonParser;
#sort results based on length of labels
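#(recursive merge sort, descending, so results with more labels are tried first)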
def sortResults(results):
l = len(results);
if l == 1 or l == 0:
return results;
s1 = sortResults(results[:l/2]);
s2 = sortResults(results[l/2:]);
res = [];
si1 = 0;
si2 = 0;
sl1 = len(s1);
sl2 = len(s2);
max = sl1 + sl2;
for i in range(0, max):
if si1 == sl1:
res.extend(s2[si2:]);
break;
if si2 == sl2:
res.extend(s1[si1:]);
break;
if len(s1[si1].labels) > len(s2[si2].labels):
res.append( s1[si1] );
si1 += 1;
else:
res.append( s2[si2] );
si2 += 1;
return res;
def makeDB(conn):
c = conn.cursor();
try:
c.execute("DROP TABLE matches");
c.execute("DROP TABLE functions");
c.execute("DROP TABLE kleene")
conn.commit();
except Exception as e:
conn.rollback();
c.execute("CREATE TABLE matches (order_id INTEGER PRIMARY KEY, keywords TEXT, regex TEXT, results BLOB)");
c.execute("CREATE TABLE functions (name TEXT, regex TEXT, results BLOB)");
c.execute("CREATE TABLE kleene (id TEXT PRIMARY KEY, regexes BLOB)");
#index the keywords to speed up text search
c.execute("CREATE INDEX IF NOT EXISTS keyword_idx ON matches (keywords)");
c.execute("CREATE INDEX IF NOT EXISTS func_name_idx ON functions (name)");
conn.commit();
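	# Resulting schema: matches(order_id, keywords, regex, results),
	# functions(name, regex, results), kleene(id, regexes); the
	# results/regexes columns hold pickled Python objects.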
def performTestCases(exp, testCases):
print "Testing: ", exp
for t in testCases:
m = re.match(exp, t);
if m == None:
print "Test case failed: ", t;
return False;
return True;
def insertIntoDB(conn, matches, functions):
matchEntries = [];
kleeneEntries = [];
funcEntries = [];
print "Running test cases for matches...";
idCount = 0;
for m in matches:
#perform in-suite test cases
succeededTests = performTestCases(m.exp, m.testCases);
if not succeededTests:
return;
k = ','.join(m.keywords);
m.results = sortResults(m.results);
if len(m.kGroupRegexes) > 0:
kleeneEntries.append((str(idCount), pickle.dumps(m.kGroupRegexes)));
matchEntries.append((idCount, k, m.exp, pickle.dumps(m.results)));
idCount += 1;
print "All match test cases passed.";
c = conn.cursor();
c.executemany("INSERT INTO matches VALUES (?,?,?,?)", matchEntries);
conn.commit();
print "Running test cases for functions...";
for f in functions:
f = functions[f];
#perform in-suite test cases
		succeededTests = performTestCases(f.getExp(), f.testCases);
if not succeededTests:
return;
#save all regex groups in database under function name
if len(f.kGroupRegexes) > 0:
kleeneEntries.append((f.getName(), pickle.dumps(f.kGroupRegexes)));
f.results = sortResults(f.results);
funcEntries.append((f.getName(), f.getExp(), pickle.dumps(f.getResults())));
print "All function test cases passed";
c.executemany("INSERT INTO functions VALUES (?,?,?)", funcEntries);
c.executemany("INSERT INTO kleene VALUES (?,?)", kleeneEntries);
conn.commit();
print "Functions:";
for row in c.execute("SELECT * FROM functions"):
print row, '\n';
print "\n";
print "Matches:";
for row in c.execute("SELECT * FROM matches"):
print row, '\n';
print "\n";
print "Kleene:";
for row in c.execute("SELECT * FROM kleene"):
print row, '\n';
print "\n";
conn.close();
def parse(conn, fileList, dirName):
parser = None;
otherGlobalTests = {};
for f in fileList:
#join filename with current directory path
fileName = os.path.join(dirName, f);
#if f is a file, parse and insert into db
if os.path.isfile(fileName):
char_stream = antlr3.ANTLRFileStream(fileName);
lexer = SpeakPythonLexer(char_stream);
tokens = antlr3.CommonTokenStream(lexer);
# for t in lexer:
# print t;
parser = SpeakPythonParser(tokens);
parser.prog();
insertIntoDB(conn, parser.matches, parser.aliases);
#if f is a dir, pass list of files into recursive call
if os.path.isdir(fileName):
subFiles = os.listdir(fileName);
otherGlobalTests = parse(conn, subFiles, fileName);
globalTests = {};
if parser == None:
print "Parser not defined."
else:
globalTests = parser.globalTests;
globalTests.update(otherGlobalTests);<|fim▁hole|> return globalTests;
def main(argv):
name = argv[1] + '.db';
conn = sqlite3.connect(name);
makeDB(conn);
globalTests = parse(conn, [argv[2]], '');
for gt in globalTests:
sp = SpeakPython(name);
r = sp.matchResult(gt);
resultStr = '';
if r != None:
resultStr = r.getResult();
if resultStr != globalTests[gt]:
print "Value test case failed: (" + gt + ") does not return (" + globalTests[gt] + "), but instead returns (" + resultStr + ")";
main(sys.argv);<|fim▁end|> | |
<|file_name|>http.py<|end_file_name|><|fim▁begin|># This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import iteritems
import abc
import copy
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.reporters import utils
from buildbot.util import httpclientservice
from buildbot.util import service
class HttpStatusPushBase(service.BuildbotService):
neededDetails = dict()
def checkConfig(self, *args, **kwargs):
service.BuildbotService.checkConfig(self)
httpclientservice.HTTPClientService.checkAvailable(self.__class__.__name__)
if not isinstance(kwargs.get('builders'), (type(None), list)):
config.error("builders must be a list or None")
@defer.inlineCallbacks
def reconfigService(self, builders=None, debug=None, verify=None, **kwargs):
yield service.BuildbotService.reconfigService(self)
self.debug = debug
self.verify = verify
self.builders = builders
self.neededDetails = copy.copy(self.neededDetails)
for k, v in iteritems(kwargs):
if k.startswith("want"):
self.neededDetails[k] = v
@defer.inlineCallbacks
def startService(self):
yield service.BuildbotService.startService(self)
startConsuming = self.master.mq.startConsuming
self._buildCompleteConsumer = yield startConsuming(
self.buildFinished,
('builds', None, 'finished'))
self._buildStartedConsumer = yield startConsuming(
self.buildStarted,
('builds', None, 'new'))
def stopService(self):
self._buildCompleteConsumer.stopConsuming()
self._buildStartedConsumer.stopConsuming()
def buildStarted(self, key, build):
return self.getMoreInfoAndSend(build)
def buildFinished(self, key, build):
return self.getMoreInfoAndSend(build)
def filterBuilds(self, build):
if self.builders is not None:
return build['builder']['name'] in self.builders
return True
@defer.inlineCallbacks
def getMoreInfoAndSend(self, build):
yield utils.getDetailsForBuild(self.master, build, **self.neededDetails)
if self.filterBuilds(build):
yield self.send(build)
@abc.abstractmethod
def send(self, build):
pass
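# Concrete reporters subclass HttpStatusPushBase and implement send();
# HttpStatusPush below POSTs each build event to a fixed URL, optionally
# reshaping the JSON payload with format_fn first.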
class HttpStatusPush(HttpStatusPushBase):
name = "HttpStatusPush"
secrets = ['user', 'password', "auth"]
def checkConfig(self, serverUrl, user=None, password=None, auth=None, format_fn=None, **kwargs):
if user is not None and auth is not None:
config.error("Only one of user/password or auth must be given")
if user is not None:
config.warnDeprecated("0.9.1", "user/password is deprecated, use 'auth=(user, password)'")
if (format_fn is not None) and not callable(format_fn):
config.error("format_fn must be a function")<|fim▁hole|> HttpStatusPushBase.checkConfig(self, **kwargs)
@defer.inlineCallbacks
def reconfigService(self, serverUrl, user=None, password=None, auth=None, format_fn=None, **kwargs):
yield HttpStatusPushBase.reconfigService(self, **kwargs)
if user is not None:
auth = (user, password)
if format_fn is None:
self.format_fn = lambda x: x
else:
self.format_fn = format_fn
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, serverUrl, auth=auth)
@defer.inlineCallbacks
def send(self, build):
response = yield self._http.post("", json=self.format_fn(build))
if response.code != 200:
log.msg("%s: unable to upload status: %s" %
(response.code, response.content))<|fim▁end|> | |
<|file_name|>QYBatchJobType.java<|end_file_name|><|fim▁begin|>package com.swifts.frame.modules.wx.fastweixin.company.message.req;<|fim▁hole|> * 微信企业号异步任务类型
* ====================================================================
*
* --------------------------------------------------------------------
* @author Nottyjay
* @version 1.0.beta
* @since 1.3.6
* ====================================================================
*/
public final class QYBatchJobType {
private String SYNCUSER = "sync_user";// 增量更新成员
private String REPLACEUSER = "replace_user";// 全量覆盖成员
private String INVITEUSER = "invite_user";// 邀请成员关注
private String REPLACEPARTY = "replace_party";// 全量覆盖部门
private QYBatchJobType() {
}
}<|fim▁end|> | /** |
<|file_name|>app.js<|end_file_name|><|fim▁begin|>/**
* Module dependencies.
*/
var express = require('express');
var http = require('http');
var path = require('path');
var handlebars = require('express3-handlebars');
var index = require('./routes/index');
var project = require('./routes/project');
var palette = require('./routes/palette');
// Example route
// var user = require('./routes/user');
var app = express();
// all environments
app.set('port', process.env.PORT || 3000);
app.set('views', path.join(__dirname, 'views'));
app.engine('handlebars', handlebars());
app.set('view engine', 'handlebars');
app.use(express.favicon());
app.use(express.logger('dev'));
app.use(express.json());
app.use(express.urlencoded());
app.use(express.methodOverride());
app.use(express.cookieParser('Intro HCI secret key'));
app.use(express.session());
app.use(app.router);
app.use(express.static(path.join(__dirname, 'public')));
// development only
if ('development' == app.get('env')) {
app.use(express.errorHandler());
}
// Add routes here
app.get('/', index.view);<|fim▁hole|>app.get('/project/:id', project.projectInfo);
app.get('/palette', palette.randomPalette);
// Example route
// app.get('/users', user.list);
http.createServer(app).listen(app.get('port'), function(){
console.log('Express server listening on port ' + app.get('port'));
});<|fim▁end|> | |
<|file_name|>message.go<|end_file_name|><|fim▁begin|>package payload
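// Message is a chat message normalized across adapters: its id, the
// sending user, the raw text, an adapter-specific Payload, and the name
// of the adapter it arrived from.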
type Message struct {<|fim▁hole|> Id string
FromUser *User
Text string
Payload interface{}
SourceAdapter string
}<|fim▁end|> | |
<|file_name|>NavBarSkipLink.tsx<|end_file_name|><|fim▁begin|>import React from "react"
import styled from "styled-components"<|fim▁hole|>import { Text, color, space } from "@artsy/palette"
export const NavBarSkipLink: React.FC = () => {
return (
<Container href="#main">
<Text variant="text">Skip to Main Content</Text>
</Container>
)
}
NavBarSkipLink.displayName = "NavBarSkipLink"
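// The link is rendered off-screen (top: -100%) until it receives keyboard
// focus, then revealed in place so keyboard users can jump straight to #main.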
const Container = styled.a`
display: block;
position: absolute;
top: -100%;
left: 0;
padding: ${space(1)}px;
color: ${color("black100")};
background-color: ${color("black10")};
&:focus {
position: relative;
top: 0;
}
`<|fim▁end|> | |
<|file_name|>test_models.py<|end_file_name|><|fim▁begin|>from django.utils import translation
from nose.tools import eq_
from olympia import amo
from olympia.amo.tests import TestCase, ESTestCase
from olympia.addons.models import Addon
from olympia.reviews import tasks
from olympia.reviews.models import (
check_spam, GroupedRating, Review, ReviewFlag, Spam)
from olympia.users.models import UserProfile
class TestReviewModel(TestCase):
fixtures = ['reviews/test_models']
def test_translations(self):
translation.activate('en-US')
# There's en-US and de translations. We should get en-US.
r1 = Review.objects.get(id=1)<|fim▁hole|> # There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
translation.activate('de')
# en and de exist, we get de.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title de', 'de')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
def test_soft_delete(self):
eq_(Review.objects.count(), 2)
eq_(Review.unfiltered.count(), 2)
Review.objects.get(id=1).delete()
eq_(Review.objects.count(), 1)
eq_(Review.unfiltered.count(), 2)
Review.objects.filter(id=2).delete()
eq_(Review.objects.count(), 0)
eq_(Review.unfiltered.count(), 2)
def test_filter_for_many_to_many(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
addon = review.addon
assert review in addon._reviews.all()
# Delete the review: it shouldn't be listed anymore.
review.update(deleted=True)
addon = Addon.objects.get(pk=addon.pk)
assert review not in addon._reviews.all()
def test_no_filter_for_relations(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
flag = ReviewFlag.objects.create(review=review,
flag='review_flag_reason_spam')
assert flag.review == review
# Delete the review: reviewflag.review should still work.
review.update(deleted=True)
flag = ReviewFlag.objects.get(pk=flag.pk)
assert flag.review == review
class TestGroupedRating(TestCase):
fixtures = ['reviews/dev-reply']
grouped_ratings = [(1, 0), (2, 0), (3, 0), (4, 1), (5, 0)]
def test_get_none(self):
eq_(GroupedRating.get(3, update_none=False), None)
def test_set(self):
eq_(GroupedRating.get(1865, update_none=False), None)
GroupedRating.set(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_cron(self):
eq_(GroupedRating.get(1865, update_none=False), None)
tasks.addon_grouped_rating(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_update_none(self):
eq_(GroupedRating.get(1865, update_none=False), None)
eq_(GroupedRating.get(1865, update_none=True), self.grouped_ratings)
class TestSpamTest(TestCase):
fixtures = ['reviews/test_models']
def test_create_not_there(self):
Review.objects.all().delete()
eq_(Review.objects.count(), 0)
check_spam(1)
def test_add(self):
assert Spam().add(Review.objects.all()[0], 'numbers')
class TestRefreshTest(ESTestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRefreshTest, self).setUp()
self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
self.user = UserProfile.objects.all()[0]
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)
def get_bayesian_rating(self):
q = Addon.search().filter(id=self.addon.id)
return list(q.values_dict('bayesian_rating'))[0]['bayesian_rating'][0]
def test_created(self):
eq_(self.get_bayesian_rating(), 0.0)
Review.objects.create(addon=self.addon, user=self.user, rating=4)
self.refresh()
eq_(self.get_bayesian_rating(), 4.0)
def test_edited(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.rating = 1
r.save()
self.refresh()
eq_(self.get_bayesian_rating(), 2.5)
def test_deleted(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.delete()
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)<|fim▁end|> | self.trans_eq(r1.title, 'r1 title en', 'en-US')
|
<|file_name|>trim.py<|end_file_name|><|fim▁begin|>import sys
import string
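# Echo to stdout only the stdin lines between BEGIN-LOG and END-LOG
# markers, rewriting "-0.000" as " 0.000" to squish negative zeros.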
f = sys.stdin
g = sys.stdout
<|fim▁hole|>echo = 0
while 1:
l = f.readline()
if not l: break
ll=string.strip(l)
if ll=='BEGIN-LOG':
echo = 1
elif ll=='END-LOG':
echo = 0
elif echo:
l=string.replace(l,"-0.000"," 0.000") # squish annoying negative zeros
g.write(l)<|fim▁end|> | |
<|file_name|>HistoryStateType.java<|end_file_name|><|fim▁begin|>//
// Questo file è stato generato dall'architettura JavaTM per XML Binding (JAXB) Reference Implementation, v2.2.8-b130911.1802
// Vedere <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Qualsiasi modifica a questo file andrà persa durante la ricompilazione dello schema di origine.
// Generato il: 2014.10.23 alle 11:27:04 AM CEST
//
package org.cumulus.certificate.model;<|fim▁hole|>import javax.xml.bind.annotation.XmlType;
/**
* <p>Classe Java per HistoryStateType complex type.
*
* <p>Il seguente frammento di schema specifica il contenuto previsto contenuto in questa classe.
*
* <pre>
* <complexType name="HistoryStateType">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <attribute name="stateId" use="required" type="{http://www.w3.org/2001/XMLSchema}string" />
* <attribute name="refersToStateId" use="required" type="{http://www.w3.org/2001/XMLSchema}string" />
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "HistoryStateType")
public class HistoryStateType {
@XmlAttribute(name = "stateId", required = true)
protected String stateId;
@XmlAttribute(name = "refersToStateId", required = true)
protected String refersToStateId;
/**
* Recupera il valore della proprietà stateId.
*
* @return
* possible object is
* {@link String }
*
*/
public String getStateId() {
return stateId;
}
/**
* Imposta il valore della proprietà stateId.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setStateId(String value) {
this.stateId = value;
}
/**
* Recupera il valore della proprietà refersToStateId.
*
* @return
* possible object is
* {@link String }
*
*/
public String getRefersToStateId() {
return refersToStateId;
}
/**
* Imposta il valore della proprietà refersToStateId.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setRefersToStateId(String value) {
this.refersToStateId = value;
}
}<|fim▁end|> |
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute; |
<|file_name|>constants.py<|end_file_name|><|fim▁begin|># Copyright 2015-2016 NEC Corporation. All rights reserved.<|fim▁hole|># not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NWA_DEVICE_GDV = "GeneralDev"
NWA_DEVICE_TFW = "TenantFW"
NWA_AGENT_TOPIC = 'nwa_agent'
NWA_AGENT_TYPE = 'NEC NWA Agent'
NWA_FIREWALL_PLUGIN = 'NECNWAFWaaS'
# Size by which the green thread pool grows when its remaining capacity reaches zero.
NWA_GREENPOOL_ADD_SIZE = 32<|fim▁end|> | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>import unittest
import re
from lxml import etree
from zope.testing import doctest, cleanup
import zope.component.eventtesting
from imagestore.xml import XMLValidationError, local_file
class ValidationTests(unittest.TestCase):
relaxng = etree.RelaxNG(file=local_file('rng', 'imagestore.rng'))
def validate(self, el):
if not self.relaxng.validate(etree.ElementTree(el)):
raise XMLValidationError("%s failed to validate" % el.tag)
def test_basic(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions>
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def test_attributes(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions href="sessions">
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def test_attributes_illegal(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions name="sessions">
</sessions>
</imagestore>
'''
self.assertRaises(XMLValidationError, self.validate, etree.XML(xml))
def test_extended(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions>
<session href="sessions/foo" name="foo">
<group xmlns="http://studiolab.io.tudelft.nl/ns/imagestore" href="." name="collection">
<source src="APP/sessions/foo/images/UNKNOWN" name="UNKNOWN"/>
<metadata href="metadata">
<depth href="metadata/depth">0.0</depth>
<rotation href="metadata/rotation">0.0</rotation>
<x href="metadata/x">0.0</x>
<y href="metadata/y">0.0</y>
</metadata>
<objects href="objects">
<image href="objects/alpha" name="alpha">
<source src="APP/sessions/foo/images/a.png" name="a.png"/>
<metadata href="objects/alpha/metadata">
<depth href="objects/alpha/metadata/depth">0.0</depth>
<rotation href="objects/alpha/metadata/rotation">0.0</rotation>
<x href="objects/alpha/metadata/x">0.0</x>
<y href="objects/alpha/metadata/y">0.0</y>
</metadata><|fim▁hole|> <group href="objects/beta" name="beta">
<source src="APP/sessions/foo/images/a.png" name="a.png"/>
<metadata href="objects/beta/metadata">
<depth href="objects/beta/metadata/depth">0.0</depth>
<rotation href="objects/beta/metadata/rotation">0.0</rotation>
<x href="objects/beta/metadata/x">0.0</x>
<y href="objects/beta/metadata/y">0.0</y>
</metadata>
<objects href="objects/beta/objects"/>
</group>
</objects>
</group>
<images>
</images>
</session>
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def setUpZope(test):
zope.component.eventtesting.setUp(test)
def cleanUpZope(test):
cleanup.cleanUp()
r_created = re.compile('<created>[^/]*</created>')
r_modified = re.compile('<modified>[^/]*</modified>')
def datetime_normalize(xml):
result = r_created.sub('<created></created>', xml)
    result = r_modified.sub('<modified></modified>', result)
return result
def test_suite():
optionflags = (
doctest.ELLIPSIS
| doctest.REPORT_NDIFF
| doctest.NORMALIZE_WHITESPACE
)
return unittest.TestSuite([
doctest.DocFileSuite(
'model.txt', optionflags=optionflags,
setUp=setUpZope, tearDown=cleanUpZope,
globs={'datetime_normalize': datetime_normalize}),
unittest.makeSuite(ValidationTests)])<|fim▁end|> | </image> |
<|file_name|>clone.js<|end_file_name|><|fim▁begin|>define(function(require) {
/*<|fim▁hole|> DEPENDENCIES
*/
var BaseDialog = require('utils/dialogs/dialog');
var TemplateHTML = require('hbs!./clone/html');
var Sunstone = require('sunstone');
var Notifier = require('utils/notifier');
var Locale = require('utils/locale');
var OpenNebulaSecurityGroup = require('opennebula/securitygroup');
/*
CONSTANTS
*/
var DIALOG_ID = require('./clone/dialogId');
var TAB_ID = require('../tabId');
/*
CONSTRUCTOR
*/
function Dialog() {
this.dialogId = DIALOG_ID;
BaseDialog.call(this);
}
Dialog.DIALOG_ID = DIALOG_ID;
Dialog.prototype = Object.create(BaseDialog.prototype);
Dialog.prototype.constructor = Dialog;
Dialog.prototype.html = _html;
Dialog.prototype.onShow = _onShow;
Dialog.prototype.setup = _setup;
return Dialog;
/*
FUNCTION DEFINITIONS
*/
function _html() {
return TemplateHTML({
'dialogId': this.dialogId
});
}
function _setup(context) {
var that = this;
context.off('invalid.fndtn.abide', '#' + DIALOG_ID + 'Form');
context.off('valid.fndtn.abide', '#' + DIALOG_ID + 'Form');
context.on('invalid.fndtn.abide', '#' + DIALOG_ID + 'Form', function(e) {
// Fix for valid event firing twice
if (e.namespace != 'abide.fndtn') { return; }
Notifier.notifyError(Locale.tr("One or more required fields are missing or malformed."));
}).on('valid.fndtn.abide', '#' + DIALOG_ID + 'Form', function(e) {
// Fix for valid event firing twice
if (e.namespace != 'abide.fndtn') { return; }
var name = $('input', this).val();
var sel_elems = Sunstone.getDataTable(TAB_ID).elements();
if (sel_elems.length > 1){
for (var i=0; i< sel_elems.length; i++)
//use name as prefix if several items selected
Sunstone.runAction('SecurityGroup.clone',
sel_elems[i],
name + OpenNebulaSecurityGroup.getName(sel_elems[i]));
} else {
Sunstone.runAction('SecurityGroup.clone',sel_elems[0],name);
}
return false;
});
context.foundation('reflow', 'abide');
return false;
}
function _onShow(context) {
var sel_elems = Sunstone.getDataTable(TAB_ID).elements();
//show different text depending on how many elements are selected
if (sel_elems.length > 1) {
$('.clone_one', context).hide();
$('.clone_several', context).show();
$('input',context).val('Copy of ');
} else {
$('.clone_one', context).show();
$('.clone_several', context).hide();
$('input',context).val('Copy of ' + OpenNebulaSecurityGroup.getName(sel_elems[0]));
}
$("input[name='name']",context).focus();
return false;
}
});<|fim▁end|> | |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from django.contrib import admin
from .models import User<|fim▁hole|><|fim▁end|> |
admin.site.register(User) |
<|file_name|>utils_data.py<|end_file_name|><|fim▁begin|>import numpy as np
import cvxopt as co
def load_mnist_dataset():<|fim▁hole|> train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_fashion_mnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.FashionMNIST(root='../data/fashion-mnist', train=True, download=True, transform=None)
mnist_test = datasets.FashionMNIST(root='../data/fashion-mnist', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_emnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.EMNIST(root='../data/emnist', split='balanced', train=True, download=True, transform=None)
mnist_test = datasets.EMNIST(root='../data/emnist', split='balanced', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_cifar10_dataset():
import torchvision.datasets as datasets
cifar_train = datasets.CIFAR10(root='../data/cifar10', train=True, download=True, transform=None)
cifar_test = datasets.CIFAR10(root='../data/cifar10', train=False, download=True, transform=None)
test_labels = np.array([cifar_test[i][1] for i in range(len(cifar_test))], dtype=np.int)
train_labels = np.array([cifar_train[i][1] for i in range(len(cifar_train))], dtype=np.int)
test = np.array([np.asarray(cifar_test[i][0].convert('F')).reshape(32*32) for i in range(len(cifar_test))], dtype=np.float)
train = np.array([np.asarray(cifar_train[i][0].convert('F')).reshape(32*32) for i in range(len(cifar_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [32, 32]
def get_gaussian(num, dims=2, means=[0,0], vars=[1,1]):
data = np.random.multivariate_normal(means, np.eye(dims), num)
return data
def get_2state_gaussian_seq(lens,dims=2,means1=[2,2,2,2],means2=[5,5,5,5],vars1=[1,1,1,1],vars2=[1,1,1,1],anom_prob=1.0):
seqs = np.zeros((dims, lens))
lbls = np.zeros((1, lens), dtype=np.int8)
marker = 0
# generate first state sequence
for d in range(dims):
seqs[d, :] = np.random.randn(lens)*vars1[d] + means1[d]
prob = np.random.uniform()
if prob < anom_prob:
# add second state blocks
while True:
max_block_len = 0.6*lens
min_block_len = 0.1*lens
block_len = np.int(max_block_len*np.random.uniform()+3)
block_start = np.int(lens*np.random.uniform())
if block_len - (block_start+block_len-lens)-3 > min_block_len:
break
block_len = min( [block_len, block_len - (block_start+block_len-lens)-3] )
lbls[block_start:block_start+block_len-1] = 1
marker = 1
for d in range(dims):
seqs[d,block_start:block_start+block_len-1] = np.random.randn(1,block_len-1)*vars2[d] + means2[d]
return seqs, lbls, marker
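# Usage sketch: seqs, lbls, marker = get_2state_gaussian_seq(200, dims=2,
#     means1=[2,2], means2=[5,5], vars1=[1,1], vars2=[1,1], anom_prob=0.5)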
def get_2state_anom_seq(lens, comb_block_len, anom_prob=1.0, num_blocks=1):
marker = 0
# generate first state sequence, gaussian noise 0=mean, 1=variance
seqs = np.zeros((1, lens))
lbls = np.zeros((1, lens))
bak = seqs.copy()
prob = np.random.uniform()
if prob < anom_prob:
# add second state blocks
block_len = np.int(np.floor(comb_block_len / float(num_blocks)))
marker = 1
# add a single block
blen = 0
for b in range(np.int(num_blocks)):
if (b==num_blocks-1 and b>1):
block_len = np.round(comb_block_len-blen)
isDone = False
while isDone == False:
start = np.int(np.random.uniform()*float(lens-block_len+1))
if np.sum(lbls[0,start:start+block_len]) == 0:
lbls[0, start:start+block_len] = 1
seqs[0, start:start+block_len] = bak[0, start:start+block_len]+4.0
isDone = True
break
blen += block_len
return seqs, lbls, marker<|fim▁end|> | import torchvision.datasets as datasets
mnist_train = datasets.MNIST(root='../data/mnist', train=True, download=True, transform=None)
mnist_test = datasets.MNIST(root='../data/mnist', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int) |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
import urlparse
import urllib
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode, smart_str
from .managers import RecentSearchManager
from .classes import SearchModel
class RecentSearch(models.Model):
"""
Keeps a list of the n most recent search keywords for a given user
"""
user = models.ForeignKey(User, verbose_name=_(u'user'), editable=False)
query = models.TextField(verbose_name=_(u'query'), editable=False)
datetime_created = models.DateTimeField(verbose_name=_(u'datetime created'), editable=False)
hits = models.IntegerField(verbose_name=_(u'hits'), editable=False)
objects = RecentSearchManager()
def __unicode__(self):<|fim▁hole|> if self.is_advanced():
# Advanced search
advanced_string = []
for key, value in query_dict.items():
search_field = document_search.get_search_field(key)
advanced_string.append(u'%s: %s' % (search_field.label, smart_unicode(' '.join(value))))
display_string = u', '.join(advanced_string)
else:
# Is a simple search
display_string = smart_unicode(' '.join(query_dict['q']))
return u'%s (%s)' % (display_string, self.hits)
def save(self, *args, **kwargs):
self.datetime_created = datetime.now()
super(RecentSearch, self).save(*args, **kwargs)
def url(self):
view = 'results' if self.is_advanced() else 'search'
return '%s?%s' % (reverse(view), self.query)
def is_advanced(self):
return 'q' not in urlparse.parse_qs(self.query)
class Meta:
ordering = ('-datetime_created',)
verbose_name = _(u'recent search')
verbose_name_plural = _(u'recent searches')<|fim▁end|> | document_search = SearchModel.get('documents.Document')
query_dict = urlparse.parse_qs(urllib.unquote_plus(smart_str(self.query)))
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod parser;
pub mod parser_error;
mod token;<|fim▁hole|><|fim▁end|> | mod lexer;
mod lexer_error; |
<|file_name|>test_flavor_rxtx.py<|end_file_name|><|fim▁begin|># Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.<|fim▁hole|>
from nova.compute import flavors
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"swap": '5',
"disabled": False,
"ephemeral_gb": '20',
"rxtx_factor": '1.0',
"vcpus": 1,
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '10',
"swap": '10',
"ephemeral_gb": '25',
"rxtx_factor": None,
"disabled": False,
"vcpus": 1,
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
return [
fake_flavor_get_by_flavor_id(1),
fake_flavor_get_by_flavor_id(2)
]
class FlavorRxtxTestV21(test.NoDBTestCase):
content_type = 'application/json'
_prefix = "/v2/fake"
def setUp(self):
super(FlavorRxtxTestV21, self).setUp()
ext = ('nova.api.openstack.compute.contrib'
'.flavor_rxtx.Flavor_rxtx')
self.flags(osapi_compute_extension=[ext])
fakes.stub_out_nw_api(self)
self.stubs.Set(flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(self._get_app())
return res
def _get_app(self):
return fakes.wsgi_app_v21(init_only=('servers',
'flavors', 'os-flavor-rxtx'))
def _get_flavor(self, body):
return jsonutils.loads(body).get('flavor')
def _get_flavors(self, body):
return jsonutils.loads(body).get('flavors')
def assertFlavorRxtx(self, flavor, rxtx):
self.assertEqual(str(flavor.get('rxtx_factor')), rxtx)
def test_show(self):
url = self._prefix + '/flavors/1'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')
def test_detail(self):
url = self._prefix + '/flavors/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
flavors = self._get_flavors(res.body)
self.assertFlavorRxtx(flavors[0], '1.0')
self.assertFlavorRxtx(flavors[1], '')<|fim▁end|> |
from oslo_serialization import jsonutils
import webob |
<|file_name|>constants.js<|end_file_name|><|fim▁begin|>/*
#########################################################################
#
# Copyright (C) 2019 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,<|fim▁hole|># You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
*/
export default 'ALERT_SETTINGS';<|fim▁end|> | # but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# |
<|file_name|>auto_tags.py<|end_file_name|><|fim▁begin|>"""
This module implements an Ansible plugin that is triggered at the start of a playbook.
The plugin dynamically generates a tag for each role. Each tag has the same name as its role.
The advantage of this is that it saves you some boilerplate, because you don't have to wrap
all tasks of a role in an additional block and assign a tag to that.
Additionally, it works automatically when you add new roles to your playbook.
Usage is exactly the same as without this plugin:
ansible-playbook --tags=some_tag provision.yml
Here, the "some_tag" tag was generated dynamically (assuming there is a "some_tag" role).
Installation:
1. Place this file in `plugins/callbacks/auto_tags.py` (relative to your playbook root)
2. Add the following two lines to your `ansible.cfg` file:
callback_plugins = plugins/callbacks
callback_whitelist = auto_tags
"""
from __future__ import print_function
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
Ansible supports several types of plugins. We are using the *callback* type here, since
it seemed the best choice for our use case, because it allows you to hook into the start
of a playbook.
"""
def v2_playbook_on_start(self, playbook):
"""
Dynamically add a tag of the same name to each role.
Note: Plays, roles, task_blocks and tasks can have tags.
"""
plays = playbook.get_plays()
# Note: Although identical roles are shared between plays we cannot deduplicate them,
# since Ansible treats them as different objects internally
roles = [role for play in plays for role in play.get_roles()]
# Note: Tags for roles are set dynamically in `_load_role_data` instead of in __init__
# I don't know why they do that.
for role in roles:
role_name = role._role_name<|fim▁hole|> if role_name not in role.tags:
role.tags += [role_name]<|fim▁end|> | |
<|file_name|>locus.rs<|end_file_name|><|fim▁begin|>//! Provide a location.
//!
//! ```text
//! _ _ _
//! _ __ ___| (_)___(_) ___ _ __
//! | '__/ _ \ | / __| |/ _ \| '_ \
//! | | | __/ | \__ \ | (_) | | | |
//! |_| \___|_|_|___/_|\___/|_| |_|
//! ```
//! The relision term rewriting library.
//!
//! # License<|fim▁hole|>//!
//! Copyright (c) 2015 by Stacy Prowell. All rights reserved.
//!
//! Licensed under the BSD 2-Clause license. See the file LICENSE
//! that is part of this distribution. This file may not be copied,
//! modified, or distributed except according to those terms.
/// The location of a term's declaration.
/// Every term can have an associated location, which tells where the term
/// was originally declared. This can be *internal*, or it can be from a
/// *file*, or it could be from an interactive *console* session.
#[derive(Clone, Debug, PartialEq, PartialOrd)]
pub enum Locus {
/// The internal locus applies where a term is created not as the result of
/// a file or console input line.
Internal,
/// The console locus applies where a term is created as the result of an
/// input line without a known relevant file. This could be interactive,
/// or it could be from an unnamed stream.
Console(u32, u32),
/// The file locus applies where a file (or other named source like a URL)
/// is the source of the term.
File(String, u32, u32),
}
use std::fmt;
impl fmt::Display for Locus {
fn fmt(&self, form: &mut fmt::Formatter) -> fmt::Result {
match *self {
Locus::Internal => write!(form, ""),
Locus::Console(line, column) => {
write!(form, "{}:{}", line, column)
},
Locus::File(ref name, line, column) => {
write!(form, "{}:{}:{}", name, line, column)
}
}
}
}<|fim▁end|> | |
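
// Illustration of the Display impl above (hypothetical values):
// Locus::File("terms.rel".to_string(), 3, 14) renders as "terms.rel:3:14",
// Locus::Console(3, 14) as "3:14", and Locus::Internal as "".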
<|file_name|>test_directoryscanner.py<|end_file_name|><|fim▁begin|>import unittest
from os import path
from API.directoryscanner import find_runs_in_directory
path_to_module = path.abspath(path.dirname(__file__))
class TestDirectoryScanner(unittest.TestCase):
def test_sample_names_spaces(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-names-with-spaces"))
self.assertEqual(1, len(runs))
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertEqual(sample.get_id(), sample.get_id().strip())
def test_single_end(self):
runs = find_runs_in_directory(path.join(path_to_module, "single_end"))
self.assertEqual(1, len(runs))
self.assertEqual("SINGLE_END", runs[0].metadata["layoutType"])
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertFalse(sample.is_paired_end())
def test_completed_upload(self):
runs = find_runs_in_directory(path.join(path_to_module, "completed"))
self.assertEqual(0, len(runs))
def test_find_sample_sheet_name_variations(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-sheet-name-variations"))<|fim▁hole|><|fim▁end|> | self.assertEqual(1, len(runs)) |
<|file_name|>alert_processor.py<|end_file_name|><|fim▁begin|>"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.shared import ALERT_PROCESSOR_NAME
from streamalert_cli.terraform.common import infinitedict
from streamalert_cli.terraform.lambda_module import generate_lambda
def generate_alert_processor(config):
"""Generate Terraform for the Alert Processor
Args:
config (dict): The loaded config from the 'conf/' directory
Returns:
dict: Alert Processor dict to be marshaled to JSON
"""
prefix = config['global']['account']['prefix']
result = infinitedict()
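    # infinitedict() is expected to return a recursively self-defaulting dict, so nested
    # keys like result['module']['alert_processor_iam'] can be assigned without setup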
# Set variables for the IAM permissions module
result['module']['alert_processor_iam'] = {
'source': './modules/tf_alert_processor_iam',
'account_id': config['global']['account']['aws_account_id'],
'region': config['global']['account']['region'],
'prefix': prefix,
'role_id': '${module.alert_processor_lambda.role_id}',
'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}',
'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}',
'output_lambda_functions': [
# Strip qualifiers: only the function name is needed for the IAM permissions
func.split(':')[0] for func in list(config['outputs'].get('aws-lambda', {}).values())
],
'output_s3_buckets': list(config['outputs'].get('aws-s3', {}).values()),
'output_sns_topics': list(config['outputs'].get('aws-sns', {}).values()),
'output_sqs_queues': list(config['outputs'].get('aws-sqs', {}).values())
}
# Set variables for the Lambda module
result['module']['alert_processor_lambda'] = generate_lambda(
'{}_streamalert_{}'.format(config['global']['account']['prefix'], ALERT_PROCESSOR_NAME),
'streamalert.alert_processor.main.handler',
config['lambda']['alert_processor_config'],
config,<|fim▁hole|> 'ALERTS_TABLE': '{}_streamalert_alerts'.format(prefix),
'AWS_ACCOUNT_ID': config['global']['account']['aws_account_id'],
'STREAMALERT_PREFIX': prefix
}
)
return result<|fim▁end|> | environment={ |
<|file_name|>multi-thread-example.py<|end_file_name|><|fim▁begin|># coding: utf-8
#
# GIL limit python multi-thread effectiveness.
# But is seems fine, because these operation have so many socket IO
# So it seems no need to use multiprocess
#
import uiautomator2 as u2
import adbutils
import threading
from logzero import logger<|fim▁hole|>
def worker(d: u2.Device):
d.app_start("io.appium.android.apis", stop=True)
d(text="App").wait()
for el in d.xpath("@android:id/list").child("/android.widget.TextView").all():
logger.info("%s click %s", d.serial, el.text)
el.click()
d.press("back")
logger.info("%s DONE", d.serial)
for dev in adbutils.adb.device_list():
print("Dev:", dev)
d = u2.connect(dev.serial)
t = threading.Thread(target=worker, args=(d,))
t.start()<|fim▁end|> | |
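
# Note: these Thread objects are non-daemon, so the interpreter waits for every worker
# to finish; keep references and call t.join() if you need to synchronize explicitly.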
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|># ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
old_bl_idnames = {
'CentersPolsNode' : "centers",
# 'BakeryNode' : "bakery",
'CircleNode' : "circle",
'ListItemNode' : "list_item",
'GenRangeNode' : "range",
'GenSeriesNode' : "series",
# 'Test1Node' : "test",
# 'Test2Node' : "test",
# 'ToolsNode' : "tools",
'SvReRouteNode': "reroute",
'VoronoiNode': "voronoi",
'ViewerNode': "viewer",
'EvalKnievalNode': "eval_knieval",
'FormulaNode': 'formula',
}
# we should add some functions to load things there
import importlib
import inspect
import traceback
import bpy
from sverchok.node_tree import SverchCustomTreeNode
imported_mods = {}
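# maps an old bl_idname to its imported module so each legacy node module is registered only once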
def is_old(node_info):
'''
Check if node or node.bl_idname is among
the old nodes
'''
if isinstance(node_info, str):
# assumes bl_idname
return node_info in old_bl_idnames
elif isinstance(node_info, bpy.types.Node):
return node_info.bl_idname in old_bl_idnames
else:
return False
def scan_for_old(ng):
nodes = [n for n in ng.nodes if n.bl_idname in old_bl_idnames]
for node in nodes:
mark_old(node)
def mark_old(node):
if node.parent and node.parent.label == "Deprecated node!":
return
ng = node.id_data
frame = ng.nodes.new("NodeFrame")
if node.parent:
frame.parent = node.parent
node.parent = frame
frame.label = "Deprecated node!"
frame.use_custom_color = True
frame.color = (.8, 0, 0)
frame.shrink = True
def reload_old(ng=False):
if ng:
bl_idnames = {n.bl_idname for n in ng.nodes if n.bl_idname in old_bl_idnames}
for bl_id in bl_idnames:
mod = register_old(bl_id)
if mod:
importlib.reload(mod)
else:
print("Couldn't reload {}".format(bl_id))
else:
for ng in bpy.data.node_groups:
reload_old(ng)
#if ng.bl_idname in { 'SverchCustomTreeType', 'SverchGroupTreeType'}:
# reload_old(ng)
def load_old(ng):
"""
This approach didn't work, bl_idname of undefined node isn't as I expected
bl_idnames = {n.bl_idname for n in ng.nodes}
old_bl_ids = bl_idnames.intersection(old_bl_idnames)
if old_bl_ids:
"""
not_reged_nodes = list(n for n in ng.nodes if not n.is_registered_node_type())
if not_reged_nodes:
for bl_id in old_bl_idnames:
register_old(bl_id)
nodes = [n for n in ng.nodes if n.bl_idname == bl_id]
if nodes:
for node in nodes:
mark_old(node)
not_reged_nodes = list(n for n in ng.nodes if not n.is_registered_node_type())
node_count = len(not_reged_nodes)
print("Loaded {}. {} nodes are left unregisted.".format(bl_id, node_count))
if node_count == 0:
return
            else:  # registering this type didn't help, so remove it again
unregister_old(bl_id)
def register_old(bl_id):
if bl_id in old_bl_idnames:
mod = importlib.import_module(".{}".format(old_bl_idnames[bl_id]), __name__)
res = inspect.getmembers(mod)
for name, cls in res:
if inspect.isclass(cls):
if issubclass(cls, bpy.types.Node) and cls.bl_idname == bl_id:
if bl_id not in imported_mods:
try:
mod.register()
except:
traceback.print_exc()
imported_mods[bl_id] = mod
return mod
print("Cannot find {} among old nodes".format(bl_id))
return None
def unregister_old(bl_id):
global imported_mods
mod = imported_mods.get(bl_id)
if mod:
#print("Unloaded old node type {}".format(bl_id))
mod.unregister()
del imported_mods[bl_id]
def unregister():
global imported_mods
print(imported_mods)
for mod in imported_mods.values():
mod.unregister()
imported_mods = {}<|fim▁end|> | |
<|file_name|>doormon.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2016 Digi International Inc. All Rights Reserved.
"""
Monitor the WR31 door enclosure
"""
import time
import sys
import sarcli
import idigidata
def millisecond_timestamp():
"""
Return a timestamp, in milliseconds
:return ms_timestamp: int, Timestamp in milliseconds
"""
ms_timestamp = int(time.time() * 1000)
return ms_timestamp
def cli_command(cmd):
"""
Send a command to the SarOS CLI and receive the response
:param cmd: str, Command to run
:return response: str, Response to cmd
"""
cli = sarcli.open()
cli.write(cmd)
response = cli.read()
cli.close()
return response
class SmsAlert(object):
"""
Send an SMS alert
"""
def __init__(self, destination, custom_text):
self.destination = destination
self.custom_text = custom_text
def send_alert(self, message):
"""
Send an SMS alert
:param message: str, Content of SMS message
:return response: str, Response to sendsms command
"""
message = "{0}: {1}".format(self.custom_text, message)
command = 'sendsms ' + self.destination + ' "' + message + '" '
response = cli_command(command)
return response
class DatapointAlert(object):
"""
Send a Datapoint alert
"""
def __init__(self, destination):
self.destination = destination
def send_alert(self, message):
"""
Send a Datapoint alert
:param message: str, Datapoint content
:return response: tuple, Result code of datapoint upload attempt
"""
timestamp = millisecond_timestamp()
dpoint = """\
<DataPoint>
<dataType>STRING</dataType>
<data>{0}</data>
<timestamp>{1}</timestamp>
<streamId>{2}</streamId>
</DataPoint>""".format(message, timestamp, self.destination)
response = idigidata.send_to_idigi(dpoint, "DataPoint/stream.xml")
return response
class DoorMonitor(object):
"""
Provides methods to monitor the enclosure door status
"""
def __init__(self, alert_list):
self.d1_status = ""
self.alert_list = alert_list
@classmethod
def switch_status(cls):
"""
Reads line status and sends an alert if the status is different
:return status: str, Door status, "OPEN" or "CLOSED"
"""
response = cli_command("gpio dio")
if "D1: DOUT=OFF, DIN=LOW" in response:
if not "D0: DOUT=ON" in response:
# Door is closed
status = "CLOSED"
else:
# Door is open
status = "OPEN"
return status
def send_alert(self, text):
"""
:param text: str, Alert content
:return:
"""
for alert in self.alert_list:
alert.send_alert(text)
def monitor_switch(self):
"""
Runs line monitoring and alerting in a loop
:return:
"""
while True:
status = self.switch_status()
if status != self.d1_status:
print "WR31 door is: {0}".format(status)
<|fim▁hole|>
if __name__ == '__main__':
ALERT_FUNCTIONS = [DatapointAlert("WR31_door")]
if len(sys.argv) >= 3:
CUSTOM_TEXT = sys.argv[2]
else:
CUSTOM_TEXT = "WR31 Door"
if len(sys.argv) >= 2:
ALERT_FUNCTIONS.append(SmsAlert(sys.argv[1], CUSTOM_TEXT))
MONITOR = DoorMonitor(ALERT_FUNCTIONS)
MONITOR.monitor_switch()<|fim▁end|> | self.send_alert(status)
self.d1_status = status
time.sleep(.5)
|
<|file_name|>SendMessageContext.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.rocketmq.client.hook;
import java.util.Map;
import com.alibaba.rocketmq.client.impl.CommunicationMode;
import com.alibaba.rocketmq.client.producer.SendResult;<|fim▁hole|>import com.alibaba.rocketmq.common.message.MessageQueue;
public class SendMessageContext {
private String producerGroup;
private Message message;
private MessageQueue mq;
private String brokerAddr;
private String bornHost;
private CommunicationMode communicationMode;
private SendResult sendResult;
private Exception exception;
private Object mqTraceContext;
private Map<String, String> props;
public String getProducerGroup() {
return producerGroup;
}
public void setProducerGroup(String producerGroup) {
this.producerGroup = producerGroup;
}
public Message getMessage() {
return message;
}
public void setMessage(Message message) {
this.message = message;
}
public MessageQueue getMq() {
return mq;
}
public void setMq(MessageQueue mq) {
this.mq = mq;
}
public String getBrokerAddr() {
return brokerAddr;
}
public void setBrokerAddr(String brokerAddr) {
this.brokerAddr = brokerAddr;
}
public CommunicationMode getCommunicationMode() {
return communicationMode;
}
public void setCommunicationMode(CommunicationMode communicationMode) {
this.communicationMode = communicationMode;
}
public SendResult getSendResult() {
return sendResult;
}
public void setSendResult(SendResult sendResult) {
this.sendResult = sendResult;
}
public Exception getException() {
return exception;
}
public void setException(Exception exception) {
this.exception = exception;
}
public Object getMqTraceContext() {
return mqTraceContext;
}
public void setMqTraceContext(Object mqTraceContext) {
this.mqTraceContext = mqTraceContext;
}
public Map<String, String> getProps() {
return props;
}
public void setProps(Map<String, String> props) {
this.props = props;
}
public String getBornHost() {
return bornHost;
}
public void setBornHost(String bornHost) {
this.bornHost = bornHost;
}
}<|fim▁end|> | import com.alibaba.rocketmq.common.message.Message; |
<|file_name|>Event.java<|end_file_name|><|fim▁begin|>package org.intellimate.izou.sdk.events;
import org.intellimate.izou.events.EventBehaviourControllerModel;
import org.intellimate.izou.events.EventLifeCycle;
import org.intellimate.izou.events.EventModel;
import org.intellimate.izou.identification.Identification;
import org.intellimate.izou.resource.ListResourceProvider;
import org.intellimate.izou.resource.ResourceModel;
import org.intellimate.izou.sdk.resource.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
/**
* This class represents an Event.
 * This class is immutable! For every change it will return a new instance!
*/
public class Event implements EventModel<Event> {
private final String type;
private final Identification source;
private final List<String> descriptors;
private final ListResourceProvider listResourceContainer;
private final EventBehaviourController eventBehaviourController;
private final ConcurrentHashMap<EventLifeCycle, List<Consumer<EventLifeCycle>>> lifeCycleListeners = new ConcurrentHashMap<>();
/**
* Creates a new Event Object
* @param type the Type of the Event, try to use the predefined Event types
* @param source the source of the Event, most likely a this reference.
* @param descriptors the descriptors to initialize the Event with
* @throws IllegalArgumentException if one of the Arguments is null or empty
*/
protected Event(String type, Identification source, List<String> descriptors) throws IllegalArgumentException {
if(type == null || type.isEmpty()) throw new IllegalArgumentException("illegal type");
if(source == null) throw new IllegalArgumentException("source is null");
this.type = type;
this.source = source;
this.descriptors = Collections.unmodifiableList(descriptors);
listResourceContainer = new ListResourceProviderImpl();
eventBehaviourController = new EventBehaviourController();
}
/**
* Creates a new Event Object
* @param type the Type of the Event, try to use the predefined Event types
* @param source the source of the Event, most likely a this reference.
* @param listResourceContainer the ResourceContainer
* @param descriptors the descriptors to initialize the Event with
* @param eventBehaviourController the Controller of the Event
* @throws IllegalArgumentException if one of the Arguments is null or empty
*/
protected Event(String type, Identification source, ListResourceProvider listResourceContainer, List<String> descriptors,
EventBehaviourController eventBehaviourController)throws IllegalArgumentException {
if(type == null || type.isEmpty()) throw new IllegalArgumentException("illegal type");
if(source == null) throw new IllegalArgumentException("source is null");
this.type = type;
this.source = source;
this.descriptors = Collections.unmodifiableList(new ArrayList<>(descriptors));
this.listResourceContainer = listResourceContainer;
this.eventBehaviourController = eventBehaviourController;
}
/**
* Creates a new Event Object
* @param type the Type of the Event, try to use the predefined Event types
* @param source the source of the Event, most likely a this reference.
* @return an Optional, that may be empty if type is null or empty or source is null
*/
public static Optional<Event> createEvent(String type, Identification source) {
try {
return Optional.of(new Event(type, source, new ArrayList<>()));
} catch (IllegalArgumentException e) {
return Optional.empty();
}
}
/**
* Creates a new Event Object
* @param type the Type of the Event, try to use the predefined Event types
* @param source the source of the Event, most likely a this reference.
* @param descriptors the descriptors
* @return an Optional, that may be empty if type is null or empty or source is null
*/
public static Optional<Event> createEvent(String type, Identification source, List<String> descriptors) {
try {
return Optional.of(new Event(type, source, descriptors));
} catch (IllegalArgumentException e) {
return Optional.empty();
}
}
/**
* The ID of the Event.
* It describes the Type of the Event.
* @return A String containing an ID
*/
@Override
public String getID() {
return type;
}
/**
* The type of the Event.
* It describes the Type of the Event.
* @return A String containing an ID
*/
@Override
public String getType() {
return type;
}
/**
* returns the Source of the Event, e.g. the object who fired it.
* @return an identifiable
*/
@Override
public Identification getSource() {
return source;
}
/**
* returns all the Resources the Event currently has
* @return an instance of ListResourceContainer
*/
@Override
public ListResourceProvider getListResourceContainer() {
return listResourceContainer;
}
/**
* adds a Resource to the Container
* @param resource an instance of the resource to add
* @return the resulting Event (which is the same instance)<|fim▁hole|> @Override
public Event addResource(ResourceModel resource) {
listResourceContainer.addResource(resource);
return this;
}
/**
* adds a List of Resources to the Container
* @param resources a list containing all the resources
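     * @return the resulting Event (which is the same instance)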
*/
@Override
public Event addResources(List<ResourceModel> resources) {
listResourceContainer.addResource(resources);
return this;
}
/**
* returns a List containing all the Descriptors.
* @return a List containing the Descriptors
*/
@Override
public List<String> getDescriptors() {
return descriptors;
}
/**
* returns a List containing all the Descriptors and the type.
* @return a List containing the Descriptors
*/
@Override
public List<String> getAllInformations() {
ArrayList<String> strings = new ArrayList<>(descriptors);
strings.add(type);
return strings;
}
/**
* sets the Descriptors (but not the Event-Type).
* <p>
* Replaces all existing descriptors.
* Since Event is immutable, it will create a new Instance.
* </p>
* @param descriptors a List containing all the Descriptors
     * @return the resulting Event (a new instance, since Event is immutable)
*/
public Event setDescriptors(List<String> descriptors) {
return new Event(getType(), getSource(), descriptors);
}
/**
     * adds a single Descriptor (but not the Event-Type).
     * @param descriptor a String describing the Event.
     * @return the resulting Event (a new instance, since Event is immutable)
*/
public Event addDescriptor(String descriptor) {
List<String> newDescriptors = new ArrayList<>();
newDescriptors.addAll(descriptors);
newDescriptors.add(descriptor);
return new Event(getType(), getSource(), newDescriptors);
}
/**
* replaces the Descriptors
* @param descriptors a list containing the Descriptors.
     * @return the resulting Event (a new instance, since Event is immutable)
*/
public Event replaceDescriptors(List<String> descriptors) {
return new Event(getType(), getSource(), descriptors);
}
/**
* returns whether the event contains the specific descriptor.
* this method also checks whether it matches the type.
* @param descriptor a String with the ID of the Descriptor
* @return boolean when the Event contains the descriptor, false when not.
*/
@Override
public boolean containsDescriptor(String descriptor) {
return descriptors.contains(descriptor) || type.equals(descriptor);
}
/**
* returns the associated EventBehaviourController
* @return an instance of EventBehaviourController
*/
@Override
public EventBehaviourControllerModel getEventBehaviourController() {
return eventBehaviourController;
}
@Override
public void lifecycleCallback(EventLifeCycle eventLifeCycle) {
lifeCycleListeners.getOrDefault(eventLifeCycle, new LinkedList<>()).stream()
.forEach(eventLifeCycleConsumer -> eventLifeCycleConsumer.accept(eventLifeCycle));
}
/**
* adds the Consumer to the specified EventLifeCycle.
     * In its current implementation the invocation of the callback methods is parallel, but the notification of the listeners is not.
* @param eventLifeCycle the EventLifeCycle to target
* @param cycleCallback the callback
*/
@SuppressWarnings("unused")
public Event addEventLifeCycleListener(EventLifeCycle eventLifeCycle, Consumer<EventLifeCycle> cycleCallback) {
lifeCycleListeners.compute(eventLifeCycle, (unused, list) -> {
if (list == null)
list = new ArrayList<>();
list.add(cycleCallback);
return list;
});
return this;
}
@Override
public String toString() {
return "Event{" +
"type='" + type + '\'' +
", source=" + source +
", descriptors=" + descriptors +
", listResourceContainer=" + listResourceContainer +
'}';
}
}<|fim▁end|> | */ |
<|file_name|>mailPreview.js<|end_file_name|><|fim▁begin|>import MailPreview from '../components/MailPreview.vue';
import icons from "trumbowyg/dist/ui/icons.svg";
import "trumbowyg/dist/ui/trumbowyg.css";
import "trumbowyg/dist/trumbowyg.js";
import "./trumbowyg-snippets-plugin.js";
$.trumbowyg.svgPath = icons;
window.remplib = typeof(remplib) === 'undefined' ? {} : window.remplib;
let beautify = require('js-beautify').html;
(function() {
'use strict';
remplib.templateForm = {
textareaSelector: '.js-mail-body-html-input',
codeMirror: (element) => {
return CodeMirror( element, {
value: beautify($(remplib.templateForm.textareaSelector).val()),
theme: 'base16-dark',
mode: 'htmlmixed',
indentUnit: 4,
indentWithTabs: true,
lineNumbers: true,
lineWrapping: false,
styleActiveLine: true,
styleSelectedText: true,
continueComments: true,
gutters:[
'CodeMirror-lint-markers'
],
lint: true,
autoRefresh: true,
autoCloseBrackets: true,
autoCloseTags: true,
matchBrackets: true,
matchTags: {
bothTags: true
},
htmlhint: {
'doctype-first': false,
'alt-require': false,
'space-tab-mixed-disabled': 'tab'
}
});
},
trumbowyg: (element) => {
let buttons = $.trumbowyg.defaultOptions.btns;
let plugins = {};
const snippetsData = $(element).data('snippets');
const viewHTMLButton = 'viewHTML';
buttons = $.grep(buttons, function (value) {
return value.toString() !== viewHTMLButton;
});
if (snippetsData) {
buttons.push([['snippets']]);
for (const item in snippetsData) {
// let html = `<div contentEditable="false">{{ include('${snippetsData[item].name}') }}</div>`;
let html = `{{ include('${snippetsData[item].code}') }}`;
snippetsData[item].html = html;
}
plugins.snippets = snippetsData;
}
return $(element).trumbowyg({
semanticKeepAttributes: true,
semantic: false,
autogrow: true,
btns: buttons,
plugins: plugins,
});
},
codeMirrorChanged: false,
trumbowygChanged: false,
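        // dirty flags: set when one editor changes so the other can lazily re-sync on switch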
editorChoice: () => {
return $('.js-editor-choice:checked').val();
},
previewInit: (element, mailLayoutSelect, layoutsHtmlTemplates, initialContent) => {
const getLayoutValue = () => mailLayoutSelect[mailLayoutSelect.selectedIndex].value;
const getLayoutTemplate = () => layoutsHtmlTemplates[getLayoutValue()];
const vue = new Vue({
el: element,
data: function() {
return {
"htmlContent": initialContent,
"htmlLayout": getLayoutTemplate().layout_html,
}
},
render: h => h(MailPreview),
});
mailLayoutSelect.addEventListener('change', function(e) {
vue.htmlLayout = getLayoutTemplate().layout_html;
$('body').trigger('preview:change');
});
return vue;
},
showTrumbowyg: (codeMirror, trumbowyg) => {
trumbowyg.data('trumbowyg').$box.show();
// load changed data from codemirror
if (remplib.templateForm.codeMirrorChanged) {
trumbowyg.trumbowyg('html', codeMirror.doc.getValue());
remplib.templateForm.codeMirrorChanged = false;
}
$(codeMirror.display.wrapper).hide();
},
showCodemirror: (codeMirror, trumbowyg) => {
trumbowyg.data('trumbowyg').$box.hide();
// load changed and beautified data from trumbowyg
if (remplib.templateForm.trumbowygChanged) {
codeMirror.doc.setValue(beautify(trumbowyg.trumbowyg('html')));
remplib.templateForm.trumbowygChanged = false;
}
setTimeout(function() {
codeMirror.refresh();
}, 0);
$(codeMirror.display.wrapper).show();
},
selectEditor: (codeMirror, trumbowyg) => {
if (remplib.templateForm.editorChoice() === 'editor')
remplib.templateForm.showTrumbowyg(codeMirror, trumbowyg);
else {
remplib.templateForm.showCodemirror(codeMirror, trumbowyg);
}
},
init: () => {
// initialize preview right away so user can see the email
const vue = remplib.templateForm.previewInit(
'#js-mail-preview',
$('[name="mail_layout_id"]')[0],
$('.js-mail-layouts-templates').data('mail-layouts'),
$('.js-mail-body-html-input').val(),
);
const codeMirror = remplib.templateForm.codeMirror($('.js-codemirror')[0]);
const trumbowyg = remplib.templateForm.trumbowyg('.js-html-editor');
remplib.templateForm.syncCodeMirrorWithPreview(vue, codeMirror);
remplib.templateForm.syncTrumbowygWithPreview(vue, trumbowyg);
// initialize code editors on tab change, prevents bugs with initialisation of invisible elements.
$('a[data-toggle="tab"]').one('shown.bs.tab', function (e) {
const target = $(e.target).attr("href") // activated tab
if (target === '#email') {
remplib.templateForm.selectEditor(codeMirror, trumbowyg);
}
});
// change editor when user wants to change it (radio buttons)
$('.js-editor-choice').on('change', function(e) {
e.stopPropagation();
remplib.templateForm.selectEditor(codeMirror, trumbowyg)
});
},
syncTrumbowygWithPreview: (vue, trumbowyg) => {
trumbowyg.on('tbwchange', () => {
if (remplib.templateForm.editorChoice() !== 'editor') {
return;
}
vue.htmlContent = trumbowyg.trumbowyg('html');
$('body').trigger('preview:change');
remplib.templateForm.trumbowygChanged = true;
});<|fim▁hole|> syncCodeMirrorWithPreview: (vue, codeMirror) => {
codeMirror.on('change', function( editor, change ) {
if (remplib.templateForm.editorChoice() !== 'code') {
return;
}
// ignore if update is made programmatically and not by user (avoid circular loop)
if ( change.origin === 'setValue' ) {
return;
}
vue.htmlContent = editor.doc.getValue();
$(remplib.templateForm.textareaSelector).val(editor.doc.getValue());
$('body').trigger('preview:change');
remplib.templateForm.codeMirrorChanged = true;
});
}
}
})();<|fim▁end|> | }, |
<|file_name|>1.ExchangeIfGrater.js<|end_file_name|><|fim▁begin|><|fim▁hole|>console.log('-----Problem 1. Exchange if greater-----');
function exchangeIfIsGrater (first, second){
console.log('Before exchange:', first, second);
var temp;
if(first > second){
temp = first;
first = second;
second = temp;
}
console.log('After exchange:', first, second);
}
exchangeIfIsGrater(15, 10);<|fim▁end|> | /*Write an if statement that takes two double variables a and b and exchanges their values if the first one is greater than the second.
As a result print the values a and b, separated by a space.*/
|
<|file_name|>output.py<|end_file_name|><|fim▁begin|># Copyright 1998-2004 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: output.py,v 1.1 2006/03/06 18:13:31 henrique Exp $
import os
import sys
import re
havecolor = 1
dotitles = 1
spinpos = 0
spinner = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
esc_seq = "\x1b["
g_attr = {}
g_attr["normal"] = 0
g_attr["bold"] = 1
g_attr["faint"] = 2
g_attr["standout"] = 3
g_attr["underline"] = 4
g_attr["blink"] = 5
g_attr["overline"] = 6 # Why is overline actually useful?
g_attr["reverse"] = 7
g_attr["invisible"] = 8
g_attr["no-attr"] = 22
g_attr["no-standout"] = 23
g_attr["no-underline"] = 24
g_attr["no-blink"] = 25
g_attr["no-overline"] = 26
g_attr["no-reverse"] = 27
# 28 isn't defined?
# 29 isn't defined?
g_attr["black"] = 30
g_attr["red"] = 31
g_attr["green"] = 32
g_attr["yellow"] = 33
g_attr["blue"] = 34
g_attr["magenta"] = 35
g_attr["cyan"] = 36
g_attr["white"] = 37
# 38 isn't defined?
g_attr["default"] = 39
g_attr["bg_black"] = 40
g_attr["bg_red"] = 41
g_attr["bg_green"] = 42
g_attr["bg_yellow"] = 43
g_attr["bg_blue"] = 44
g_attr["bg_magenta"] = 45
g_attr["bg_cyan"] = 46
g_attr["bg_white"] = 47
g_attr["bg_default"] = 49
# make_seq("blue", "black", "normal")
def color(fg, bg="default", attr=["normal"]):
mystr = esc_seq[:] + "%02d" % g_attr[fg]
for x in [bg] + attr:
mystr += ";%02d" % g_attr[x]
return mystr + "m"
codes = {}
codes["reset"] = esc_seq + "39;49;00m"
codes["bold"] = esc_seq + "01m"
codes["faint"] = esc_seq + "02m"
codes["standout"] = esc_seq + "03m"
codes["underline"] = esc_seq + "04m"
codes["blink"] = esc_seq + "05m"
codes["overline"] = esc_seq + "06m" # Who made this up? Seriously.
codes["teal"] = esc_seq + "36m"
codes["turquoise"] = esc_seq + "36;01m"
codes["fuchsia"] = esc_seq + "35;01m"
codes["purple"] = esc_seq + "35m"
codes["blue"] = esc_seq + "34;01m"
codes["darkblue"] = esc_seq + "34m"
codes["green"] = esc_seq + "32;01m"
codes["darkgreen"] = esc_seq + "32m"
codes["yellow"] = esc_seq + "33;01m"
codes["brown"] = esc_seq + "33m"
codes["red"] = esc_seq + "31;01m"
codes["darkred"] = esc_seq + "31m"
def nc_len(mystr):
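    # esc_seq already ends with "[", so the pattern is "\x1b[^m]+m": strip each
    # ANSI escape sequence up to its terminating "m" before measuring length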
tmp = re.sub(esc_seq + "^m]+m", "", mystr)
return len(tmp)
def xtermTitle(mystr):
if havecolor and dotitles and "TERM" in os.environ and sys.stderr.isatty():
myt = os.environ["TERM"]
legal_terms = [
"xterm", "Eterm", "aterm", "rxvt", "screen", "kterm", "rxvt-unicode"]
for term in legal_terms:
if myt.startswith(term):
sys.stderr.write("\x1b]2;" + str(mystr) + "\x07")
sys.stderr.flush()
break
def xtermTitleReset():
if havecolor and dotitles and "TERM" in os.environ:
        myt = os.environ["TERM"]
        xtermTitle(myt)
def notitles():
"turn off title setting"
    global dotitles
    dotitles = 0
def nocolor():
"turn off colorization"
    global havecolor
    havecolor = 0
for x in codes.keys():
codes[x] = ""
def resetColor():
return codes["reset"]
def ctext(color, text):
    return codes[color] + text + codes["reset"]
def bold(text):
return codes["bold"] + text + codes["reset"]
def faint(text):
return codes["faint"] + text + codes["reset"]
def white(text):
return bold(text)
def teal(text):
return codes["teal"] + text + codes["reset"]
def turquoise(text):
return codes["turquoise"] + text + codes["reset"]
<|fim▁hole|>
def darkteal(text):
return turquoise(text)
def fuscia(text): # Don't use this one. It's spelled wrong!
return codes["fuchsia"] + text + codes["reset"]
def fuchsia(text):
return codes["fuchsia"] + text + codes["reset"]
def purple(text):
return codes["purple"] + text + codes["reset"]
def blue(text):
return codes["blue"] + text + codes["reset"]
def darkblue(text):
return codes["darkblue"] + text + codes["reset"]
def green(text):
return codes["green"] + text + codes["reset"]
def darkgreen(text):
return codes["darkgreen"] + text + codes["reset"]
def yellow(text):
return codes["yellow"] + text + codes["reset"]
def brown(text):
return codes["brown"] + text + codes["reset"]
def darkyellow(text):
return brown(text)
def red(text):
return codes["red"] + text + codes["reset"]
def darkred(text):
return codes["darkred"] + text + codes["reset"]
def update_basic_spinner():
global spinner, spinpos
spinpos = (spinpos + 1) % 500
if (spinpos % 100) == 0:
if spinpos == 0:
sys.stdout.write(". ")
else:
sys.stdout.write(".")
sys.stdout.flush()
def update_scroll_spinner():
global spinner, spinpos
if(spinpos >= len(spinner)):
sys.stdout.write(
darkgreen(" \b\b\b" + spinner[len(spinner) - 1 - (spinpos % len(spinner))]))
else:
sys.stdout.write(green("\b " + spinner[spinpos]))
sys.stdout.flush()
spinpos = (spinpos + 1) % (2 * len(spinner))
def update_spinner():
global spinner, spinpos
spinpos = (spinpos + 1) % len(spinner)
sys.stdout.write("\b\b " + spinner[spinpos])
sys.stdout.flush()<|fim▁end|> | |
<|file_name|>gotree.go<|end_file_name|><|fim▁begin|>// Package gotree creates and prints trees.
package gotree
import (
"strings"
)
const (
newLine = "\n"
emptySpace = " "
middleItem = "├── "
continueItem = "│ "
lastItem = "└── "
)
type (
tree struct {
text string
items []Tree
}
// Tree is tree interface
Tree interface {
Add(text string) Tree
AddTree(tree Tree)
Items() []Tree
Text() string
Print() string
}
printer struct {
}<|fim▁hole|> }
)
//New returns a new GoTree.Tree
func New(text string) Tree {
return &tree{
text: text,
items: []Tree{},
}
}
//Add adds a node to the tree
func (t *tree) Add(text string) Tree {
n := New(text)
t.items = append(t.items, n)
return n
}
//AddTree adds a tree as an item
func (t *tree) AddTree(tree Tree) {
t.items = append(t.items, tree)
}
//Text returns the node's value
func (t *tree) Text() string {
return t.text
}
//Items returns all items in the tree
func (t *tree) Items() []Tree {
return t.items
}
//Print returns a visual representation of the tree
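// A minimal usage sketch (hypothetical names):
//	root := New("root")
//	child := root.Add("child")
//	child.Add("leaf")
//	fmt.Print(root.Print())
// yields:
//	root
//	└── child
//	    └── leaf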
func (t *tree) Print() string {
return newPrinter().Print(t)
}
func newPrinter() Printer {
return &printer{}
}
//Print prints a tree to a string
func (p *printer) Print(t Tree) string {
return t.Text() + newLine + p.printItems(t.Items(), []bool{})
}
func (p *printer) printText(text string, spaces []bool, last bool) string {
var result string
for _, space := range spaces {
if space {
result += emptySpace
} else {
result += continueItem
}
}
indicator := middleItem
if last {
indicator = lastItem
}
var out string
lines := strings.Split(text, "\n")
for i := range lines {
text := lines[i]
if i == 0 {
out += result + indicator + text + newLine
continue
}
if last {
indicator = emptySpace
} else {
indicator = continueItem
}
out += result + indicator + text + newLine
}
return out
}
func (p *printer) printItems(t []Tree, spaces []bool) string {
var result string
for i, f := range t {
last := i == len(t)-1
result += p.printText(f.Text(), spaces, last)
if len(f.Items()) > 0 {
spacesChild := append(spaces, last)
result += p.printItems(f.Items(), spacesChild)
}
}
return result
}<|fim▁end|> |
// Printer is printer interface
Printer interface {
Print(Tree) string |
<|file_name|>struct_grape_f_s_1_1___performance_operation.js<|end_file_name|><|fim▁begin|>var struct_grape_f_s_1_1___performance_operation =
[
[ "_PerformanceOperation", "struct_grape_f_s_1_1___performance_operation.html#a9251cc499a0ab5c9d2fe2762ca2eb7a2", null ],
[ "FillProc", "struct_grape_f_s_1_1___performance_operation.html#ac8a4badb2cb3594ba22870efc8d68434", null ],
[ "ToString", "struct_grape_f_s_1_1___performance_operation.html#a742c658271c3f69f124539db6e0c3f89", null ],
[ "ToString", "struct_grape_f_s_1_1___performance_operation.html#ad3427b318ecbca0944f3bf326abf4de5", null ],<|fim▁hole|> [ "identifier", "struct_grape_f_s_1_1___performance_operation.html#ab66edf64c9e08ae157f5f592ccd9bf95", null ],
[ "operation", "struct_grape_f_s_1_1___performance_operation.html#af6cedcbc967016a023da9b2348216b2c", null ],
[ "values", "struct_grape_f_s_1_1___performance_operation.html#a14e15664f2ea761afbe217b00162ece7", null ]
];<|fim▁end|> | |
<|file_name|>WebGLMultiview.d.ts<|end_file_name|><|fim▁begin|>import { Camera } from './../../cameras/Camera';
import { Object3D } from './../../core/Object3D';<|fim▁hole|>import { WebGLUniforms } from './WebGLUniforms';
export class WebGLMultiview {
constructor( renderer: WebGLRenderer, gl: WebGLRenderingContext );
isAvailable(): boolean;
attachCamera( camera: Camera ): void;
detachCamera( camera: Camera ): void;
updateCameraProjectionMatricesUniform( camera: Camera, uniforms: WebGLUniforms ): void;
updateCameraViewMatricesUniform( camera: Camera, uniforms: WebGLUniforms ): void;
updateObjectMatricesUniforms( object: Object3D, camera: Camera, uniforms: WebGLUniforms ): void;
}<|fim▁end|> | import { WebGLRenderer } from '../WebGLRenderer'; |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | """Contributed 'recipes'""" |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.html import escape
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth import authenticate, login, logout
import json
# Create your views here.
@ensure_csrf_cookie
def index(request):
return render(request, 'fin/index.html', {})
def table(request,ticker):
template_name='fin/table_'+ticker+'.html'<|fim▁hole|># Fill the type of user programmatically - TBD
return render(request, template_name, {'user_profile':'anonymous'})
#return render(request, template_name, {'user_profile':'nameduser'})
#return render(request, template_name, {'user_profile':'premiumuser'})
def jspractice(request):
return render(request, 'fin/js.html', {})
def dfcf_input_modify(request):
txt=""
for key in request.POST:
value = request.POST[key]
txt += str(key) + "::" + str(value) + "<br>"
txt += "<br><br>"
dat = request.POST['dfcf_ip_params']
jdat = json.loads(dat)
for key in jdat:
value = jdat[key]
txt += str(key) + "::" + str(value) + "<br>"
txt += "<br><br>"
for key in jdat:
rev_growth = float(jdat[key]['rev_growth'])
ear_growth = float(jdat[key]['earnings_growth'])
txt += str(key) + "::" + "revenue grows at" + str(100*rev_growth) + "% <br>"
txt += str(key) + "::" + "Earnings grow at" + str(100*ear_growth) + "% <br>"
txt += "<br><br>Changeset details<br><br>"
changeset = request.POST['dfcf_ip_changeset']
jchangeset = json.loads(changeset)
for key in jchangeset:
value = jchangeset[key]
txt += str(key) + "::" + str(value) + "<br>"
txt += "<br><br>"
txt += escape(repr(request))
return HttpResponse(txt)
# return HttpResponse(request.POST['fname'])
# caller should ensure it is a POST etc.
def fin_auth (request):
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return True
return False
@ensure_csrf_cookie
def dfcf_input(request, action="none"):
template_name='fin/dfcf_input_parameters.html'
u = request.user
if action == "logout":
logout(request)
return render(request, template_name, {'user_profile':'anonymous'})
if u.is_authenticated():
template_name = 'fin/'+u.username+'/dfcf_input_parameters.html'
return render(request, template_name, {'user_profile':'anonymous'})
if (request.method != 'POST'):
return render(request, template_name, {'user_profile':'anonymous'})
if (fin_auth(request)):
template_name='fin/'+request.POST.get('username')+'/dfcf_input_parameters.html'
return render(request, template_name, {'user_profile':'anonymous'})
#return render(request, template_name, {'user_profile':'nameduser'})
#return render(request, template_name, {'user_profile':'premiumuser'})<|fim▁end|> | |
<|file_name|>lexical_chains.py<|end_file_name|><|fim▁begin|>"""
lexical chain module for text tiling
"""
from tile_reader import TileReader
from scoring import boundarize, depth_scoring, window_diff
# ======================================================================================================================
# Main
# ======================================================================================================================
class LexicalChains(object):
def __init__(self):
self.sentences = []
self.actives = {}
self.gap_scores = []
self.boundary_vector = []
def analyze(self, sents, window=4, pos_filter=('PUNCT', 'SYM', 'SPACE', 'DET'), boundary_type='liberal'):
"""
Set attributes
:param sents: (list) spacy-analyzed sentences
:param window: (int) distance threshold within which chains are considered active
:param boundary_type: (str) 'liberal' or 'conservative' boundary scoring
:param pos_filter: (tuple) spacy pos_ labels to exclude (i.e. a pos-based stoplist)
:return: void
"""
self.sentences = self._preproc(sents, pos_filter)
self.actives = self._get_actives(self.sentences, window)
        self.gap_scores = [len(self.actives[k]) for k in sorted(self.actives.keys())]
self.boundary_vector = self._get_boundaries(self.gap_scores, boundary_type)
@staticmethod
def _preproc(sentences, pos_filter):
"""
Filters out stop POSs and lemmatizes sentences
:param sentences: list of tokenized sentences in doc
:param pos_filter: tuple of spacy pos_ labels to filter out
:return: list
"""
filtered = [[tok for tok in sent if tok.pos_ not in pos_filter] for sent in sentences]
lemmatized = [[tok.lemma_ for tok in sent] for sent in filtered]
return lemmatized
@staticmethod
def _get_actives(sents, window):
"""
Get active lexical chains for each gap between sentences
:param sents: list of tokenized sentences
:param window: difference threshold over which lexical chains are considered active
:return: dictionary containing active lexical chains for each sentence transition
"""
# initialize active chains dictionary
actives = {}
for i in xrange(len(sents)-1):
actives[i] = set()
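        # e.g. with window=2 and sents == [["a","b"], ["c"], ["a"]], the token "a"
        # recurs within the window, so actives ends up as {0: {"a"}, 1: {"a"}}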
# loop over all sentences
        for i, sent in enumerate(sents):
            # get index and unique tokens from the current sentence; enumerate
            # avoids list.index(), which misfires when sentences repeat verbatim
            uniques_i = set(sent)
# loop over all sentences within dist thresh of current<|fim▁hole|> diff -= 1
# find shared tokens between current sent[i] and sent[i+diff]
n = i + diff
uniques_n = set(sents[n])
intersection = uniques_i.intersection(uniques_n)
# add the intersections to all affected transitions between sent[i] and sent[i+diff]
for k in list(xrange(diff)):
[actives[i+k].add(word) for word in intersection]
return actives
@staticmethod
def _get_boundaries(scores, boundary_type):
"""
Calculate boundaries from gap scores
:param scores: list containing # of active chains across each sentence gap in doc
:param boundary_type: string indicating 'liberal' or 'conservative' boundary scoring
:return: list indicating which sentences in doc constitute beginnings of new topic tiles
"""
d_scores = depth_scoring(scores)
boundaries = boundarize(d_scores, type=boundary_type)
boundary_vector = [1] + [0 if i not in boundaries else 1 for i in xrange(len(scores))]
return boundary_vector
# ======================================================================================================================
# Test if invoked directly
# ======================================================================================================================
if __name__ == "__main__":
from decimal import Decimal
import matplotlib.pyplot as plt
import sys
import os
# set doc
try:
doc = sys.argv[1]
except IndexError:
sys.exit("ERROR: Expected 1 arg, got {}\nUsage: (python) lexical_chains.py <docname> <docpath>".format(
len(sys.argv)-1))
# get doc path
path = os.path.dirname(__file__)
if doc in ('coron','athens','chatham','cuba','merida'):
doc_path = os.path.join(path, os.path.join("data", "GUM_voyage_{}_noheads.txt".format(doc)))
else:
raise ValueError("unrecognized document: {}".format(doc))
# get gold
gold_file = os.path.join(path, os.path.join("data", "GUM_5_gold_tiles.txt"))
with open(gold_file) as f:
boundaries = [[int(x) for x in line.split(",")] for line in f.read().split()]
texts = ["athens", "chatham", "coron", "cuba", "merida"]
gold_dict = dict(zip(texts, boundaries))
gold = gold_dict[doc]
# Instantiate TileReader
reader = TileReader()
reader.read(doc_path, newline_tokenization=True)
sents = reader.sentences
# Instantiate Lexical Chains
chains = LexicalChains()
chains.analyze(sents)
# compare gold and predicted boundaries
print "GOLD: {}".format(gold)
print "MINE: {}".format(chains.boundary_vector)
# get window_diff
window_size = len(gold)/4
wdiff = window_diff(chains.boundary_vector, gold, window_size)
print "Window Diff: {}".format(wdiff)
# Plot scores
scores = [0] + chains.gap_scores
plt.plot([x for x in xrange(len(scores))], scores)
for index, grp in enumerate(zip(gold, chains.boundary_vector)):
if 1 == grp[0] == grp[1]:
plt.axvline(x=index, color = 'green', linewidth='2.0')
elif 1 == grp[0] != grp[1]:
plt.axvline(x=index, color = 'red')
elif 1 == grp[1] != grp[0]:
plt.axvline(x=index, color = 'gray')
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
wdiff_rounded = round(Decimal(wdiff), 3)
plt.text(xmax-(xmax-xmin)/4,ymax+0.5, "window diff: {}".format(wdiff_rounded))
plt.show()<|fim▁end|> | for diff in xrange(window, 0, -1):
# back off diff when there are less sentences left than dist thresh
while not i + diff < len(sents): |
<|file_name|>counts.py<|end_file_name|><|fim▁begin|>import logging
import time
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
from django.conf import settings
from django.db import connection
from django.db.models import F
from psycopg2.sql import SQL, Composable, Identifier, Literal
from analytics.models import (
BaseCount,
FillState,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
last_successful_fill,
)
from zerver.lib.logging_util import log_to_file
from zerver.lib.timestamp import ceiling_to_day, ceiling_to_hour, floor_to_hour, verify_UTC
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Stream,
UserActivityInterval,
UserProfile,
models,
)
## Logging setup ##
logger = logging.getLogger('zulip.management')
log_to_file(logger, settings.ANALYTICS_LOG_PATH)
# You can't subtract timedelta.max from a datetime, so use this instead
TIMEDELTA_MAX = timedelta(days=365*1000)
## Class definitions ##
class CountStat:
HOUR = 'hour'
DAY = 'day'
FREQUENCIES = frozenset([HOUR, DAY])
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta]=None) -> None:
self.property = property
self.data_collector = data_collector
# might have to do something different for bitfields
if frequency not in self.FREQUENCIES:
raise AssertionError(f"Unknown frequency: {frequency}")
self.frequency = frequency
if interval is not None:
self.interval = interval
elif frequency == CountStat.HOUR:
self.interval = timedelta(hours=1)
else: # frequency == CountStat.DAY
self.interval = timedelta(days=1)
def __str__(self) -> str:
return f"<CountStat: {self.property}>"
class LoggingCountStat(CountStat):
def __init__(self, property: str, output_table: Type[BaseCount], frequency: str) -> None:
CountStat.__init__(self, property, DataCollector(output_table, None), frequency)
class DependentCountStat(CountStat):
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta] = None, dependencies: Sequence[str] = []) -> None:
CountStat.__init__(self, property, data_collector, frequency, interval=interval)
self.dependencies = dependencies
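# Pairs the *Count table that rows are written to with an optional pull
# function. LoggingCountStat passes pull_function=None, since its rows are
# logged at event time (via do_increment_logging_stat) rather than pulled
# from the database.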
class DataCollector:
def __init__(self, output_table: Type[BaseCount],
pull_function: Optional[Callable[[str, datetime, datetime, Optional[Realm]], int]]) -> None:
self.output_table = output_table
self.pull_function = pull_function
## CountStat-level operations ##
def process_count_stat(stat: CountStat, fill_to_time: datetime,
realm: Optional[Realm]=None) -> None:
# TODO: The realm argument is not yet supported, in that we don't
# have a solution for how to update FillState if it is passed. It
# exists solely as partial plumbing for when we do fully implement
# doing single-realm analytics runs for use cases like data import.
#
# Also, note that for the realm argument to be properly supported,
# the CountStat object passed in needs to have come from
# E.g. get_count_stats(realm), i.e. have the realm_id already
# entered into the SQL query defined by the CountState object.
if stat.frequency == CountStat.HOUR:
time_increment = timedelta(hours=1)
elif stat.frequency == CountStat.DAY:
time_increment = timedelta(days=1)
else:
raise AssertionError(f"Unknown frequency: {stat.frequency}")
verify_UTC(fill_to_time)
if floor_to_hour(fill_to_time) != fill_to_time:
raise ValueError(f"fill_to_time must be on an hour boundary: {fill_to_time}")
fill_state = FillState.objects.filter(property=stat.property).first()
if fill_state is None:
currently_filled = installation_epoch()
fill_state = FillState.objects.create(property=stat.property,
end_time=currently_filled,
state=FillState.DONE)
logger.info("INITIALIZED %s %s", stat.property, currently_filled)
elif fill_state.state == FillState.STARTED:
logger.info("UNDO START %s %s", stat.property, fill_state.end_time)
do_delete_counts_at_hour(stat, fill_state.end_time)
currently_filled = fill_state.end_time - time_increment
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
logger.info("UNDO DONE %s", stat.property)
elif fill_state.state == FillState.DONE:
currently_filled = fill_state.end_time
else:
raise AssertionError(f"Unknown value for FillState.state: {fill_state.state}.")
if isinstance(stat, DependentCountStat):
for dependency in stat.dependencies:
dependency_fill_time = last_successful_fill(dependency)
if dependency_fill_time is None:
logger.warning("DependentCountStat %s run before dependency %s.",
stat.property, dependency)
return
fill_to_time = min(fill_to_time, dependency_fill_time)
currently_filled = currently_filled + time_increment
while currently_filled <= fill_to_time:
logger.info("START %s %s", stat.property, currently_filled)
start = time.time()
do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
do_fill_count_stat_at_hour(stat, currently_filled, realm)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
end = time.time()
currently_filled = currently_filled + time_increment
logger.info("DONE %s (%dms)", stat.property, (end-start)*1000)
def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
fill_state.end_time = end_time
fill_state.state = state
fill_state.save()
# We assume end_time is valid (e.g. is on a day or hour boundary as appropriate)
# and is timezone aware. It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime, realm: Optional[Realm]=None) -> None:
start_time = end_time - stat.interval
if not isinstance(stat, LoggingCountStat):
timer = time.time()
assert(stat.data_collector.pull_function is not None)
rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time, realm)
logger.info("%s run pull_function (%dms/%sr)",
stat.property, (time.time()-timer)*1000, rows_added)
do_aggregate_to_summary_table(stat, end_time, realm)
def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
if isinstance(stat, LoggingCountStat):
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
if stat.data_collector.output_table in [UserCount, StreamCount]:
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
else:
UserCount.objects.filter(property=stat.property, end_time=end_time).delete()
StreamCount.objects.filter(property=stat.property, end_time=end_time).delete()
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
realm: Optional[Realm]=None) -> None:
cursor = connection.cursor()
# Aggregate into RealmCount
output_table = stat.data_collector.output_table
if realm is not None:
realm_clause = SQL("AND zerver_realm.id = {}").format(Literal(realm.id))
else:
realm_clause = SQL("")
if output_table in (UserCount, StreamCount):
realmcount_query = SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, COALESCE(sum({output_table}.value), 0), %(property)s,
{output_table}.subgroup, %(end_time)s
FROM zerver_realm
JOIN {output_table}
ON
zerver_realm.id = {output_table}.realm_id
WHERE
{output_table}.property = %(property)s AND
{output_table}.end_time = %(end_time)s
{realm_clause}
GROUP BY zerver_realm.id, {output_table}.subgroup
""").format(
output_table=Identifier(output_table._meta.db_table),
realm_clause=realm_clause,
)
start = time.time()
cursor.execute(realmcount_query, {
'property': stat.property,
'end_time': end_time,
})
end = time.time()
logger.info(
"%s RealmCount aggregation (%dms/%sr)",
stat.property, (end - start) * 1000, cursor.rowcount,
)
if realm is None:
# Aggregate into InstallationCount. Only run if we just
# processed counts for all realms.
#
# TODO: Add support for updating installation data after
# changing an individual realm's values.
installationcount_query = SQL("""
INSERT INTO analytics_installationcount
(value, property, subgroup, end_time)
SELECT
sum(value), %(property)s, analytics_realmcount.subgroup, %(end_time)s
FROM analytics_realmcount
WHERE
property = %(property)s AND
end_time = %(end_time)s
GROUP BY analytics_realmcount.subgroup
""")
start = time.time()
cursor.execute(installationcount_query, {
'property': stat.property,
'end_time': end_time,
})
end = time.time()
logger.info(
"%s InstallationCount aggregation (%dms/%sr)",
stat.property, (end - start) * 1000, cursor.rowcount,
)
cursor.close()
## Utility functions called from outside counts.py ##
# called from zerver/lib/actions.py; should not throw any errors
def do_increment_logging_stat(zerver_object: Union[Realm, UserProfile, Stream], stat: CountStat,
subgroup: Optional[Union[str, int, bool]], event_time: datetime,
increment: int=1) -> None:
if not increment:
return
table = stat.data_collector.output_table
if table == RealmCount:
id_args = {'realm': zerver_object}
elif table == UserCount:
id_args = {'realm': zerver_object.realm, 'user': zerver_object}
else: # StreamCount
id_args = {'realm': zerver_object.realm, 'stream': zerver_object}
if stat.frequency == CountStat.DAY:
end_time = ceiling_to_day(event_time)
else: # CountStat.HOUR:
end_time = ceiling_to_hour(event_time)
row, created = table.objects.get_or_create(
property=stat.property, subgroup=subgroup, end_time=end_time,
defaults={'value': increment}, **id_args)
if not created:
row.value = F('value') + increment
row.save(update_fields=['value'])
def do_drop_all_analytics_tables() -> None:
UserCount.objects.all().delete()
StreamCount.objects.all().delete()
RealmCount.objects.all().delete()
InstallationCount.objects.all().delete()
FillState.objects.all().delete()
def do_drop_single_stat(property: str) -> None:
UserCount.objects.filter(property=property).delete()
StreamCount.objects.filter(property=property).delete()
RealmCount.objects.filter(property=property).delete()
InstallationCount.objects.filter(property=property).delete()
FillState.objects.filter(property=property).delete()
## DataCollector-level operations ##
QueryFn = Callable[[Dict[str, Composable]], Composable]
def do_pull_by_sql_query(
property: str,
start_time: datetime,
end_time: datetime,
query: QueryFn,
group_by: Optional[Tuple[models.Model, str]],
) -> int:
if group_by is None:<|fim▁hole|> group_by_clause = SQL(', {}').format(subgroup)
# We do string replacement here because cursor.execute will reject a
# group_by_clause given as a param.
# We pass in the datetimes as params to cursor.execute so that we don't have to
# think about how to convert python datetimes to SQL datetimes.
query_ = query({
'subgroup': subgroup,
'group_by_clause': group_by_clause,
})
cursor = connection.cursor()
cursor.execute(query_, {
'property': property,
'time_start': start_time,
'time_end': end_time,
})
rowcount = cursor.rowcount
cursor.close()
return rowcount
def sql_data_collector(
output_table: Type[BaseCount],
query: QueryFn,
group_by: Optional[Tuple[models.Model, str]],
) -> DataCollector:
def pull_function(property: str, start_time: datetime, end_time: datetime,
realm: Optional[Realm] = None) -> int:
# The pull function type needs to accept a Realm argument
# because the 'minutes_active::day' CountStat uses
# DataCollector directly for do_pull_minutes_active, which
# requires the realm argument. We ignore it here, because the
# realm should have been already encoded in the `query` we're
# passed.
return do_pull_by_sql_query(property, start_time, end_time, query, group_by)
return DataCollector(output_table, pull_function)
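# Pull function for minutes_active::day: sums each user's overlap with
# [start_time, end_time) across their UserActivityIntervals and writes a
# UserCount row (value in whole minutes) for every user active >= 60 seconds.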
def do_pull_minutes_active(property: str, start_time: datetime, end_time: datetime,
realm: Optional[Realm] = None) -> int:
user_activity_intervals = UserActivityInterval.objects.filter(
end__gt=start_time, start__lt=end_time,
).select_related(
'user_profile',
).values_list(
'user_profile_id', 'user_profile__realm_id', 'start', 'end')
seconds_active: Dict[Tuple[int, int], float] = defaultdict(float)
for user_id, realm_id, interval_start, interval_end in user_activity_intervals:
if realm is None or realm.id == realm_id:
start = max(start_time, interval_start)
end = min(end_time, interval_end)
seconds_active[(user_id, realm_id)] += (end - start).total_seconds()
rows = [UserCount(user_id=ids[0], realm_id=ids[1], property=property,
end_time=end_time, value=int(seconds // 60))
for ids, seconds in seconds_active.items() if seconds >= 60]
UserCount.objects.bulk_create(rows)
return len(rows)
def count_message_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, count(*),
%(property)s, {subgroup}, %(time_end)s
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id
WHERE
zerver_userprofile.date_joined < %(time_end)s AND
zerver_message.date_sent >= %(time_start)s AND
{realm_clause}
zerver_message.date_sent < %(time_end)s
GROUP BY zerver_userprofile.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
# Note: ignores the group_by / group_by_clause.
def count_message_type_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(realm_id, user_id, value, property, subgroup, end_time)
SELECT realm_id, id, SUM(count) AS value, %(property)s, message_type, %(time_end)s
FROM
(
SELECT zerver_userprofile.realm_id, zerver_userprofile.id, count(*),
CASE WHEN
zerver_recipient.type = 1 THEN 'private_message'
WHEN
zerver_recipient.type = 3 THEN 'huddle_message'
WHEN
zerver_stream.invite_only = TRUE THEN 'private_stream'
ELSE 'public_stream'
END
message_type
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id AND
zerver_message.date_sent >= %(time_start)s AND
{realm_clause}
zerver_message.date_sent < %(time_end)s
JOIN zerver_recipient
ON
zerver_message.recipient_id = zerver_recipient.id
LEFT JOIN zerver_stream
ON
zerver_recipient.type_id = zerver_stream.id
GROUP BY
zerver_userprofile.realm_id, zerver_userprofile.id,
zerver_recipient.type, zerver_stream.invite_only
) AS subquery
GROUP BY realm_id, id, message_type
""").format(**kwargs, realm_clause=realm_clause)
# This query joins to the UserProfile table since all current queries that
# use this also subgroup on UserProfile.is_bot. If in the future there is a
# stat that counts messages by stream and doesn't need the UserProfile
# table, consider writing a new query for efficiency.
def count_message_by_stream_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_stream.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_streamcount
(stream_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_stream.id, zerver_stream.realm_id, count(*), %(property)s, {subgroup}, %(time_end)s
FROM zerver_stream
JOIN zerver_recipient
ON
zerver_stream.id = zerver_recipient.type_id
JOIN zerver_message
ON
zerver_recipient.id = zerver_message.recipient_id
JOIN zerver_userprofile
ON
zerver_message.sender_id = zerver_userprofile.id
WHERE
zerver_stream.date_created < %(time_end)s AND
zerver_recipient.type = 2 AND
zerver_message.date_sent >= %(time_start)s AND
{realm_clause}
zerver_message.date_sent < %(time_end)s
GROUP BY zerver_stream.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
# Hardcodes the query needed by active_users:is_bot:day, since that is
# currently the only stat that uses this.
def count_user_by_realm_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*), %(property)s, {subgroup}, %(time_end)s
FROM zerver_realm
JOIN zerver_userprofile
ON
zerver_realm.id = zerver_userprofile.realm_id
WHERE
zerver_realm.date_created < %(time_end)s AND
zerver_userprofile.date_joined >= %(time_start)s AND
zerver_userprofile.date_joined < %(time_end)s AND
{realm_clause}
zerver_userprofile.is_active = TRUE
GROUP BY zerver_realm.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
# Currently hardcodes the query needed for active_users_audit:is_bot:day.
# Assumes that a user cannot have two RealmAuditLog entries with the same event_time and
# event_type in [RealmAuditLog.USER_CREATED, USER_DEACTIVATED, etc].
# In particular, it's important to ensure that migrations don't cause that to happen.
def check_realmauditlog_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
ral1.modified_user_id, ral1.realm_id, 1, %(property)s, {subgroup}, %(time_end)s
FROM zerver_realmauditlog ral1
JOIN (
SELECT modified_user_id, max(event_time) AS max_event_time
FROM zerver_realmauditlog
WHERE
event_type in ({user_created}, {user_activated}, {user_deactivated}, {user_reactivated}) AND
{realm_clause}
event_time < %(time_end)s
GROUP BY modified_user_id
) ral2
ON
ral1.event_time = max_event_time AND
ral1.modified_user_id = ral2.modified_user_id
JOIN zerver_userprofile
ON
ral1.modified_user_id = zerver_userprofile.id
WHERE
ral1.event_type in ({user_created}, {user_activated}, {user_reactivated})
""").format(
**kwargs,
user_created=Literal(RealmAuditLog.USER_CREATED),
user_activated=Literal(RealmAuditLog.USER_ACTIVATED),
user_deactivated=Literal(RealmAuditLog.USER_DEACTIVATED),
user_reactivated=Literal(RealmAuditLog.USER_REACTIVATED),
realm_clause=realm_clause,
)
def check_useractivityinterval_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, 1, %(property)s, {subgroup}, %(time_end)s
FROM zerver_userprofile
JOIN zerver_useractivityinterval
ON
zerver_userprofile.id = zerver_useractivityinterval.user_profile_id
WHERE
zerver_useractivityinterval.end >= %(time_start)s AND
{realm_clause}
zerver_useractivityinterval.start < %(time_end)s
GROUP BY zerver_userprofile.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
def count_realm_active_humans_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
usercount1.realm_id, count(*), %(property)s, NULL, %(time_end)s
FROM (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = 'active_users_audit:is_bot:day' AND
subgroup = 'false' AND
{realm_clause}
end_time = %(time_end)s
) usercount1
JOIN (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = '15day_actives::day' AND
{realm_clause}
end_time = %(time_end)s
) usercount2
ON
usercount1.user_id = usercount2.user_id
GROUP BY usercount1.realm_id
""").format(**kwargs, realm_clause=realm_clause)
# Currently unused and untested
count_stream_by_realm_query = lambda kwargs: SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*), %(property)s, {subgroup}, %(time_end)s
FROM zerver_realm
JOIN zerver_stream
ON
zerver_realm.id = zerver_stream.realm_id
WHERE
zerver_realm.date_created < %(time_end)s AND
zerver_stream.date_created >= %(time_start)s AND
zerver_stream.date_created < %(time_end)s
GROUP BY zerver_realm.id {group_by_clause}
""").format(**kwargs)
def get_count_stats(realm: Optional[Realm]=None) -> Dict[str, CountStat]:
## CountStat declarations ##
count_stats_ = [
# Messages Sent stats
# Stats that count the number of messages sent in various ways.
# These are also the set of stats that read from the Message table.
CountStat('messages_sent:is_bot:hour',
sql_data_collector(UserCount, count_message_by_user_query(
realm), (UserProfile, 'is_bot')),
CountStat.HOUR),
CountStat('messages_sent:message_type:day',
sql_data_collector(
UserCount, count_message_type_by_user_query(realm), None),
CountStat.DAY),
CountStat('messages_sent:client:day',
sql_data_collector(UserCount, count_message_by_user_query(realm),
(Message, 'sending_client_id')), CountStat.DAY),
CountStat('messages_in_stream:is_bot:day',
sql_data_collector(StreamCount, count_message_by_stream_query(realm),
(UserProfile, 'is_bot')), CountStat.DAY),
# Number of Users stats
# Stats that count the number of active users in the UserProfile.is_active sense.
# 'active_users_audit:is_bot:day' is the canonical record of which users were
# active on which days (in the UserProfile.is_active sense).
# Important that this stay a daily stat, so that 'realm_active_humans::day' works as expected.
CountStat('active_users_audit:is_bot:day',
sql_data_collector(UserCount, check_realmauditlog_by_user_query(
realm), (UserProfile, 'is_bot')),
CountStat.DAY),
# Important note: LoggingCountStat objects aren't passed the
# Realm argument, because by nature they have a logging
# structure, not a pull-from-database structure, so there's no
# way to compute them for a single realm after the fact (the
# use case for passing a Realm argument).
# Sanity check on 'active_users_audit:is_bot:day', and an archetype for future LoggingCountStats.
# In RealmCount, 'active_users_audit:is_bot:day' should be the partial
# sum sequence of 'active_users_log:is_bot:day', for any realm that
# started after the latter stat was introduced.
LoggingCountStat('active_users_log:is_bot:day',
RealmCount, CountStat.DAY),
# Another sanity check on 'active_users_audit:is_bot:day'. It is only an
# approximation: e.g. if a user is deactivated between the end of the
# day and when this stat is run, they won't be counted. However, it is
# the simplest of the three to inspect by hand.
CountStat('active_users:is_bot:day',
sql_data_collector(RealmCount, count_user_by_realm_query(realm), (UserProfile, 'is_bot')),
CountStat.DAY, interval=TIMEDELTA_MAX),
# Messages read stats. messages_read::hour is the total
# number of messages read, whereas
# messages_read_interactions::hour tries to count the total
# number of UI interactions resulting in messages being marked
# as read (imperfect because of batching of some request
# types, but less likely to be overwhelmed by a single bulk
# operation).
LoggingCountStat('messages_read::hour', UserCount, CountStat.HOUR),
LoggingCountStat('messages_read_interactions::hour', UserCount, CountStat.HOUR),
# User Activity stats
# Stats that measure user activity in the UserActivityInterval sense.
CountStat('1day_actives::day',
sql_data_collector(
UserCount, check_useractivityinterval_by_user_query(realm), None),
CountStat.DAY, interval=timedelta(days=1)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('15day_actives::day',
sql_data_collector(
UserCount, check_useractivityinterval_by_user_query(realm), None),
CountStat.DAY, interval=timedelta(days=15)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('minutes_active::day', DataCollector(
UserCount, do_pull_minutes_active), CountStat.DAY),
# Rate limiting stats
# Used to limit the number of invitation emails sent by a realm
LoggingCountStat('invites_sent::day', RealmCount, CountStat.DAY),
# Dependent stats
# Must come after their dependencies.
# Canonical account of the number of active humans in a realm on each day.
DependentCountStat('realm_active_humans::day',
sql_data_collector(
RealmCount, count_realm_active_humans_query(realm), None),
CountStat.DAY,
dependencies=['active_users_audit:is_bot:day', '15day_actives::day']),
]
return OrderedDict([(stat.property, stat) for stat in count_stats_])
# To avoid refactoring for now COUNT_STATS can be used as before
COUNT_STATS = get_count_stats()<|fim▁end|> | subgroup = SQL('NULL')
group_by_clause = SQL('')
else:
subgroup = Identifier(group_by[0]._meta.db_table, group_by[1]) |
<|file_name|>trace_report.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header<|fim▁hole|>tracer = trace.Trace(count=True, trace=False, outfile='trace_report.dat')
tracer.runfunc(recurse, 2)
report_tracer = trace.Trace(count=False, trace=False, infile='trace_report.dat')
results = report_tracer.results()
results.write_results(summary=True, coverdir='/tmp')<|fim▁end|> |
import trace
from trace_example.recurse import recurse
|
<|file_name|>inherited_svg.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
// SVG 1.1 (Second Edition)
// https://www.w3.org/TR/SVG/
<% data.new_style_struct("InheritedSVG",
inherited=True,
gecko_name="SVG") %>
// Section 10 - Text
${helpers.single_keyword("text-anchor",
"start middle end",
products="gecko",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG/text.html#TextAnchorProperty")}
// Section 11 - Painting: Filling, Stroking and Marker Symbols
${helpers.single_keyword("color-interpolation",
"srgb auto linearrgb",
products="gecko",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG11/painting.html#ColorInterpolationProperty")}
${helpers.single_keyword("color-interpolation-filters", "linearrgb auto srgb",
products="gecko",
gecko_constant_prefix="NS_STYLE_COLOR_INTERPOLATION",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG11/painting.html#ColorInterpolationFiltersProperty")}
${helpers.predefined_type(
"fill", "SVGPaint",
"::values::computed::SVGPaint::black()",
products="gecko",
animation_value_type="IntermediateSVGPaint",
boxed=True,
spec="https://www.w3.org/TR/SVG2/painting.html#SpecifyingFillPaint")}
${helpers.predefined_type("fill-opacity", "SVGOpacity", "Default::default()",
products="gecko", animation_value_type="ComputedValue",
spec="https://www.w3.org/TR/SVG11/painting.html#FillOpacityProperty")}
${helpers.single_keyword("fill-rule", "nonzero evenodd",
gecko_enum_prefix="StyleFillRule",
products="gecko", animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG11/painting.html#FillRuleProperty")}
${helpers.single_keyword("shape-rendering",
"auto optimizespeed crispedges geometricprecision",
products="gecko",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG11/painting.html#ShapeRenderingProperty")}
${helpers.predefined_type(
"stroke", "SVGPaint",
"Default::default()",
products="gecko",
animation_value_type="IntermediateSVGPaint",
boxed=True,
spec="https://www.w3.org/TR/SVG2/painting.html#SpecifyingStrokePaint")}
${helpers.predefined_type(
"stroke-width", "SVGWidth",
"::values::computed::NonNegativeLength::new(1.).into()",
products="gecko",
boxed=not RUSTC_HAS_PR45225,
animation_value_type="::values::computed::SVGWidth",
spec="https://www.w3.org/TR/SVG2/painting.html#StrokeWidth")}
${helpers.single_keyword("stroke-linecap", "butt round square",
products="gecko", animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG11/painting.html#StrokeLinecapProperty")}
${helpers.single_keyword("stroke-linejoin", "miter round bevel",
products="gecko", animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG11/painting.html#StrokeLinejoinProperty")}
${helpers.predefined_type("stroke-miterlimit", "GreaterThanOrEqualToOneNumber",
"From::from(4.0)",
products="gecko",
animation_value_type="::values::computed::GreaterThanOrEqualToOneNumber",
spec="https://www.w3.org/TR/SVG11/painting.html#StrokeMiterlimitProperty")}
${helpers.predefined_type("stroke-opacity", "SVGOpacity", "Default::default()",
products="gecko", animation_value_type="ComputedValue",
spec="https://www.w3.org/TR/SVG11/painting.html#StrokeOpacityProperty")}
${helpers.predefined_type(
"stroke-dasharray",
"SVGStrokeDashArray",
"Default::default()",
products="gecko",
animation_value_type="::values::computed::SVGStrokeDashArray",
spec="https://www.w3.org/TR/SVG2/painting.html#StrokeDashing",
)}
${helpers.predefined_type(
"stroke-dashoffset", "SVGLength",
"Au(0).into()",
products="gecko",
boxed=not RUSTC_HAS_PR45225,
animation_value_type="ComputedValue",
spec="https://www.w3.org/TR/SVG2/painting.html#StrokeDashing")}
<|fim▁hole|>// Section 14 - Clipping, Masking and Compositing
${helpers.single_keyword("clip-rule", "nonzero evenodd",
products="gecko",
gecko_enum_prefix="StyleFillRule",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG11/masking.html#ClipRuleProperty")}
${helpers.predefined_type("marker-start", "UrlOrNone", "Either::Second(None_)",
products="gecko",
boxed= product == "gecko",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG2/painting.html#VertexMarkerProperties")}
${helpers.predefined_type("marker-mid", "UrlOrNone", "Either::Second(None_)",
products="gecko",
boxed= product == "gecko",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG2/painting.html#VertexMarkerProperties")}
${helpers.predefined_type("marker-end", "UrlOrNone", "Either::Second(None_)",
products="gecko",
boxed= product == "gecko",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG2/painting.html#VertexMarkerProperties")}
${helpers.predefined_type("paint-order", "SVGPaintOrder", "computed::SVGPaintOrder::normal()",
products="gecko",
animation_value_type="discrete",
spec="https://www.w3.org/TR/SVG2/painting.html#PaintOrder")}
${helpers.predefined_type("-moz-context-properties",
"MozContextProperties",
initial_value=None,
vector=True,
animation_value_type="none",
products="gecko",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-context-properties)",
allow_empty=True)}<|fim▁end|> | |
<|file_name|>0009_playlistitem_created_at.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('app', '0008_playlistitem_network'),<|fim▁hole|> ]
operations = [
migrations.AddField(
model_name='playlistitem',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2014, 10, 6, 10, 0, 29, 893833), auto_now_add=True),
preserve_default=False,
),
]<|fim▁end|> | |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>namespace webgl_2d_geometry_matrix_transform_with_projection {
function main() {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
let canvas = <HTMLCanvasElement>document.getElementById("canvas");
webglLessonsHelper.setupLesson(canvas);
let gl = canvas.getContext("webgl");
if (!gl) {
webglLessonsHelper.showNeedWebGL(canvas);
return;
}
// setup GLSL program
let program = webglUtils.createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);
gl.useProgram(program);
// look up where the vertex data needs to go.
let positionLocation = gl.getAttribLocation(program, "a_position");
// lookup uniforms
let resolutionLocation = gl.getUniformLocation(program, "u_resolution");
let colorLocation = gl.getUniformLocation(program, "u_color");
let matrixLocation = gl.getUniformLocation(program, "u_matrix");
// Create a buffer to put positions in
let positionBuffer = gl.createBuffer();
// Bind it to ARRAY_BUFFER (think of it as ARRAY_BUFFER = positionBuffer)
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Put geometry data into buffer
setGeometry(gl);
let scale = new Float32Array([1, 1]);
let angleInRadians = 0;
let translation = new Float32Array([200, 150]);
let width = 100;
let height = 30;
let color = new Float32Array([Math.random(), Math.random(), Math.random(), 1]);
drawScene();
// Setup a ui.
webglLessonsHelper.setupSlider("#x", { value: translation[0], slide: updatePosition(0), max: gl.canvas.width });
webglLessonsHelper.setupSlider("#y", { value: translation[1], slide: updatePosition(1), max: gl.canvas.height });
webglLessonsHelper.setupSlider("#angle", { slide: updateAngle, max: 360 });
webglLessonsHelper.setupSlider("#scaleX", { value: scale[0], slide: updateScale(0), min: -5, max: 5, step: 0.01, precision: 2 });
webglLessonsHelper.setupSlider("#scaleY", { value: scale[1], slide: updateScale(1), min: -5, max: 5, step: 0.01, precision: 2 });
function updateAngle(event, ui) {
let angleInDegrees = 360 - ui.value;
angleInRadians = angleInDegrees * Math.PI / 180;
drawScene();
}
function updateScale(index) {
return function (event, ui) {
scale[index] = ui.value;
drawScene();
}
}
function updatePosition(index) {
return function (event, ui) {
translation[index] = ui.value;
drawScene();
}
}
// Draw a the scene.
function drawScene() {
webglUtils.resizeCanvasToDisplaySize(gl.canvas);
// Tell WebGL how to convert from clip space to pixels
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
// Clear the canvas.
gl.clear(gl.COLOR_BUFFER_BIT);
// Tell it to use our program (pair of shaders)
gl.useProgram(program);
// Turn on the attribute
gl.enableVertexAttribArray(positionLocation);
// Bind the position buffer.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Tell the attribute how to get data out of positionBuffer (ARRAY_BUFFER)
let size = 2; // 2 components per iteration
let type = gl.FLOAT; // the data is 32bit floats
let normalize = false; // don't normalize the data
let stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
let offset = 0; // start at the beginning of the buffer<|fim▁hole|> gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);
// set the color
gl.uniform4fv(colorLocation, color);
// Compute the matrix
let matrix = m3.projection(gl.canvas.clientWidth, gl.canvas.clientHeight);
matrix = m3.translate(matrix, translation[0], translation[1]);
matrix = m3.rotate(matrix, angleInRadians);
matrix = m3.scale(matrix, scale[0], scale[1]);
// Set the matrix.
gl.uniformMatrix3fv(matrixLocation, false, new Float32Array(matrix));
// Draw the rectangle.
let primitiveType = gl.TRIANGLES;
offset = 0;
let count = 3;
gl.drawArrays(primitiveType, offset, count);
}
}
// Fill the buffer with the values that define a triangle.
function setGeometry(gl) {
gl.bufferData(
gl.ARRAY_BUFFER,
new Float32Array([
0, -100,
150, 125,
-175, 100]),
gl.STATIC_DRAW);
}
main();
}<|fim▁end|> | gl.vertexAttribPointer(
positionLocation, size, type, normalize, stride, offset);
// set the resolution |
<|file_name|>application.py<|end_file_name|><|fim▁begin|>__author__ = 'hujin'
import sys
from os import path
from twisted.internet import reactor
from twisted.web import server, resource<|fim▁hole|>from dockerman.api import Root
from dockerman.docker import Client
from dockerman.manager import Manager
from dockerman.event import Dispatcher
class Application(object):
def __init__(self, config):
self.config = config
log.startLogging(sys.stdout)
self._initialize()
def _initialize(self):
store_file = self.config['store_file']
if not path.exists(store_file):
open(store_file, 'w').close()
self.store = ServiceStore(store_file)
self.store.applicaion = self
host = self.config['docker_host']
port = self.config['docker_port']
self.client = Client(host, port)
self.dispatcher = Dispatcher(self)
self.manager = Manager(self.client, self.store, self.dispatcher)
def get_config(self, name, default=None):
try:
return self.config[name]
except KeyError:
return default
def _on_event(self, message):
self.manager.handle_event(message)
def start(self, port):
self.startHttpServer(port)
self.client.subscribe(self._on_event)
self.client.monitor()
reactor.run()
def startHttpServer(self, port):
site = server.Site(Root(self))
reactor.listenTCP(port, site)<|fim▁end|> | from twisted.python import log
from dockerman.storage import ServiceStore |
<|file_name|>for-loop-goofiness.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at<|fim▁hole|>// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum BogusOption<T> {
None,
Some(T),
}
type Iterator = int;
pub fn main() {
let x = [ 3i, 3, 3 ];
for i in x.iter() {
assert_eq!(*i, 3);
}
}<|fim▁end|> | // http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
<|file_name|>machine.rs<|end_file_name|><|fim▁begin|>use std::io::stdio::{stdin_raw, stdout_raw};
use storage::{Tape, VectorTape};
use operators::{Sub, Incr, Decr, Prev, Next, Put, Get};
use ast::Ast;
/**
A brainfuck interpreter machine.
Models the internal state of a Brainfuck machine. It is a simple
tape machine with a program counter representing the current
operator being executed in an AST.
*/
pub struct Machine {
/// A tape to be used as the main storage.
tape: VectorTape<u8>,
/// Program counter pointing at the current operator.
pc: uint,
}
impl Machine {
// Produce a new pristine machine.
pub fn new() -> Machine {
Machine {
tape: VectorTape::new(),
pc: 0,
}
}
/**
Run a program, given in the form of a parsed AST, on this
machine's tape. Will return the cycles that have been executed.
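A usage sketch (assuming an `Ast` built elsewhere, e.g. by a parser):

    let mut machine = Machine::new();
    let cycles = machine.run_program(&ast);

The program counter is reset on each call, but the tape is not, so
tape state persists across successive programs.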
*/
pub fn run_program<'a>(&mut self, program: &Ast) -> Result<uint, ~str> {
self.pc = 0; // Begin interpreting at the start of the AST.
let mut cycles: uint = 0; // Keep track of the executed cycles.
let Ast(ref ops) = *program; // Extract the actual ops from the AST.
loop {
match ops.get(self.pc) {
// Operations on tape. Match tape methods perfectly.
Some(&Decr) => { self.tape.mutate( |v|{ *v -= 1; } ); }
Some(&Incr) => { self.tape.mutate( |v|{ *v += 1; } ); }
Some(&Prev) => { self.tape.wind(-1); }
Some(&Next) => { self.tape.wind( 1); }
// Reads a single char from `stdin` and replaces the
// current cell's contents with it.
Some(&Get) => {
let byte_in = stdin_raw().read_u8().ok()
.unwrap_or(0); // This machine respects EOF -> 0
self.tape.mutate( |v|{ *v = byte_in; } );
}
// Prints the cell's contents to `stdout` as char.
Some(&Put) => {
let byte_out = self.tape.cell().clone();
match stdout_raw().write_u8(byte_out) {
Ok(_) => { /* nop */ },
_ => return Err(~"Cannot not write to stdout."),
}<|fim▁hole|> }
// Executes a sub-AST. If the current cell's value
// is zero, the ops in the sub-AST will be executed,
// else skipping them entirely.
Some(&Sub(ref ast)) => {
let pc = self.pc; // Save PC and reset
while *self.tape.cell() != 0 {
match self.run_program(ast) {
Ok(cls) => cycles += cls,
Err(msg) => return Err(msg),
}
}
self.pc = pc; // Restore PC
}
// Unknown. Nop.
Some(_) => { /* nop */ },
// End of program. Stop execution.
_ => break
}
// Track this last cycle and advance to the next operator.
cycles += 1;
self.pc += 1;
}
// Everything went well. Just return the stats back.
Ok(cycles)
}
}<|fim▁end|> | |
<|file_name|>ConnectionComplexityDao.java<|end_file_name|><|fim▁begin|>/*
* Waltz - Enterprise Architecture
* Copyright (C) 2016, 2017, 2018, 2019 Waltz open source project
* See README.md for more information
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific
*
*/
package com.khartec.waltz.data.complexity;
import com.khartec.waltz.model.EntityKind;
import com.khartec.waltz.model.tally.ImmutableTally;
import com.khartec.waltz.model.tally.Tally;
import org.jooq.*;
import org.jooq.impl.DSL;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.math.BigDecimal;
import java.util.List;
import static com.khartec.waltz.common.Checks.checkNotNull;
import static com.khartec.waltz.data.logical_flow.LogicalFlowDao.LOGICAL_NOT_REMOVED;
import static com.khartec.waltz.schema.tables.LogicalFlow.LOGICAL_FLOW;
@Deprecated
@Repository
public class ConnectionComplexityDao {
private static final Field<Integer> CONNECTION_COUNT_ALIAS = DSL.field("connection_count", Integer.class);
private static final Field<Long> APP_ID_ALIAS = DSL.field("app_id", Long.class);
private static final Field<Integer> TOTAL_CONNECTIONS_FIELD = DSL.field("total_connections", Integer.class);
private static final Field<Long> SOURCE_APP_FIELD = LOGICAL_FLOW.SOURCE_ENTITY_ID.as(APP_ID_ALIAS);
private static final Field<Long> TARGET_APP_FIELD = LOGICAL_FLOW.TARGET_ENTITY_ID.as(APP_ID_ALIAS);
private static final Field<Integer> TARGET_COUNT_FIELD = DSL.countDistinct(LOGICAL_FLOW.TARGET_ENTITY_ID).as(CONNECTION_COUNT_ALIAS);
private static final Field<Integer> SOURCE_COUNT_FIELD = DSL.countDistinct(LOGICAL_FLOW.SOURCE_ENTITY_ID).as(CONNECTION_COUNT_ALIAS);
private static final String APPLICATION_KIND = EntityKind.APPLICATION.name();
private static final Condition BOTH_ARE_APPLICATIONS_AND_NOT_REMOVED =
LOGICAL_FLOW.SOURCE_ENTITY_KIND
.eq(APPLICATION_KIND)
.and(LOGICAL_FLOW.TARGET_ENTITY_KIND
.eq(APPLICATION_KIND))
.and(LOGICAL_NOT_REMOVED);
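// Per-application fan-out (distinct targets per source) and fan-in
// (distinct sources per target), restricted to app-to-app flows.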
private static final SelectHavingStep<Record2<Long, Integer>> OUTBOUND_FLOWS =
DSL.select(SOURCE_APP_FIELD, TARGET_COUNT_FIELD)
.from(LOGICAL_FLOW)
.where(BOTH_ARE_APPLICATIONS_AND_NOT_REMOVED)
.groupBy(LOGICAL_FLOW.SOURCE_ENTITY_ID);
private static final SelectHavingStep<Record2<Long, Integer>> INBOUND_FLOWS =
DSL.select(TARGET_APP_FIELD, SOURCE_COUNT_FIELD)
.from(LOGICAL_FLOW)
.where(BOTH_ARE_APPLICATIONS_AND_NOT_REMOVED)
.groupBy(LOGICAL_FLOW.TARGET_ENTITY_ID);
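// Total connection count per application: fan-in plus fan-out, summed.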
private static final SelectHavingStep<Record2<Long, BigDecimal>> TOTAL_FLOW_COUNTS =
DSL.select(APP_ID_ALIAS, DSL.sum(CONNECTION_COUNT_ALIAS).as(TOTAL_CONNECTIONS_FIELD))
.from(OUTBOUND_FLOWS.unionAll(INBOUND_FLOWS))
.groupBy(APP_ID_ALIAS);
private DSLContext dsl;
@Autowired
public ConnectionComplexityDao(DSLContext dsl) {
this.dsl = dsl;
checkNotNull(dsl, "DSL cannot be null");
}
// ---- convenience functions
public int calculateBaseline() {
return calculateBaseline(DSL.trueCondition());
}
public int calculateBaseline(Select<Record1<Long>> appIdProvider) {
return calculateBaseline(APP_ID_ALIAS.in(appIdProvider));
}
public int calculateBaseline(Long appIds) {
return calculateBaseline(APP_ID_ALIAS.in(appIds));
}
public List<Tally<Long>> findCounts() {
return findCounts(DSL.trueCondition());
}
public List<Tally<Long>> findCounts(Select<Record1<Long>> appIdProvider) {
return findCounts(APP_ID_ALIAS.in(appIdProvider));
}
public List<Tally<Long>> findCounts(Long... appIds) {
return findCounts(APP_ID_ALIAS.in(appIds));
}
// ---- base queries
private int calculateBaseline(Condition condition) {
<|fim▁hole|> return dsl.select(DSL.max(TOTAL_CONNECTIONS_FIELD))
.from(TOTAL_FLOW_COUNTS)
.where(condition)
.fetchOptional(0, Integer.class)
.orElse(0);
}
private List<Tally<Long>> findCounts(Condition condition) {
return dsl.select(APP_ID_ALIAS, TOTAL_CONNECTIONS_FIELD)
.from(TOTAL_FLOW_COUNTS)
.where(condition)
.fetch(r -> ImmutableTally.<Long>builder()
.id(r.value1())
.count(r.value2())
.build());
}
}<|fim▁end|> | |
<|file_name|>cleanup_instruction_set_fix_operands.cc<|end_file_name|><|fim▁begin|>// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "exegesis/x86/cleanup_instruction_set_fix_operands.h"
#include <algorithm>
#include <iterator>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "exegesis/base/cleanup_instruction_set.h"
#include "exegesis/proto/instructions.pb.h"
#include "exegesis/util/instruction_syntax.h"
#include "exegesis/x86/cleanup_instruction_set_utils.h"
#include "glog/logging.h"
#include "src/google/protobuf/repeated_field.h"
#include "util/gtl/map_util.h"
namespace exegesis {
namespace x86 {
namespace {
using ::google::protobuf::RepeatedPtrField;
// Mapping from memory operands to their sizes as used in the Intel assembly
// syntax.
const std::pair<const char*, const char*> kOperandToPointerSize[] = {
{"m8", "BYTE"}, {"m16", "WORD"}, {"m32", "DWORD"}, {"m64", "QWORD"}};
// List of RSI-indexed source arrays.
const char* kRSIIndexes[] = {"BYTE PTR [RSI]", "WORD PTR [RSI]",
"DWORD PTR [RSI]", "QWORD PTR [RSI]"};
// List of RDI-indexed destination arrays.
const char* kRDIIndexes[] = {"BYTE PTR [RDI]", "WORD PTR [RDI]",
"DWORD PTR [RDI]", "QWORD PTR [RDI]"};
} // namespace
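// Rewrites the operands of the string instructions CMPS and MOVS from the
// size-only forms used in the manual (m8/m16/m32/m64) into the explicit
// RSI/RDI-indexed memory operands of the Intel syntax, e.g. "BYTE PTR [RSI]".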
absl::Status FixOperandsOfCmpsAndMovs(InstructionSetProto* instruction_set) {
CHECK(instruction_set != nullptr);
const absl::flat_hash_set<std::string> kMnemonics = {"CMPS", "MOVS"};
const absl::flat_hash_set<std::string> kSourceOperands(
std::begin(kRSIIndexes), std::end(kRSIIndexes));
const absl::flat_hash_set<std::string> kDestinationOperands(
std::begin(kRDIIndexes), std::begin(kRDIIndexes));
const absl::flat_hash_map<std::string, std::string> operand_to_pointer_size(
std::begin(kOperandToPointerSize), std::end(kOperandToPointerSize));
absl::Status status = absl::OkStatus();
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
if (!kMnemonics.contains(vendor_syntax->mnemonic())) {
continue;
}
if (vendor_syntax->operands_size() != 2) {
status = absl::InvalidArgumentError(
"Unexpected number of operands of a CMPS/MOVS instruction.");
LOG(ERROR) << status;
continue;
}
std::string pointer_size;
if (!gtl::FindCopy(operand_to_pointer_size,
vendor_syntax->operands(0).name(), &pointer_size) &&
!kSourceOperands.contains(vendor_syntax->operands(0).name()) &&
!kDestinationOperands.contains(vendor_syntax->operands(0).name())) {
status = absl::InvalidArgumentError(
absl::StrCat("Unexpected operand of a CMPS/MOVS instruction: ",
vendor_syntax->operands(0).name()));
LOG(ERROR) << status;
continue;
}
CHECK_EQ(vendor_syntax->operands_size(), 2);
// The correct syntax for MOVS is MOVSB BYTE PTR [RDI],BYTE PTR [RSI]
// (destination is the right operand, as expected in the Intel syntax),
// while for CMPS LLVM only supports CMPSB BYTE PTR [RSI],BYTE PTR [RDI].
// The following handles this.
constexpr const char* const kIndexings[] = {"[RDI]", "[RSI]"};
const int dest = vendor_syntax->mnemonic() == "MOVS" ? 0 : 1;
const int src = 1 - dest;
vendor_syntax->mutable_operands(0)->set_name(
absl::StrCat(pointer_size, " PTR ", kIndexings[dest]));
vendor_syntax->mutable_operands(0)->set_usage(
dest == 0 ? InstructionOperand::USAGE_WRITE
: InstructionOperand::USAGE_READ);
vendor_syntax->mutable_operands(1)->set_name(
absl::StrCat(pointer_size, " PTR ", kIndexings[src]));
vendor_syntax->mutable_operands(1)->set_usage(
InstructionOperand::USAGE_READ);
}
return status;
}
REGISTER_INSTRUCTION_SET_TRANSFORM(FixOperandsOfCmpsAndMovs, 2000);
absl::Status FixOperandsOfInsAndOuts(InstructionSetProto* instruction_set) {
constexpr char kIns[] = "INS";
constexpr char kOuts[] = "OUTS";
const absl::flat_hash_map<std::string, std::string> operand_to_pointer_size(
std::begin(kOperandToPointerSize), std::end(kOperandToPointerSize));
absl::Status status = absl::OkStatus();
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
const bool is_ins = vendor_syntax->mnemonic() == kIns;
const bool is_outs = vendor_syntax->mnemonic() == kOuts;
if (!is_ins && !is_outs) {
continue;
}
if (vendor_syntax->operands_size() != 2) {
status = absl::InvalidArgumentError(
"Unexpected number of operands of an INS/OUTS instruction.");
LOG(ERROR) << status;
continue;
}
std::string pointer_size;
if (!gtl::FindCopy(operand_to_pointer_size,
vendor_syntax->operands(0).name(), &pointer_size) &&
!gtl::FindCopy(operand_to_pointer_size,
vendor_syntax->operands(1).name(), &pointer_size)) {
status = absl::InvalidArgumentError(
absl::StrCat("Unexpected operands of an INS/OUTS instruction: ",
vendor_syntax->operands(0).name(), ", ",
vendor_syntax->operands(1).name()));
LOG(ERROR) << status;
continue;
}
CHECK_EQ(vendor_syntax->operands_size(), 2);
if (is_ins) {
vendor_syntax->mutable_operands(0)->set_name(
absl::StrCat(pointer_size, " PTR [RDI]"));
vendor_syntax->mutable_operands(0)->set_usage(
InstructionOperand::USAGE_WRITE);
vendor_syntax->mutable_operands(1)->set_name("DX");
vendor_syntax->mutable_operands(1)->set_usage(
InstructionOperand::USAGE_READ);
} else {
CHECK(is_outs);
vendor_syntax->mutable_operands(0)->set_name("DX");
vendor_syntax->mutable_operands(0)->set_usage(
InstructionOperand::USAGE_READ);
vendor_syntax->mutable_operands(1)->set_name(
absl::StrCat(pointer_size, " PTR [RSI]"));
vendor_syntax->mutable_operands(1)->set_usage(
InstructionOperand::USAGE_READ);
}
}
return status;
}
REGISTER_INSTRUCTION_SET_TRANSFORM(FixOperandsOfInsAndOuts, 2000);
absl::Status FixOperandsOfLddqu(InstructionSetProto* instruction_set) {
constexpr char kMemOperand[] = "mem";
constexpr char kM128Operand[] = "m128";
constexpr char kLddquEncoding[] = "F2 0F F0 /r";
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
if (instruction.raw_encoding_specification() != kLddquEncoding) continue;
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
for (InstructionOperand& operand : *vendor_syntax->mutable_operands()) {
if (operand.name() == kMemOperand) {
operand.set_name(kM128Operand);
}
}
}
return absl::OkStatus();
}
REGISTER_INSTRUCTION_SET_TRANSFORM(FixOperandsOfLddqu, 2000);
absl::Status FixOperandsOfLodsScasAndStos(
InstructionSetProto* instruction_set) {
// Note that we're matching only the versions with operands. These versions
// use the mnemonics without the size suffix. By matching exactly these names,
// we can easily avoid the operand-less versions.
constexpr char kLods[] = "LODS";
constexpr char kScas[] = "SCAS";
constexpr char kStos[] = "STOS";
const absl::flat_hash_map<std::string, std::string> operand_to_pointer_size(
std::begin(kOperandToPointerSize), std::end(kOperandToPointerSize));
const absl::flat_hash_map<std::string, std::string> kOperandToRegister = {
{"m8", "AL"}, {"m16", "AX"}, {"m32", "EAX"}, {"m64", "RAX"}};
absl::Status status = absl::OkStatus();
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
const bool is_lods = vendor_syntax->mnemonic() == kLods;
const bool is_stos = vendor_syntax->mnemonic() == kStos;
const bool is_scas = vendor_syntax->mnemonic() == kScas;
if (!is_lods && !is_stos && !is_scas) {
continue;
}
if (vendor_syntax->operands_size() != 1) {
status = absl::InvalidArgumentError(
"Unexpected number of operands of a LODS/STOS instruction.");
LOG(ERROR) << status;
continue;
}
std::string register_operand;
std::string pointer_size;
if (!gtl::FindCopy(kOperandToRegister, vendor_syntax->operands(0).name(),
®ister_operand) ||
!gtl::FindCopy(operand_to_pointer_size,
vendor_syntax->operands(0).name(), &pointer_size)) {
status = absl::InvalidArgumentError(
absl::StrCat("Unexpected operand of a LODS/STOS instruction: ",
vendor_syntax->operands(0).name()));
LOG(ERROR) << status;
continue;
}
vendor_syntax->clear_operands();
if (is_stos) {
auto* const operand = vendor_syntax->add_operands();
operand->set_name(absl::StrCat(pointer_size, " PTR [RDI]"));
operand->set_encoding(InstructionOperand::IMPLICIT_ENCODING);
operand->set_usage(InstructionOperand::USAGE_READ);
}
auto* const operand = vendor_syntax->add_operands();
operand->set_encoding(InstructionOperand::IMPLICIT_ENCODING);
operand->set_name(register_operand);
operand->set_usage(InstructionOperand::USAGE_READ);
if (is_lods) {
auto* const operand = vendor_syntax->add_operands();
operand->set_encoding(InstructionOperand::IMPLICIT_ENCODING);
operand->set_name(absl::StrCat(pointer_size, " PTR [RSI]"));
operand->set_usage(InstructionOperand::USAGE_READ);
}
if (is_scas) {
auto* const operand = vendor_syntax->add_operands();
operand->set_encoding(InstructionOperand::IMPLICIT_ENCODING);
operand->set_name(absl::StrCat(pointer_size, " PTR [RDI]"));
operand->set_usage(InstructionOperand::USAGE_READ);
}
}
return status;
}
REGISTER_INSTRUCTION_SET_TRANSFORM(FixOperandsOfLodsScasAndStos, 2000);
absl::Status FixOperandsOfSgdtAndSidt(InstructionSetProto* instruction_set) {
CHECK(instruction_set != nullptr);
const absl::flat_hash_set<std::string> kEncodings = {"0F 01 /0", "0F 01 /1"};
constexpr char kMemoryOperandName[] = "m";
constexpr char kUpdatedMemoryOperandName[] = "m16&64";
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
if (kEncodings.contains(instruction.raw_encoding_specification())) {
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
for (InstructionOperand& operand : *vendor_syntax->mutable_operands()) {
if (operand.name() == kMemoryOperandName) {<|fim▁hole|> }
}
}
return absl::OkStatus();
}
REGISTER_INSTRUCTION_SET_TRANSFORM(FixOperandsOfSgdtAndSidt, 2000);
absl::Status FixOperandsOfVMovq(InstructionSetProto* instruction_set) {
CHECK(instruction_set != nullptr);
constexpr char kVMovQEncoding[] = "VEX.128.F3.0F.WIG 7E /r";
constexpr char kRegisterOrMemoryOperand[] = "xmm2/m64";
::google::protobuf::RepeatedPtrField<InstructionProto>* const instructions =
instruction_set->mutable_instructions();
for (InstructionProto& instruction : *instructions) {
if (instruction.raw_encoding_specification() != kVMovQEncoding) continue;
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
if (vendor_syntax->operands_size() != 2) {
return absl::InvalidArgumentError(
absl::StrCat("Unexpected number of operands of a VMOVQ instruction: ",
instruction.DebugString()));
}
vendor_syntax->mutable_operands(1)->set_name(kRegisterOrMemoryOperand);
}
return absl::OkStatus();
}
REGISTER_INSTRUCTION_SET_TRANSFORM(FixOperandsOfVMovq, 2000);
absl::Status FixRegOperands(InstructionSetProto* instruction_set) {
CHECK(instruction_set != nullptr);
constexpr char kR8Operand[] = "r8";
constexpr char kR16Operand[] = "r16";
constexpr char kR32Operand[] = "r32";
constexpr char kR64Operand[] = "r64";
constexpr char kRegOperand[] = "reg";
// The mnemonics for which we add new entries.
const absl::flat_hash_set<std::string> kExpandToAllSizes = {"LAR"};
// The mnemonics for which we just replace reg with r8/r16/r32.
const absl::flat_hash_set<std::string> kRenameToReg8 = {"VPBROADCASTB"};
const absl::flat_hash_set<std::string> kRenameToReg16 = {"VPBROADCASTW"};
const absl::flat_hash_set<std::string> kRenameToReg32 = {
"EXTRACTPS", "MOVMSKPD", "MOVMSKPS", "PEXTRB", "PEXTRW", "PMOVMSKB",
"VMOVMSKPD", "VMOVMSKPS", "VPEXTRB", "VPEXTRW", "VPMOVMSKB"};
// We can't safely add new entries to 'instructions' while we iterate over it.
// Instead, we collect the instructions in a separate vector and add it to the
// proto at the end.
std::vector<InstructionProto> new_instruction_protos;
::google::protobuf::RepeatedPtrField<InstructionProto>* const instructions =
instruction_set->mutable_instructions();
absl::Status status = absl::OkStatus();
for (InstructionProto& instruction : *instructions) {
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
const std::string& mnemonic = vendor_syntax->mnemonic();
for (auto& operand : *vendor_syntax->mutable_operands()) {
if (operand.name() == kRegOperand) {
if (kExpandToAllSizes.contains(mnemonic)) {
// This is a bit hacky. To avoid complicated matching of registers, we
// just override the existing entry in the instruction set proto, add
// the modified proto to new_instruction_protos except for the last
// modification which we keep in the instruction set proto.
//
// This is safe as long as there is only one reg operand per entry
// (which is true in the current version of the data).
operand.set_name(kR32Operand);
new_instruction_protos.push_back(instruction);
operand.set_name(kR64Operand);
instruction.set_raw_encoding_specification(
"REX.W + " + instruction.raw_encoding_specification());
} else if (kRenameToReg8.contains(mnemonic)) {
operand.set_name(kR8Operand);
} else if (kRenameToReg16.contains(mnemonic)) {
operand.set_name(kR16Operand);
} else if (kRenameToReg32.contains(mnemonic)) {
operand.set_name(kR32Operand);
} else {
status = absl::InvalidArgumentError(
absl::StrCat("Unexpected instruction mnemonic: ", mnemonic));
LOG(ERROR) << status;
continue;
}
}
}
}
std::copy(new_instruction_protos.begin(), new_instruction_protos.end(),
::google::protobuf::RepeatedPtrFieldBackInserter(instructions));
return status;
}
REGISTER_INSTRUCTION_SET_TRANSFORM(FixRegOperands, 2000);
absl::Status RenameOperands(InstructionSetProto* instruction_set) {
CHECK(instruction_set != nullptr);
const absl::flat_hash_map<std::string, std::string> kOperandRenaming = {
// Synonyms (different names used for the same type in different parts of
// the manual).
{"m80dec", "m80bcd"},
{"r8/m8", "r/m8"},
{"r16/m16", "r/m16"},
{"r32/m32", "r/m32"},
{"r64/m64", "r/m64"},
{"ST", "ST(0)"},
// Variants that depend on the mode of the CPU. The 32- and 64-bit modes
// always use the larger of the two values.
{"m14/28byte", "m28byte"},
{"m94/108byte", "m108byte"}};
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
InstructionFormat* const vendor_syntax =
GetOrAddUniqueVendorSyntaxOrDie(&instruction);
for (auto& operand : *vendor_syntax->mutable_operands()) {
const std::string* renaming =
gtl::FindOrNull(kOperandRenaming, operand.name());
if (renaming != nullptr) {
operand.set_name(*renaming);
}
}
}
return absl::OkStatus();
}
REGISTER_INSTRUCTION_SET_TRANSFORM(RenameOperands, 2000);
absl::Status RemoveImplicitST0Operand(InstructionSetProto* instruction_set) {
CHECK(instruction_set != nullptr);
static constexpr char kImplicitST0Operand[] = "ST(0)";
const absl::flat_hash_set<std::string> kUpdatedInstructionEncodings = {
"D8 C0+i", "D8 C8+i", "D8 E0+i", "D8 E8+i", "D8 F0+i", "D8 F8+i",
"DB E8+i", "DB F0+i", "DE C0+i", "DE C8+i", "DE E0+i", "DE E8+i",
"DE F0+i", "DE F8+i", "DF E8+i", "DF F0+i",
};
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
if (!kUpdatedInstructionEncodings.contains(
instruction.raw_encoding_specification())) {
continue;
}
RepeatedPtrField<InstructionOperand>* const operands =
GetOrAddUniqueVendorSyntaxOrDie(&instruction)->mutable_operands();
operands->erase(std::remove_if(operands->begin(), operands->end(),
[](const InstructionOperand& operand) {
return operand.name() ==
kImplicitST0Operand;
}),
operands->end());
}
return absl::OkStatus();
}
REGISTER_INSTRUCTION_SET_TRANSFORM(RemoveImplicitST0Operand, 2000);
absl::Status RemoveImplicitOperands(InstructionSetProto* instruction_set) {
CHECK(instruction_set != nullptr);
const absl::flat_hash_set<absl::string_view> kImplicitXmmOperands = {
"<EAX>", "<XMM0>", "<XMM0-2>", "<XMM0-6>", "<XMM0-7>", "<XMM4-6>"};
for (InstructionProto& instruction :
*instruction_set->mutable_instructions()) {
RepeatedPtrField<InstructionOperand>* const operands =
GetOrAddUniqueVendorSyntaxOrDie(&instruction)->mutable_operands();
operands->erase(
std::remove_if(
operands->begin(), operands->end(),
[&kImplicitXmmOperands](const InstructionOperand& operand) {
return kImplicitXmmOperands.contains(operand.name());
}),
operands->end());
}
return absl::OkStatus();
}
REGISTER_INSTRUCTION_SET_TRANSFORM(RemoveImplicitOperands, 2000);
} // namespace x86
} // namespace exegesis<|fim▁end|> | operand.set_name(kUpdatedMemoryOperandName);
} |
<|file_name|>htmllabelelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::activation::{Activatable, ActivationSource, synthetic_click_activation};
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::HTMLLabelElementBinding;
use dom::bindings::codegen::Bindings::HTMLLabelElementBinding::HTMLLabelElementMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;<|fim▁hole|>use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlformelement::{FormControl, FormControlElementHelpers, HTMLFormElement};
use dom::node::{document_from_node, Node};
use dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
use style::attr::AttrValue;
#[dom_struct]
pub struct HTMLLabelElement {
htmlelement: HTMLElement
}
impl HTMLLabelElement {
fn new_inherited(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> HTMLLabelElement {
HTMLLabelElement {
htmlelement:
HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLLabelElement> {
Node::reflect_node(box HTMLLabelElement::new_inherited(local_name, prefix, document),
document,
HTMLLabelElementBinding::Wrap)
}
}
impl Activatable for HTMLLabelElement {
fn as_element(&self) -> &Element {
self.upcast::<Element>()
}
fn is_instance_activatable(&self) -> bool {
true
}
// https://html.spec.whatwg.org/multipage/#run-pre-click-activation-steps
// https://html.spec.whatwg.org/multipage/#the-button-element:activation-behavior
fn pre_click_activation(&self) {
}
// https://html.spec.whatwg.org/multipage/#run-canceled-activation-steps
fn canceled_activation(&self) {
}
// https://html.spec.whatwg.org/multipage/#run-post-click-activation-steps
fn activation_behavior(&self, _event: &Event, _target: &EventTarget) {
if let Some(e) = self.GetControl() {
let elem = e.upcast::<Element>();
synthetic_click_activation(elem,
false,
false,
false,
false,
ActivationSource::NotFromClick);
}
}
// https://html.spec.whatwg.org/multipage/#implicit-submission
fn implicit_submission(&self, _ctrl_key: bool, _shift_key: bool, _alt_key: bool, _meta_key: bool) {
//FIXME: Investigate and implement implicit submission for label elements
// Issue filed at https://github.com/servo/servo/issues/8263
}
}
impl HTMLLabelElementMethods for HTMLLabelElement {
// https://html.spec.whatwg.org/multipage/#dom-fae-form
fn GetForm(&self) -> Option<Root<HTMLFormElement>> {
self.form_owner()
}
// https://html.spec.whatwg.org/multipage/#dom-label-htmlfor
make_getter!(HtmlFor, "for");
// https://html.spec.whatwg.org/multipage/#dom-label-htmlfor
make_atomic_setter!(SetHtmlFor, "for");
// https://html.spec.whatwg.org/multipage/#dom-label-control
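// If a `for` attribute is present, the control is the labelable element
// carrying that ID; otherwise it is the first labelable descendant (see
// `first_labelable_descendant` below).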
fn GetControl(&self) -> Option<Root<HTMLElement>> {
if !self.upcast::<Node>().is_in_doc() {
return None;
}
let for_attr = match self.upcast::<Element>().get_attribute(&ns!(), &local_name!("for")) {
Some(for_attr) => for_attr,
None => return self.first_labelable_descendant(),
};
let for_value = for_attr.value();
document_from_node(self).get_element_by_id(for_value.as_atom())
.and_then(Root::downcast::<HTMLElement>)
.into_iter()
.filter(|e| e.is_labelable_element())
.next()
}
}
impl VirtualMethods for HTMLLabelElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &LocalName, value: DOMString) -> AttrValue {
match name {
&local_name!("for") => AttrValue::from_atomic(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&local_name!("form") => {
self.form_attribute_mutated(mutation);
},
_ => {},
}
}
}
impl HTMLLabelElement {
pub fn first_labelable_descendant(&self) -> Option<Root<HTMLElement>> {
self.upcast::<Node>()
.traverse_preorder()
.filter_map(Root::downcast::<HTMLElement>)
.filter(|elem| elem.is_labelable_element())
.next()
}
}
impl FormControl for HTMLLabelElement {
fn form_owner(&self) -> Option<Root<HTMLFormElement>> {
self.GetControl().map(Root::upcast::<Element>).and_then(|elem| {
elem.as_maybe_form_control().and_then(|control| control.form_owner())
})
}
fn set_form_owner(&self, _: Option<&HTMLFormElement>) {
// Label is a special case for form owner, it reflects its control's
// form owner. Therefore it doesn't hold form owner itself.
}
fn to_element<'a>(&'a self) -> &'a Element {
self.upcast::<Element>()
}
}<|fim▁end|> | use dom::element::{AttributeMutation, Element}; |
<|file_name|>WordnetManager.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import itertools
"""
Languages | ShortCode | Wordnet
Albanian | sq | als
Arabic | ar | arb
Bulgarian | bg | bul
Catalan | ca | cat
Chinese | zh | cmn
Chinese (Taiwan) | qn | qcn
Greek | el | ell
Basque | eu | eus
Persian | fa | fas
Finnish | fi | fin
French | fr | fra
Galician | gl | glg
Hebrew | he | heb
Croatian | hr | hrv
Indonesian | id | ind
Italian | it | ita
Japanese | ja | jpn
Norwegian NyNorsk | nn | nno
Norwegian Bokmål | nb/no | nob
Polish | pl | pol
Portuguese | pt | por<|fim▁hole|>Thai | tt | tha
Malay | ms | zsm
"""
"""
Language short codes => Wordnet Code
"""
AVAILABLE_LANGUAGES = dict([('sq','als'), ('ar', 'arb'), ('bg', 'bul'), ('ca', 'cat'), ('da', 'dan'), ('zh', 'cmn'),
('el','ell'), ('eu', 'eus'), ('fa', 'fas'), ('fi', 'fin'), ('fr', 'fra'),
('gl','glg'), ('he', 'heb'), ('hr', 'hrv'), ('id', 'ind'), ('it', 'ita'),
('ja','jpn'),
('nn', 'nno'), ('nb', 'nob'),
('no', 'nob'), ('pl', 'pol'),
('pt', 'por'),
('qn','qcn'), ('sl', 'slv'), ('es', 'spa'), ('sv', 'swe'), ('tt', 'tha'),
('ms', 'zsm'),
('en', 'eng')])
"""
Language names => Short Code
"""
AVAILABLE_LANGUAGES_NAMES = dict([
('albanian', 'sq'), ('arabic', 'ar'), ('bulgarian', 'bg'), ('catalan', 'ca'), ('danish', 'da'),
('chinese', 'zh'), ('basque', 'eu'), ('persian', 'fa'), ('finnish', 'fi'), ('french', 'fr'),
('galician', 'gl'), ('hebrew', 'he'), ('croatian', 'hr'), ('indonesian', 'id'), ('italian', 'it'),
('japanese', 'ja'), ('norwegian_nynorsk', 'nn'), ('norwegian', 'no'), ('norwegian_bokmal', 'nb'),
('polish', 'pl'), ('portuguese', 'pt'), ('slovenian', 'sl'), ('spanish', 'es'),
('swedish', 'sv'), ('thai', 'tt'), ('malay', 'ms'), ('english', 'en')
])
class WordnetManager(object):
def __init__(self, language="en"):
"""
Constructor for the wordnet manager.
It takes a main language.
"""
self.__language = language
def __isLanguageAvailable(self, code=None, language_name=None):
"""
Check if a language is available
"""
if code is None and language_name is None:
raise Exception("Error evaluating the correct language")
if code is not None and code.lower() in AVAILABLE_LANGUAGES:
return True
if language_name is not None and language_name.lower() in AVAILABLE_LANGUAGES_NAMES:
return True
return False
def __nameToWordnetCode(self, name):
"""
It returns the wordnet code for a given language name
"""
if not self.__isLanguageAvailable(language_name=name):
raise Exception("Wordnet code not found for the language name %s " % name)
name = name.lower()
languageShortCode = AVAILABLE_LANGUAGES_NAMES[name]
wordnetCode = self.__shortCodeToWordnetCode(code=languageShortCode)
return wordnetCode
def __shortCodeToWordnetCode(self, shortCode):
"""
It returns the wordnet code from a given language short code
"""
if not self.__isLanguageAvailable(code=shortCode):
raise Exception("Wordnet code not found for the language short code %s " % shortCode)
code = shortCode.lower()
wordnetCode = AVAILABLE_LANGUAGES[code]
return wordnetCode
def __getSynsets(self, word, wordNetCode):
"""
It returns the synsets given both word and language code
"""
from nltk.corpus import wordnet as wn
synsets = wn.synsets(word, lang=wordNetCode)
return synsets
def getLemmas(self, word, languageCode="en"):
"""
Get the lemmas for a given word
:word: The word
:languageCode: The language for a given lemma
"""
wnCode = self.__shortCodeToWordnetCode(shortCode=languageCode)
synsets = self.__getSynsets(word, wnCode) #wn.synsets(word, lang=wnCode)
lemmas = dict([('en', [])])
for synset in synsets:
enLemmas = synset.lemma_names()
lemmas['en'].extend(enLemmas)
if languageCode != "en" and self.__isLanguageAvailable(code=languageCode):
langLemmas = list(sorted(set(synset.lemma_names(lang=wnCode))))
lemmas[languageCode] = langLemmas
lemmas['en'] = list(sorted(set(lemmas.get('en', []))))
return lemmas
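# Usage sketch (illustrative; assumes the NLTK 'wordnet' and Open
# Multilingual Wordnet corpora are installed):
#   WordnetManager().getLemmas("dog", languageCode="it")
#   # -> {'en': ['dog', ...], 'it': ['cane', ...]}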
def getSynonyms(self, words=[], language_code="en"):
"""
Get the synonyms from a list of words.
:words: A list of words
:language_code: the language for the synonyms.
"""
if words is None or not isinstance(words, list) or len(words) <= 0:
return []
if not self.__isLanguageAvailable(code=language_code):
return []
wnCode = self.__shortCodeToWordnetCode(language_code)
result = {}
for word in words:
result[word] = dict([('lemmas', self.getLemmas(word,languageCode=language_code))])
return result
def getHyponyms(self, words, language_code="en"):
"""
Get the lemmas of more specific synsets (hyponyms) for the given words
"""
wnCode = self.__shortCodeToWordnetCode(language_code)
result = {}
for word in words:
synonyms = self.__getSynsets(word, wnCode)
hyponyms = [hyp for synset in synonyms for hyp in synset.hyponyms()]
engLemmas = [hyp.lemma_names() for hyp in hyponyms]
lemmas = dict([('en', list(sorted(set(itertools.chain.from_iterable(engLemmas)), key=lambda s: s.lower())))])
if language_code != "en":
languageLemmas = [hyp.lemma_names(lang=wnCode) for hyp in hyponyms]
languageLemmas = list(sorted(set(itertools.chain.from_iterable(languageLemmas)), key=lambda s: s.lower()))
lemmas[language_code] = languageLemmas
result[word] = dict([ ('lemmas', lemmas), ('language', language_code)])
return result
def getHypernyms(self, words, language_code="en"):
"""
Get the lemmas of more general synsets (hypernyms) for the given words
"""
wnCode = self.__shortCodeToWordnetCode(language_code)
result = {}
for word in words:
synonyms = self.__getSynsets(word, wnCode)
hypernyms = [hyp for synset in synonyms for hyp in synset.hypernyms()]
engLemmas = [hyp.lemma_names() for hyp in hypernyms]
lemmas = dict([('en', list(sorted(set(itertools.chain.from_iterable(engLemmas)), key=lambda s: s.lower())))])
if language_code != "en":
languageLemmas = [hyp.lemma_names(lang=wnCode) for hyp in hypernyms]
languageLemmas = list(sorted(set(itertools.chain.from_iterable(languageLemmas)), key=lambda s: s.lower()))
lemmas[language_code] = languageLemmas
result[word] = dict([ ('lemmas', lemmas), ('language', language_code)])
return result<|fim▁end|> | Slovenian | sl | slv
Spanish | es | spa
Swedish | sv | swe |
<|file_name|>open.js<|end_file_name|><|fim▁begin|>var opn = require('opn');
console.log('Opening QR code...')
// Opens the image in the default image viewer
opn('static/img/qr.jpg').then(() => {<|fim▁hole|> console.log('QR code closed!')
});<|fim▁end|> | |
<|file_name|>test_sources.py<|end_file_name|><|fim▁begin|>#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
import numpy as np
from six import integer_types
from unittest import TestCase
import zipline.utils.factory as factory
from zipline.sources import (DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.assets import AssetFinder
class TestDataFrameSource(TestCase):
def test_df_source(self):
source, df = factory.create_test_df_source()
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for expected_dt, expected_price in df.iterrows():
sid0 = next(source)
assert expected_dt == sid0.dt
assert expected_price[0] == sid0.price
def test_df_sid_filtering(self):
_, df = factory.create_test_df_source()
source = DataFrameSource(df)
assert 1 not in [event.sid for event in source], \
"DataFrameSource should only stream selected sid 0, not sid 1."
def test_panel_source(self):
source, panel = factory.create_test_panel_source(source_type=5)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertTrue('sid' in event)
self.assertTrue('arbitrary' in event)
self.assertTrue('type' in event)
self.assertTrue(hasattr(event, 'volume'))
self.assertTrue(hasattr(event, 'price'))
self.assertEquals(event['type'], 5)
self.assertEquals(event['arbitrary'], 1.)
self.assertEquals(event['sid'], 0)
self.assertTrue(isinstance(event['volume'], int))
self.assertTrue(isinstance(event['arbitrary'], float))
def test_yahoo_bars_to_panel_source(self):
finder = AssetFinder()
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = factory.load_bars_from_yahoo(stocks=stocks,
indexes={},
start=start,
end=end)
check_fields = ['sid', 'open', 'high', 'low', 'close',
'volume', 'price']
copy_panel = data.copy()
sids = finder.map_identifier_index_to_sids(
data.items, data.major_axis[0]
)
copy_panel.items = sids
source = DataPanelSource(copy_panel)
for event in source:
for check_field in check_fields:
self.assertIn(check_field, event)
self.assertTrue(isinstance(event['volume'], (integer_types)))
self.assertTrue(event['sid'] in sids)
def test_nan_filter_dataframe(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.DataFrame(np.random.randn(2, 2),
index=dates,
columns=[4, 5])
# should be filtered
df.loc[dates[0], 4] = np.nan
# should not be filtered, should have been ffilled
df.loc[dates[1], 5] = np.nan
source = DataFrameSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
def test_nan_filter_panel(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.Panel(np.random.randn(2, 2, 2),
major_axis=dates,
items=[4, 5],
minor_axis=['price', 'volume'])
# should be filtered
df.loc[4, dates[0], 'price'] = np.nan
# should not be filtered, should have been ffilled
df.loc[5, dates[1], 'price'] = np.nan
source = DataPanelSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))<|fim▁hole|>class TestRandomWalkSource(TestCase):
def test_minute(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1991-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end)
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertTrue(13 <= event.dt.hour <= 21,
"event.dt.hour == %i, not during market \
hours." % event.dt.hour)
def test_day(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1992-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end, freq='daily')
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertEqual(event.dt.hour, 0)<|fim▁end|> | |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
var intlNameInitials = function () {
};
var pattern = '{0}{1}';
function _firstLetter(text) {
return text.charAt(0);
}
function _upperCase(letter) {
if (letter === 'ı'){
return 'I';
}
return letter.toUpperCase();
}
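// Hangul syllables occupy code points U+AC00 (44032) through U+D7A3 (55203).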
function _isHangul(l){
if ((l >= 44032) && (l <= 55203)) {
return true;
}
return false;
}
function _initials(letter) {
var l = letter.charCodeAt(0);
// Generated by regenerate and unicode-8.0.0
// Greek 117
// Latin 991
// Cyrillic 302
var alphaRegex = '[A-Za-z\xAA\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02B8\u02E0-\u02E4\u0370-\u0373\u0375-\u0377\u037A-\u037D\u037F\u0384\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03E1\u03F0-\u0484\u0487-\u052F\u1D00-\u1D25\u1D2C-\u1D5C\u1D62-\u1D65\u1D6B-\u1D77\u1D79-\u1DBE\u1E00-\u1EFE]';
var re = new RegExp(alphaRegex,'i');
if (re.test(letter)){
return letter;
}
return '';
}
function _isSupportedInitials(letter) {
var alphaRegex = '[A-Za-z\xAA\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02B8\u02E0-\u02E4\u0370-\u0373\u0375-\u0377\u037A-\u037D\u037F\u0384\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03E1\u03F0-\u0484\u0487-\u052F\u1D00-\u1D25\u1D2C-\u1D5C\u1D62-\u1D65\u1D6B-\u1D77\u1D79-\u1DBE\u1E00-\u1EFE]';
var re = new RegExp(alphaRegex,'i');
if (re.test(letter)){
return true;
}
return false;
}
function isThai(a){
var thaiRegex = '[\u0E01-\u0E3A\u0E40-\u0E5B]';
var re = new RegExp(thaiRegex,'i');
if (a.length === 1){
return re.test(a);
} else {
var letter = _firstLetter(a);
if (re.test(letter)) {
return true;
}
}
return false;
}
function isCJK(a){
// HANGUL SYLLABLES
// We want to be sure the full name is Hangul
if (a.length < 3){
var i = 0;
for(var c=0;c< a.length;c++){
if (_isHangul(a.charCodeAt(c)) )
{
i++;
}
}
if (i === a.length){
return true;
}
}
return false;
}
intlNameInitials.prototype.format = function (name, options) {
var initials = '',
a = '',
b = '';
var fields = ['firstName', 'lastName'],
initialName = { firstName : '', lastName: '' };
if (name === null || typeof name !== 'object' ) {
return undefined;
}
fields.forEach(function(field){
if (name.hasOwnProperty(field)) {
if (name[field] === null || name[field].length === 0){
// Nothing to do, but keeping it as a placeholder.
} else {
if (_isSupportedInitials(_firstLetter(name[field]))) {
initialName[field] = _firstLetter(name[field]);
initials = initials + _upperCase(_initials(initialName[field]));
}
}
}
});
// for CJK
if (name.hasOwnProperty("lastName")){<|fim▁hole|> } else {
if (isCJK(name.lastName)) {
initials = name.lastName;
}
}
}
if (initials.length === 0){
return undefined;
}
return initials;
};
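// Usage sketch (illustrative; the require name depends on how the package
// is installed):
//   var IntlNameInitials = require('intl-name-initials');
//   new IntlNameInitials().format({ firstName: 'Ada', lastName: 'Lovelace' }); // 'AL'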
module.exports = intlNameInitials;<|fim▁end|> |
if (name.lastName === null || name.lastName.length === 0){
|
<|file_name|>spawn.rs<|end_file_name|><|fim▁begin|>use std::io::ErrorKind;
use std::process::Command;
fn main() {
// Make sure it doesn't try to run "hopefullydoesntexist bar.exe".
assert_eq!(Command::new("hopefullydoesntexist")
.arg("bar")
.spawn()
.unwrap_err()
.kind(),<|fim▁hole|> ErrorKind::NotFound);
}<|fim▁end|> | |
<|file_name|>DispatcherConfig.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2016 Code Above Lab LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codeabovelab.dm.cluman.cluster.docker.model.swarm;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
@Data
public class DispatcherConfig {
<|fim▁hole|> @JsonProperty("HeartbeatPeriod")
private Long heartbeatPeriod;
}<|fim▁end|> | |
<|file_name|>_fallback.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
logbook._fallback
~~~~~~~~~~~~~~~~~
Fallback implementations in case speedups is not around.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
from itertools import count
from logbook.helpers import get_iterator_next_method
from logbook.concurrency import (thread_get_ident, greenlet_get_ident,
thread_local, greenlet_local,
ThreadLock, GreenletRLock, is_gevent_enabled)
_missing = object()
_MAX_CONTEXT_OBJECT_CACHE = 256
def group_reflected_property(name, default, fallback=_missing):
"""Returns a property for a given name that falls back to the
value of the group if set. If there is no such group, the
provided default is used.
"""<|fim▁hole|> rv = getattr(self, '_' + name, _missing)
if rv is not _missing and rv != fallback:
return rv
if self.group is None:
return default
return getattr(self.group, name)
def _set(self, value):
setattr(self, '_' + name, value)
def _del(self):
delattr(self, '_' + name)
return property(_get, _set, _del)
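# Usage sketch (hypothetical Handler-like class, for illustration only):
#   class Handler(object):
#       group = None
#       level = group_reflected_property('level', 0)
# Reading `h.level` returns `h._level` when set, falls back to
# `h.group.level` when a group is assigned, and otherwise yields the
# default (0).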
class _StackBound(object):
def __init__(self, obj, push, pop):
self.__obj = obj
self.__push = push
self.__pop = pop
def __enter__(self):
self.__push()
return self.__obj
def __exit__(self, exc_type, exc_value, tb):
self.__pop()
class StackedObject(object):
"""Baseclass for all objects that provide stack manipulation
operations.
"""
def push_greenlet(self):
"""Pushes the stacked object to the greenlet stack."""
raise NotImplementedError()
def pop_greenlet(self):
"""Pops the stacked object from the greenlet stack."""
raise NotImplementedError()
def push_thread(self):
"""Pushes the stacked object to the thread stack."""
raise NotImplementedError()
def pop_thread(self):
"""Pops the stacked object from the thread stack."""
raise NotImplementedError()
def push_application(self):
"""Pushes the stacked object to the application stack."""
raise NotImplementedError()
def pop_application(self):
"""Pops the stacked object from the application stack."""
raise NotImplementedError()
def __enter__(self):
if is_gevent_enabled():
self.push_greenlet()
else:
self.push_thread()
return self
def __exit__(self, exc_type, exc_value, tb):
if is_gevent_enabled():
self.pop_greenlet()
else:
self.pop_thread()
def greenletbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the greenlet.
"""
return _cls(self, self.push_greenlet, self.pop_greenlet)
def threadbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the thread.
"""
return _cls(self, self.push_thread, self.pop_thread)
def applicationbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the application.
"""
return _cls(self, self.push_application, self.pop_application)
class ContextStackManager(object):
"""Helper class for context objects that manages a stack of
objects.
"""
def __init__(self):
self._global = []
self._thread_context_lock = ThreadLock()
self._thread_context = thread_local()
self._greenlet_context_lock = GreenletRLock()
self._greenlet_context = greenlet_local()
self._cache = {}
self._stackop = get_iterator_next_method(count())
def iter_context_objects(self):
"""Returns an iterator over all objects for the combined
application and context cache.
"""
use_gevent = is_gevent_enabled()
tid = greenlet_get_ident() if use_gevent else thread_get_ident()
objects = self._cache.get(tid)
if objects is None:
if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:
self._cache.clear()
objects = self._global[:]
objects.extend(getattr(self._thread_context, 'stack', ()))
if use_gevent:
objects.extend(getattr(self._greenlet_context, 'stack', ()))
objects.sort(reverse=True)
objects = [x[1] for x in objects]
self._cache[tid] = objects
return iter(objects)
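# Note: every push_*/pop_* method below drops the cached entry for the
# current thread/greenlet id, so the merged stack is recomputed lazily on
# the next iter_context_objects() call.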
def push_greenlet(self, obj):
self._greenlet_context_lock.acquire()
try:
self._cache.pop(greenlet_get_ident(), None) # remote chance to conflict with thread ids
item = (self._stackop(), obj)
stack = getattr(self._greenlet_context, 'stack', None)
if stack is None:
self._greenlet_context.stack = [item]
else:
stack.append(item)
finally:
self._greenlet_context_lock.release()
def pop_greenlet(self):
self._greenlet_context_lock.acquire()
try:
self._cache.pop(greenlet_get_ident(), None) # remote chance to conflict with thread ids
stack = getattr(self._greenlet_context, 'stack', None)
assert stack, 'no objects on stack'
return stack.pop()[1]
finally:
self._greenlet_context_lock.release()
def push_thread(self, obj):
self._thread_context_lock.acquire()
try:
self._cache.pop(thread_get_ident(), None)
item = (self._stackop(), obj)
stack = getattr(self._thread_context, 'stack', None)
if stack is None:
self._thread_context.stack = [item]
else:
stack.append(item)
finally:
self._thread_context_lock.release()
def pop_thread(self):
self._thread_context_lock.acquire()
try:
self._cache.pop(thread_get_ident(), None)
stack = getattr(self._thread_context, 'stack', None)
assert stack, 'no objects on stack'
return stack.pop()[1]
finally:
self._thread_context_lock.release()
def push_application(self, obj):
self._global.append((self._stackop(), obj))
self._cache.clear()
def pop_application(self):
assert self._global, 'no objects on application stack'
popped = self._global.pop()[1]
self._cache.clear()
return popped<|fim▁end|> | def _get(self): |
<|file_name|>global.d.ts<|end_file_name|><|fim▁begin|>declare module "*.png";
declare module "*.gif";
declare module "*.jpg";
declare module "*.jpeg";
declare module "*.svg";
declare module "*.css";
declare module "*.less";
declare module "*.scss";
declare module "*.sass";<|fim▁hole|><|fim▁end|> | declare module "*.styl"; |
<|file_name|>app.js<|end_file_name|><|fim▁begin|>var Alloy = require("alloy"), _ = Alloy._, Backbone = Alloy.Backbone;
Alloy.Globals.steps = 0;
<|fim▁hole|>
Alloy.Globals.fruitCount = 0;
Alloy.createController("index");<|fim▁end|> | Alloy.Globals.capacity = 0;
Alloy.Globals.basketImage = ""; |
<|file_name|>bigtable_input.py<|end_file_name|><|fim▁begin|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime<|fim▁hole|>import re
import struct
import time
import numpy as np
from tqdm import tqdm
from absl import flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from tensorflow.contrib import cloud as contrib_cloud
import utils
flags.DEFINE_string('cbt_project', None,
'The project used to connect to the cloud bigtable ')
# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
'The identifier of the cloud bigtable instance in cbt_project')
# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
'The table within the cloud bigtable instance to use')
FLAGS = flags.FLAGS
# Constants
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'
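# For example, game number 1234 yields the row prefix 'g_0000001234_' and
# the count row prefix 'ct_0000001234_'.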
# Model tables (models, models_for_eval) row key
MODEL_PREFIX = "m_{run}_{num:0>10}"
# Name of model
MODEL_NAME = b'model'
# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100
# Column family and qualifier constants.
# Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'
# Column Qualifiers
# Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'
# Patterns
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')
# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
'BigtableSpec',
['project', 'instance', 'table'])
# Information needed to create a mix of two Game queues.
# r = resign/regular; c = calibration (no-resign)
GameMix = collections.namedtuple(
'GameMix',
['games_r', 'moves_r',
'games_c', 'moves_c',
'selection'])
def cbt_intvalue(value):
"""Decode a big-endian uint64.
Cloud Bigtable stores integers as big-endian uint64,
and performs this translation when integers are being
set. But when being read, the values need to be
decoded.
"""
return int(struct.unpack('>q', value)[0])
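# For example: struct.pack('>q', 5) == b'\x00\x00\x00\x00\x00\x00\x00\x05',
# so cbt_intvalue(b'\x00\x00\x00\x00\x00\x00\x00\x05') == 5.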
def make_single_array(ds, batch_size=8*1024):
"""Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array.
"""
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[-1]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
"""Given dataset of key names, return histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter
"""
ds = ds.batch(batch_size)
# Turns 'g_0000001234_m_133' into 'g_0000001234'
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
# NOTE: Cannot be truly sure the count is right till the end.
return h
def _game_keys_as_array(ds):
"""Turn keys of a Bigtable dataset into an array.
Take g_GGG_m_MMM and create GGG.MMM numbers.
Valuable when visualizing the distribution of a given dataset in
the game keyspace.
"""
ds = ds.map(lambda row_key, cell: row_key)
# want 'g_0000001234_m_133' to become '0000001234.133' and so forth
ds = ds.map(lambda x:
tf.strings.to_number(tf.strings.substr(x, 2, 10) +
'.' +
tf.strings.substr(x, 15, 3),
out_type=tf.float64))
return make_single_array(ds)
def _delete_rows(args):
"""Delete the given row keys from the given Bigtable.
The args are (BigtableSpec, row_keys), but are passed
as a single argument in order to work with
multiprocessing.Pool.map. This is also the reason why this is a
top-level function instead of a method.
"""
btspec, row_keys = args
bt_table = bigtable.Client(btspec.project).instance(
btspec.instance).table(btspec.table)
rows = [bt_table.row(k) for k in row_keys]
for r in rows:
r.delete()
bt_table.mutate_rows(rows)
return row_keys
class GameQueue:
"""Queue of games stored in a Cloud Bigtable.
The state of the table is stored in the `table_state`
row, which includes the columns `metadata:game_counter`.
"""
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = contrib_cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@property
def latest_game_number(self):
"""Return the number of the next game to be written."""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, GAME_COUNTER, GAME_COUNTER))
if table_state is None:
return 0
return cbt_intvalue(table_state.cell_value(METADATA, GAME_COUNTER))
@latest_game_number.setter
def latest_game_number(self, latest):
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, GAME_COUNTER, int(latest))
table_state.commit()
def games_by_time(self, start_game, end_game):
"""Given a range of games, return the games sorted by time.
Returns [(time, game_number), ...]
The time will be a `datetime.datetime` and the game
number is the integer used as the basis of the row ID.
Note that when a cluster of self-play nodes are writing
concurrently, the game numbers may be out of order.
"""
move_count = b'move_count'
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(start_game),
ROWCOUNT_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, move_count, move_count))
def parse(r):
rk = str(r.row_key, 'utf-8')
game = _game_from_counter.match(rk).groups()[0]
return (r.cells[METADATA][move_count][0].timestamp, game)
return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(
self.tf_table.keys_by_range_dataset(
format_str.format(start_game),
format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (
start_game, end_game))
return
utils.dbg('Deleting %d rows: %s..%s' % (
len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY,
multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = []
pool.map(_delete_rows, batches)
batches = []
def trim_games_since(self, t, max_games=500000):
"""Trim off the games since the given time.
Search back no more than max_games for this time point, locate
the game there, and remove all games since that game,
resetting the latest game counter.
If `t` is a `datetime.timedelta`, then the target time will be
found by subtracting that delta from the time of the last
game. Otherwise, it will be the target time.
"""
latest = self.latest_game_number
earliest = int(latest - max_games)
gbt = self.games_by_time(earliest, latest)
if not gbt:
utils.dbg('No games between %d and %d' % (earliest, latest))
return
most_recent = gbt[-1]
if isinstance(t, datetime.timedelta):
target = most_recent[0] - t
else:
target = t
i = bisect.bisect_right(gbt, (target,))
if i >= len(gbt):
utils.dbg('Last game is already at %s' % gbt[-1][0])
return
when, which = gbt[i]
utils.dbg('Most recent: %s %s' % most_recent)
utils.dbg(' Target: %s %s' % (when, which))
which = int(which)
self.delete_row_range(ROW_PREFIX, which, latest)
self.delete_row_range(ROWCOUNT_PREFIX, which, latest)
self.latest_game_number = which
def bleakest_moves(self, start_game, end_game):
"""Given a range of games, return the bleakest moves.
Returns a list of (game, move, q) sorted by q.
"""
bleak = b'bleakest_q'
rows = self.bt_table.read_rows(
ROW_PREFIX.format(start_game),
ROW_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, bleak, bleak))
def parse(r):
rk = str(r.row_key, 'utf-8')
g, m = _game_row_key.match(rk).groups()
q = r.cell_value(METADATA, bleak)
return int(g), int(m), float(q)
return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
def require_fresh_games(self, number_fresh):
"""Require a given number of fresh games to be played.
Args:
number_fresh: integer, number of new fresh games needed
Increments the cell `table_state=metadata:wait_for_game_number`
by the given number of games. This will cause
`self.wait_for_fresh_games()` to block until the game
counter has reached this number.
"""
latest = self.latest_game_number
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh))
table_state.commit()
print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
def wait_for_fresh_games(self, poll_interval=15.0):
"""Block caller until required new games have been played.
Args:
poll_interval: number of seconds to wait between checks
If the cell `table_state=metadata:wait_for_game_number` exists,
then block the caller, checking every `poll_interval` seconds,
until `table_state=metadata:game_counter` is at least the value
in that cell.
"""
wait_until_game = self.read_wait_cell()
if not wait_until_game:
return
latest_game = self.latest_game_number
last_latest = latest_game
while latest_game < wait_until_game:
utils.dbg('Latest game {} not yet at required game {} '
'(+{}, {:0.3f} games/sec)'.format(
latest_game,
wait_until_game,
latest_game - last_latest,
(latest_game - last_latest) / poll_interval
))
time.sleep(poll_interval)
last_latest = latest_game
latest_game = self.latest_game_number
def read_wait_cell(self):
"""Read the value of the cell holding the 'wait' value,
Returns the int value of whatever it has, or None if the cell doesn't
exist.
"""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, WAIT_CELL, WAIT_CELL))
if table_state is None:
utils.dbg('No waiting for new games needed; '
'wait_for_game_number column not in table_state')
return None
value = table_state.cell_value(METADATA, WAIT_CELL)
if not value:
utils.dbg('No waiting for new games needed; '
'no value in wait_for_game_number cell '
'in table_state')
return None
return cbt_intvalue(value)
def count_moves_in_game_range(self, game_begin, game_end):
"""Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
"""
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
def moves_from_games(self, start_game, end_game, moves, shuffle,
column_family, column):
"""Dataset of samples and/or shuffled moves from game range.
Args:
start_game: an integer indicating the first game in the range.
end_game: an integer indicating the game at the end of the range.
moves: an integer indicating how many moves should be sampled
from the games in that range.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the given range of games in the table.
"""
start_row = ROW_PREFIX.format(start_game)
end_row = ROW_PREFIX.format(end_game)
# NOTE: Choose a probability high enough to guarantee at least the
# required number of moves, by using a slightly lower estimate
# of the total moves, then trimming the result.
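# E.g. (illustrative numbers): moves=1000 and total_moves=50000 give a
# scan probability of 1000 / (50000 * 0.99) ~= 0.0202; the take(moves)
# below trims any surplus rows.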
total_moves = self.count_moves_in_game_range(start_game, end_game)
probability = moves / (total_moves * 0.99)
utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f; moves %d' % (
start_row, end_row, total_moves, probability, moves))
ds = self.tf_table.parallel_scan_range(start_row, end_row,
probability=probability,
columns=[(column_family, column)])
if shuffle:
utils.dbg('Doing a complete shuffle of %d moves' % moves)
ds = ds.shuffle(moves)
ds = ds.take(moves)
return ds
def moves_from_last_n_games(self, n, moves, shuffle,
column_family, column):
"""Randomly choose a given number of moves from the last n games.
Args:
n: number of games at the end of this GameQueue to source.
moves: number of moves to be sampled from `n` games.
shuffle: if True, shuffle the selected moves.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
Returns:
a dataset containing the selected moves.
"""
self.wait_for_fresh_games()
latest_game = self.latest_game_number
utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))
if latest_game == 0:
raise ValueError('Cannot find a latest game in the table')
start = int(max(0, latest_game - n))
ds = self.moves_from_games(start, latest_game, moves, shuffle,
column_family, column)
return ds
def _write_move_counts(self, sess, h):
"""Add move counts from the given histogram to the table.
Used to update the move counts in an existing table. Should
not be needed except for backfill or repair.
Args:
sess: TF session to use for doing a Bigtable write.
h: a dictionary keyed by game row prefix ("g_0023561") whose values
are the move counts for each game.
"""
def gen():
for k, v in h.items():
# The keys in the histogram may be of type 'bytes'
k = str(k, 'utf-8')
vs = str(v)
yield (k.replace('g_', 'ct_') + '_%d' % v, vs)
yield (k + '_m_000', vs)
mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string))
wr_op = self.tf_table.write(mc,
column_families=[METADATA],
columns=[MOVE_COUNT])
sess.run(wr_op)
def update_move_counts(self, start_game, end_game, interval=1000):
"""Used to update the move_count cell for older games.
Should not be needed except for backfill or repair.
move_count cells will be updated in both g_<game_id>_m_000 rows
and ct_<game_id>_<move_count> rows.
"""
for g in range(start_game, end_game, interval):
with tf.Session() as sess:
start_row = ROW_PREFIX.format(g)
end_row = ROW_PREFIX.format(g + interval)
print('Range:', start_row, end_row)
start_time = time.time()
ds = self.tf_table.keys_by_range_dataset(start_row, end_row)
h = _histogram_move_keys_by_game(sess, ds)
self._write_move_counts(sess, h)
end_time = time.time()
elapsed = end_time - start_time
print(' games/sec:', len(h)/elapsed)
def set_fresh_watermark(game_queue, count_from, window_size,
fresh_fraction=0.05, minimum_fresh=20000):
"""Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games already played is measured from the 'count_from' game.
Args:
game_queue: A GameQueue object whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
"""
already_played = game_queue.latest_game_number - count_from
print("== already_played: ", already_played, flush=True)
if window_size > count_from: # Handle the case where the window is not yet 'full'.
game_queue.require_fresh_games(int(minimum_fresh * .9))
else:
num_to_play = max(0, math.ceil(window_size * .9 * fresh_fraction) - already_played)
print("== Num to play: ", num_to_play, flush=True)
game_queue.require_fresh_games(num_to_play)
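# Worked example (illustrative numbers): once the window is full
# (count_from >= window_size), window_size=125000, fresh_fraction=0.05 and
# already_played=2000 require max(0, ceil(125000 * .9 * .05) - 2000) = 3625
# more fresh games.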
def mix_by_decile(games, moves, deciles=9):
"""Compute a mix of regular and calibration games by decile.
deciles should be an integer between 0 and 10 inclusive.
"""
assert 0 <= deciles <= 10
# The prefixes and suffixes below have the following meanings:
# ct_: count
# fr_: fraction
# _r: resign (ordinary)
# _nr: no-resign
ct_total = 10
lesser = ct_total - math.floor(deciles)
greater = ct_total - lesser
ct_r, ct_nr = greater, lesser
fr_r = ct_r / ct_total
fr_nr = ct_nr / ct_total
games_r = math.ceil(games * fr_r)
moves_r = math.ceil(moves * fr_r)
games_c = math.floor(games * fr_nr)
moves_c = math.floor(moves * fr_nr)
selection = np.array([0] * ct_r + [1] * ct_nr, dtype=np.int64)
return GameMix(games_r, moves_r,
games_c, moves_c,
selection)
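# Worked example: mix_by_decile(1000, 100, deciles=9) keeps a 9:1 mix, i.e.
# GameMix(games_r=900, moves_r=90, games_c=100, moves_c=10,
#         selection=[0]*9 + [1]).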
def get_unparsed_moves_from_last_n_games(games, games_nr, n,
moves=2**21,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from the last N games.
Args:
games, games_nr: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
n: an integer indicating how many past games should be sourced.
moves: an integer indicating how many moves should be sampled
from those N games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the last `n` games in the table.
"""
mix = mix_by_decile(n, moves, 9)
resign = games.moves_from_last_n_games(
mix.games_r,
mix.moves_r,
shuffle,
column_family, column)
no_resign = games_nr.moves_from_last_n_games(
mix.games_c,
mix.moves_c,
shuffle,
column_family, column)
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, no_resign], choice)
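# choose_from_datasets consumes the repeated 0/1 pattern above, so examples
# are interleaved roughly 9:1 between the resign and no-resign queues.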
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def get_unparsed_moves_from_games(games_r, games_c,
start_r, start_c,
mix,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from a given start point.
Args:
games_r, games_c: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
start_r: an integer indicating the game number to start at in games_r.
start_c: an integer indicating the game number to start at in games_c.
mix: the result of mix_by_decile()
shuffle: if True, shuffle the selected move examples.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than the moves implied by `mix`,
sampled randomly from the game ranges implied.
"""
resign = games_r.moves_from_games(
start_r, start_r + mix.games_r, mix.moves_r, shuffle, column_family, column)
calibrated = games_c.moves_from_games(
start_c, start_c + mix.games_c, mix.moves_c, shuffle, column_family, column)
moves = mix.moves_r + mix.moves_c
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, calibrated], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8):
"""Count and return all the elements in the given dataset.
Debugging function. The elements in a dataset cannot be counted
without enumerating all of them. By counting in batch and in
parallel, this method allows rapid traversal of the dataset.
Args:
ds: The dataset whose elements should be counted.
batch_size: the number of elements to count at a time.
parallel_batch: how many batches to count in parallel.
Returns:
The number of elements in the dataset.
"""
with tf.Session() as sess:
dsc = ds.apply(tf.data.experimental.enumerate_dataset())
dsc = dsc.apply(tf.data.experimental.map_and_batch(
lambda c, v: c, batch_size, num_parallel_batches=parallel_batch))
iterator = dsc.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
counted = 0
try:
while True:
# The numbers in the tensors are 0-based indices,
# so add 1 to get the number counted.
counted = sess.run(tf.reduce_max(get_next)) + 1
utils.dbg('Counted so far: %d' % counted)
except tf.errors.OutOfRangeError:
pass
utils.dbg('Counted total: %d' % counted)
return counted<|fim▁end|> | import math
import multiprocessing
import operator |
<|file_name|>CalendarDayEntriesSpec.ts<|end_file_name|><|fim▁begin|>/// <reference path="../typings/jasmine/jasmine.d.ts" />
/// <reference path="../src/CalendarDayEntries.ts"/>
module CalendarDay
{
class CalendarDayEntriesMock extends CalendarDayEntries
{
callCounter : number = 0;
init(year : number) : void
{
this.callCounter++;
this.setDayEntry('20150604', { name: '04 ok', isFeastDay: false });
this.setDayEntry('20150614', { name: '14 ok', isFeastDay: true });
}
}
describe("Tests the storage of days", () => {
it("for correct initialisation", () => {
var calendarDayEntries : CalendarDayEntriesMock = new CalendarDayEntriesMock({});
var result = calendarDayEntries.getDayEntry('20150604');
expect(calendarDayEntries.callCounter).toBe(1);
expect(result.name).toBe('04 ok');
expect(result.isFeastDay).toBeFalsy();
result = calendarDayEntries.getDayEntry('20150614');
expect(calendarDayEntries.callCounter).toBe(1);
expect(result.name).toBe('14 ok');
expect(result.isFeastDay).toBeTruthy();
var result = calendarDayEntries.getDayEntry('20150604');
expect(calendarDayEntries.callCounter).toBe(1);
expect(result.name).toBe('04 ok');
expect(result.isFeastDay).toBeFalsy();
var result = calendarDayEntries.getDayEntry('20160604');
expect(calendarDayEntries.callCounter).toBe(2);
expect(result.name).toBe('');<|fim▁hole|>}<|fim▁end|> | expect(result.isFeastDay).toBeFalsy();
});
}); |
<|file_name|>XEMOutputControler.cpp<|end_file_name|><|fim▁begin|>/***************************************************************************
SRC/MIXMOD/XEMOutputControler.cpp description
copyright : (C) MIXMOD Team - 2001-2011
email : [email protected]
***************************************************************************/
/***************************************************************************
This file is part of MIXMOD
MIXMOD is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MIXMOD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MIXMOD. If not, see <http://www.gnu.org/licenses/>.
All informations available on : http://www.mixmod.org
***************************************************************************/
#include <string.h>
#include "XEMOutputControler.h"
#include "XEMUtil.h"
//------------
// Constructor
//------------
XEMOutputControler::XEMOutputControler(){
int64_t i;
_output = NULL;
_nbOutputFiles = maxNbOutputFiles;
for (i=0; i<_nbOutputFiles; i++){
_tabOutputTypes[i] = (XEMOutputType) i;
}
createEmptyFiles();
}
//------------
// Constructor
//------------
XEMOutputControler::XEMOutputControler(XEMOutput * output){
int64_t i;
_output = output;
_nbOutputFiles = maxNbOutputFiles;
for (i=0; i<_nbOutputFiles; i++){
_tabOutputTypes[i] = (XEMOutputType) i;
}
createEmptyFiles();
}
//-----------
// Destructor
//-----------
XEMOutputControler::~XEMOutputControler(){
int64_t i;
//cout<<"tabFileName"<<endl;
for (i=0; i<maxNbOutputFiles; i++){
//cout<<"tabFileName : "<<_tabFiles[i]<<endl;
delete _tabFiles[i];
}
}
//---------
// Selector
//---------
void XEMOutputControler::setOutput(XEMOutput * output){
_output = output;
}
//---------
// editFile
//---------
void XEMOutputControler::editFile(){
int64_t i;
if (_output){
_output->editFile(_tabFiles);
for (i=0; i<_nbOutputFiles; i++){
_tabFiles[i]->close();
}
}
}
//---------
// editErroMixmodFile
//---------
void XEMOutputControler::editErroMixmodFile(XEMErrorType error){
// errorMixmodOutput
int64_t i = (int64_t)errorMixmodOutput;
*_tabFiles[i]<<error<<endl;
}
//--------------------------
// Create Empty output Files
//--------------------------
void XEMOutputControler::createEmptyFiles(){
int64_t i,j;
string charCriterionType= "";
string fileName = "";
int64_t nbOutputFileType = 9;
string * fileNameTmp = new string[nbOutputFileType];
for (i=0 ; i<nbOutputFileType; i++){
fileNameTmp[i] = "";
}
fileNameTmp[0] = "standard.txt";
fileNameTmp[1] = "numericStandard.txt";
fileNameTmp[2] = "label.txt";
fileNameTmp[3] = "parameter.txt";
fileNameTmp[4] = "posteriorProbabilities.txt";
fileNameTmp[5] = "partition.txt";
fileNameTmp[6] = "likelihood.txt";
fileNameTmp[7] = "Error.txt";
fileNameTmp[8] = "numericLikelihood.txt";
// CV, BIC, NEC, ICL, DCV ... files
//---------------------------------
for (i=0;i<nbMaxSelection;i++){
if (i==0){
charCriterionType = "BIC";
}
if (i==1){
charCriterionType = "CV";
}
if (i==2){
charCriterionType = "ICL";
}
if (i==3){
charCriterionType = "NEC";
}
if (i==4){
charCriterionType = "DCV";
}
for (j=0; j<nbOutputFileType; j++){
fileName = charCriterionType;
_tabFiles[nbOutputFileType*i+j] = new ofstream(fileName.append(fileNameTmp[j]).c_str(), ios::out);
_tabFiles[nbOutputFileType*i+j]->setf(ios::fixed, ios::floatfield);
}
}
// other files
//------------
//char * completeFileName ="complete.txt";
_tabFiles[(int64_t)completeOutput] = new ofstream("complete.txt", ios::out);
_tabFiles[(int64_t)completeOutput]->setf(ios::fixed, ios::floatfield);
//char * numericCompleteFileName ="numericComplete.txt";
_tabFiles[(int64_t)numericCompleteOutput] = new ofstream("numericComplete.txt", ios::out);
_tabFiles[(int64_t)numericCompleteOutput]->setf(ios::fixed, ios::floatfield);
//char * CVlabelClassificationFileName = "CVlabelClassification.txt";
_tabFiles[(int64_t)CVlabelClassificationOutput] = new ofstream("CVlabelClassification.txt", ios::out);
_tabFiles[(int64_t)CVlabelClassificationOutput]->setf(ios::fixed, ios::floatfield);
//char * errorMixmodFileName = "errorMixmod.txt";
_tabFiles[(int64_t)errorMixmodOutput] = new ofstream("errorMixmod.txt", ios::out);
_tabFiles[(int64_t)errorMixmodOutput]->setf(ios::fixed, ios::floatfield);
//char * errorModelFileName = "errorModel.txt";
_tabFiles[(int64_t)errorModelOutput] = new ofstream("errorModel.txt", ios::out);
_tabFiles[(int64_t)errorModelOutput]->setf(ios::fixed, ios::floatfield);
// DCV files
_tabFiles[(int64_t)DCVinfo] = new ofstream("DCVinfo.txt", ios::out);
_tabFiles[(int64_t)DCVinfo]->setf(ios::fixed, ios::floatfield);
_tabFiles[(int64_t)DCVnumericInfo] = new ofstream("DCVnumericInfo.txt", ios::out);
_tabFiles[(int64_t)DCVnumericInfo]->setf(ios::fixed, ios::floatfield);
// verify if files are open
for (i=0 ;i<_nbOutputFiles; i++){
if (! _tabFiles[i]->is_open())
throw errorOpenFile;<|fim▁hole|> delete numericCompleteFileName;
delete CVlabelClassificationFileName;
delete errorMixmodFileName;
delete errorModelFileName;
*/
}<|fim▁end|> | }
delete [] fileNameTmp;
/*delete completeFileName; |
<|file_name|>CNAVMessageAsm.hpp<|end_file_name|><|fim▁begin|>#pragma ident "$Id:$"
//============================================================================
//
// This file is part of GPSTk, the GPS Toolkit.
//
// The GPSTk is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published
// by the Free Software Foundation; either version 2.1 of the License, or
// any later version.
//
// The GPSTk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with GPSTk; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
//
// Copyright 2013, The University of Texas at Austin
//
//============================================================================
//============================================================================
//
//This software developed by Applied Research Laboratories at the University of
//Texas at Austin, under contract to an agency or agencies within the U.S.
//Department of Defense. The U.S. Government retains all rights to use,
//duplicate, distribute, disclose, or release this software.
//
//Pursuant to DoD Directive 523024
//
// DISTRIBUTION STATEMENT A: This software has been approved for public
// release, distribution is unlimited.
//
//=============================================================================
#ifndef CNAVMESSAGEASM_HPP
#define CNAVMESSAGEASM_HPP
// Utility class to convert MDPNavSubframe data into CNAV messages.
#include "MDPNavSubframe.hpp"
#include "GPSOrbElemStore.hpp"
#include "ObsID.hpp"
#include "PackedNavBits.hpp"
#include "SatID.hpp"
class CNAVMessageAsm
{
public:
CNAVMessageAsm();
~CNAVMessageAsm() {}
// Add a MDPNavSubframe to the current set.
// Add whatever larger structures are possible the ephemeris store.
void addMDPNavSubframe( const gpstk::MDPNavSubframe& mdpsf);
// const gpstk::GPSOrbElemStore& store);
// The index is 0 = msg type 11
// 1 = msg type 12
// 2 = msg type 3x
//
// These are used to group together the sets of message 11/12/3x.
//typedef std::map<gpstk::ObsID,MapByMsgType> MapByObsID;
<|fim▁hole|> gpstk::PackedNavBits* currentMsgMap[3];
short currentWeek;
bool weekSet;
private:
short getMessageType(const gpstk::MDPNavSubframe& mdpsf);
short getPRNID(const gpstk::MDPNavSubframe& mdpsf);
short getWeek(const gpstk::MDPNavSubframe& mdpsf);
unsigned long getTOWCount(const gpstk::MDPNavSubframe& mdpsf);
};
#endif<|fim▁end|> | //typedef std::map<gpstk::SatID,MapByObsID> CurrentMsgMap;
|
<|file_name|>post_migration.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Akretion
# (<http://www.akretion.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,<|fim▁hole|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import pooler, SUPERUSER_ID
from openerp.openupgrade import openupgrade, openupgrade_80
@openupgrade.migrate()
def migrate(cr, version):
pool = pooler.get_pool(cr.dbname)
uid = SUPERUSER_ID
openupgrade_80.set_message_last_post(
        cr, uid, pool, ['fleet.vehicle']
)<|fim▁end|> | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
<|file_name|>test_caps_num_lock_indicator.py<|end_file_name|><|fim▁begin|># Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import subprocess
from typing import List
import pytest
from libqtile.widget import caps_num_lock_indicator<|fim▁hole|>
class MockCapsNumLockIndicator:
CalledProcessError = None
info: List[List[str]] = []
is_error = False
index = 0
@classmethod
def reset(cls):
cls.info = [
[
"Keyboard Control:",
" auto repeat: on key click percent: 0 LED mask: 00000002",
" XKB indicators:",
" 00: Caps Lock: off 01: Num Lock: on 02: Scroll Lock: off",
" 03: Compose: off 04: Kana: off 05: Sleep: off",
],
[
"Keyboard Control:",
" auto repeat: on key click percent: 0 LED mask: 00000002",
" XKB indicators:",
" 00: Caps Lock: on 01: Num Lock: on 02: Scroll Lock: off",
" 03: Compose: off 04: Kana: off 05: Sleep: off",
],
]
cls.index = 0
cls.is_error = False
@classmethod
def call_process(cls, cmd):
if cls.is_error:
raise subprocess.CalledProcessError(-1, cmd=cmd, output="Couldn't call xset.")
if cmd[1:] == ["q"]:
track = cls.info[cls.index]
output = "\n".join(track)
return output
def no_op(*args, **kwargs):
pass
@pytest.fixture
def patched_cnli(monkeypatch):
MockCapsNumLockIndicator.reset()
monkeypatch.setattr(
"libqtile.widget.caps_num_lock_indicator.subprocess", MockCapsNumLockIndicator
)
monkeypatch.setattr(
"libqtile.widget.caps_num_lock_indicator.subprocess.CalledProcessError",
subprocess.CalledProcessError,
)
monkeypatch.setattr(
"libqtile.widget.caps_num_lock_indicator.base.ThreadPoolText.call_process",
MockCapsNumLockIndicator.call_process,
)
return caps_num_lock_indicator
def test_cnli(fake_qtile, patched_cnli, fake_window):
widget = patched_cnli.CapsNumLockIndicator()
fakebar = FakeBar([widget], window=fake_window)
widget._configure(fake_qtile, fakebar)
text = widget.poll()
assert text == "Caps off Num on"
def test_cnli_caps_on(fake_qtile, patched_cnli, fake_window):
widget = patched_cnli.CapsNumLockIndicator()
# Simulate Caps on
MockCapsNumLockIndicator.index = 1
fakebar = FakeBar([widget], window=fake_window)
widget._configure(fake_qtile, fakebar)
text = widget.poll()
assert text == "Caps on Num on"
def test_cnli_error_handling(fake_qtile, patched_cnli, fake_window):
widget = patched_cnli.CapsNumLockIndicator()
# Simulate a CalledProcessError exception
MockCapsNumLockIndicator.is_error = True
fakebar = FakeBar([widget], window=fake_window)
widget._configure(fake_qtile, fakebar)
text = widget.poll()
# Widget does nothing with error message so text is blank
assert text == ""<|fim▁end|> | from test.widgets.conftest import FakeBar |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>fn compute_kmp_table(word: &String) -> Vec<i32> {
let mut table : Vec<i32> = vec![0; word.len()];
let mut pos = 2;
let mut cnd = 0;
let word_chars : Vec<char> = word.chars().collect::<Vec<char>>();
table[0] = -1;
table[1] = 0;
while pos < word.len() {
if word_chars[pos - 1] == word_chars[cnd] {
table[pos] = (cnd + 1) as i32;
cnd += 1;
pos += 1;
} else if cnd > 0 {
cnd = (table[cnd]) as usize;
} else {
table[pos] = 0;
pos += 1;
}
}
table
}
fn kmp_search(word: &String, text: &String) -> usize {
let mut m : usize = 0;
let mut i : usize = 0;
let table : Vec<i32> = compute_kmp_table(&word);
let text_len : usize = text.len();
let word_len : usize = word.len();
let word_chars : Vec<char> = word.chars().collect::<Vec<char>>();
let text_chars : Vec<char> = text.chars().collect::<Vec<char>>();
while m + i < text_len {
if word_chars[i] == text_chars[m + i] {
if i == word_len - 1 {
return m;
}
i += 1;
} else {
if table[i] > -1 {
m += i - (table[i] as usize);
i = table[i] as usize;
} else {
i = 0;
m += 1;
}
}
}
text_len // no match found
}
#[test]
fn compute_kmp_table_test_1() {
let word : String = "ABCDABD".to_string();
let expected_res : Vec<i32> = vec![-1, 0, 0, 0, 0, 1, 2];
let res : Vec<i32> = compute_kmp_table(&word);
assert_eq!(res.len(), expected_res.len());
for i in 0..res.len() {
assert_eq!(res[i], expected_res[i]);
}
}
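// Illustrative extra test (not in the original source): when the pattern never
// occurs, the scan falls through the loop and kmp_search returns text.len().
#[test]
fn kmp_search_no_match() {
    let word : String = "ABCDABD".to_string();
    let text : String = "XYZ".to_string();
    let res : usize = kmp_search(&word, &text);
    assert_eq!(res, text.len());
}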
#[test]
fn kmp_search_1() {
let word : String = "ABCDABD".to_string();
let text : String = "ABC ABCDAB ABCDABCDABDE".to_string();
let expected_res : usize = 15;
    let res : usize = kmp_search(&word, &text);<|fim▁hole|><|fim▁end|>
assert_eq!(res, expected_res);
} |
<|file_name|>createlink-base-min.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1
oid sha256:a92e0a42b6e55c21f7494e8604852635b90e81170e9f49207a0a68c2174158ee<|fim▁hole|><|fim▁end|> | size 877 |
<|file_name|>hook-cryptography.py<|end_file_name|><|fim▁begin|># -----------------------------------------------------------------------------
# Copyright (c) 2014, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------<|fim▁hole|>
"""
Hook for cryptography module from the Python Cryptography Authority.
"""
import os.path
import glob
from PyInstaller.hooks.hookutils import (collect_submodules as cs,
get_module_file_attribute)
from PyInstaller.hooks.hookutils import PY_EXTENSION_SUFFIXES
# add the OpenSSL FFI binding modules as hidden imports
hiddenimports = cs('cryptography.hazmat.bindings.openssl') # pragma: no cover
def hook(mod):
"""
Include the cffi extensions as binaries in a subfolder named like the
package. The cffi verifier expects to find them inside the package
directory for the main module. We cannot use hiddenimports because that
would add the modules outside the package.
"""
crypto_dir = os.path.dirname(get_module_file_attribute('cryptography'))
for ext in PY_EXTENSION_SUFFIXES:
ffimods = glob.glob(os.path.join(crypto_dir,
'*_cffi_*%s*' % ext))
for f in ffimods:
name = os.path.join('cryptography', os.path.basename(f))
# TODO fix this hook to use attribute 'binaries'.
mod.pyinstaller_binaries.append((name, f, 'BINARY'))
return mod<|fim▁end|> | |
<|file_name|>dashboard.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
import { Hero } from './hero';
import { HeroService } from './hero.service';
@Component ({
moduleId: module.id,<|fim▁hole|> styleUrls: [ './dashboard.component.css' ],
})
export class DashboardComponent implements OnInit {
heroes: Hero[] = [];
constructor(private heroService: HeroService){}
ngOnInit(): void {
this.heroService.getHeroes()
.then(heroes => this.heroes = heroes.slice(1,5));
}
}<|fim▁end|> | selector: 'my-dashboard',
templateUrl: 'dashboard.component.html', |
<|file_name|>Callback.C<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2010 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License<|fim▁hole|>
\*---------------------------------------------------------------------------*/
#include "Callback.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
template<class CallbackType>
Foam::Callback<CallbackType>::Callback(CallbackRegistry<CallbackType>& cbr)
:
cbr_(cbr)
{
checkIn();
}
template<class CallbackType>
Foam::Callback<CallbackType>::Callback(const Callback<CallbackType>& cb)
:
cbr_(cb.cbr_)
{
checkIn();
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
template<class CallbackType>
Foam::Callback<CallbackType>::~Callback()
{
checkOut();
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class CallbackType>
bool Foam::Callback<CallbackType>::checkIn()
{
if (!Callback<CallbackType>::link::registered())
{
cbr_.append(static_cast<CallbackType*>(this));
return true;
}
else
{
return false;
}
}
template<class CallbackType>
bool Foam::Callback<CallbackType>::checkOut()
{
if (Callback<CallbackType>::link::registered())
{
CallbackType* cbPtr = cbr_.remove(static_cast<CallbackType*>(this));
if (cbPtr)
{
return true;
}
else
{
return false;
}
}
else
{
return false;
}
}
// ************************ vim: set sw=4 sts=4 et: ************************ //<|fim▁end|> | along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. |
<|file_name|>__manifest__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2016 Onestein (<http://www.onestein.eu>)<|fim▁hole|>
{
'name': "Absence Management",
'summary': """Create time based absence notifications""",
'author': 'Onestein',
'website': 'http://www.onestein.eu',
'images': ['static/description/main_screenshot.png'],
'category': 'Human Resources',
'version': '10.0.1.0.0',
'license': 'AGPL-3',
'depends': [
'hr_holidays',
],
'data': [
'security/ir.model.access.csv',
'views/hr_holidays_status.xml',
'views/hr_holidays.xml',
'data/hr_absenteeism_cron.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
}<|fim▁end|> | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). |
<|file_name|>imports.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused)]
// Like other items, private imports can be imported and used non-lexically in paths.
mod a {
use a as foo;
use self::foo::foo as bar;
mod b {
use super::bar;
}
}
mod foo { pub fn f() {} }
mod bar { pub fn f() {} }
pub fn f() -> bool { true }
// Items and explicit imports shadow globs.
fn g() {
use foo::*;
use bar::*;
fn f() -> bool { true }
let _: bool = f();
}
fn h() {
use foo::*;
use bar::*;
use f;
let _: bool = f();
}
// Here, there appears to be shadowing but isn't because of namespaces.
mod b {
use foo::*; // This imports `f` in the value namespace.
use super::b as f; // This imports `f` only in the type namespace,
fn test() { self::f(); } // so the glob isn't shadowed.
}
// Here, there is shadowing in one namespace, but not the other.<|fim▁hole|> mod test {
pub fn f() {}
pub mod f {}
}
use self::test::*; // This glob-imports `f` in both namespaces.
mod f { pub fn f() {} } // This shadows the glob only in the value namespace.
fn test() {
self::f(); // Check that the glob-imported value isn't shadowed.
self::f::f(); // Check that the glob-imported module is shadowed.
}
}
// Unused names can be ambiguous.
mod d {
pub use foo::*; // This imports `f` in the value namespace.
pub use bar::*; // This also imports `f` in the value namespace.
}
mod e {
pub use d::*; // n.b. Since `e::f` is not used, this is not considered to be a use of `d::f`.
}
fn main() {}<|fim▁end|> | mod c { |
<|file_name|>CountAccumulateFunction.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.core.base.accumulators;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
/**
 * An implementation of an accumulator capable of counting occurrences
*/
public class CountAccumulateFunction extends AbstractAccumulateFunction<CountAccumulateFunction.CountData> {
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
}
public void writeExternal(ObjectOutput out) throws IOException {
}
protected static class CountData implements Externalizable {
public long count = 0;
public CountData() {}
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
count = in.readLong();
}
public void writeExternal(ObjectOutput out) throws IOException {
out.writeLong(count);
}
}
/* (non-Javadoc)
* @see org.kie.base.accumulators.AccumulateFunction#createContext()
*/
public CountData createContext() {
return new CountData();
}
/* (non-Javadoc)
* @see org.kie.base.accumulators.AccumulateFunction#init(java.lang.Object)
*/
public void init(CountData data) {
data.count = 0;
}
/* (non-Javadoc)
* @see org.kie.base.accumulators.AccumulateFunction#accumulate(java.lang.Object, java.lang.Object)
*/
public void accumulate(CountData data,
Object value) {
data.count++;
}
/* (non-Javadoc)
* @see org.kie.base.accumulators.AccumulateFunction#reverse(java.lang.Object, java.lang.Object)
*/
public void reverse(CountData data,
Object value) {
data.count--;
}
/* (non-Javadoc)
* @see org.kie.base.accumulators.AccumulateFunction#getResult(java.lang.Object)
*/
public Object getResult(CountData data) {
return new Long( data.count );
}
/* (non-Javadoc)
* @see org.kie.base.accumulators.AccumulateFunction#supportsReverse()
*/
public boolean supportsReverse() {<|fim▁hole|> * {@inheritDoc}
*/
public Class< ? > getResultType() {
return Long.class;
}
}<|fim▁end|> | return true;
}
/** |
<|file_name|>base.py<|end_file_name|><|fim▁begin|>import json
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from .exceptions import SparkPostAPIException
class TornadoTransport(object):
@gen.coroutine
def request(self, method, uri, headers, **kwargs):
if "data" in kwargs:
kwargs["body"] = kwargs.pop("data")
client = AsyncHTTPClient()
try:
response = yield client.fetch(uri, method=method, headers=headers,<|fim▁hole|> **kwargs)
except HTTPError as ex:
raise SparkPostAPIException(ex.response)
if response.code == 204:
raise gen.Return(True)
if response.code == 200:
result = None
try:
result = json.loads(response.body.decode("utf-8"))
            except ValueError:  # invalid JSON (UnicodeDecodeError is a ValueError)
pass
if result:
if 'results' in result:
raise gen.Return(result['results'])
raise gen.Return(result)
raise SparkPostAPIException(response)<|fim▁end|> | |
<|file_name|>pgconf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# $Id: $
"""
postgresql.conf configuration file reader
Module contents:
readfile() - Read postgresql.conf file
class gucdict - Container for postgresql.conf settings
class setting - Holds one setting
class ConfigurationError - a subclass of EnvironmentError
Example:
import lib.pgconf as pgconf
d = pgconf.readfile()
port = d.int('port', 5432)
pe = d.bool('password_encryption', False)
sb = d.kB('shared_buffers')
at = d.time('authentication_timeout', 'ms', 2500)
"""
import os
import os.path
import re
# Max recursion level for postgresql.conf include directives.
# The max value is 10 in the postgres code, so it's the same here.
MAX_RECURSION_LEVEL=10
def readfile(filename='postgresql.conf', defaultpath=None):
"""
Read postgresql.conf file and put the settings into a dictionary.
Returns the dictionary: a newly created pgconf.gucdict object.
If filename does not specify an absolute path, it is treated as relative
to defaultpath, or to the current working directory.
"""
if not os.path.isabs(filename):
if defaultpath is None:
defaultpath = os.getcwd()
filename = os.path.normpath(os.path.join(defaultpath, filename))
fp = open(filename)
try:
dictionary = gucdict()
dictionary.populate(fp, filename)
return dictionary
except Exception:
raise
finally:
fp.close()
class gucdict(dict):
"""
A container for settings from a postgresql.conf file.
Behaves as an ordinary dictionary, with a few added methods.
The keys of the dictionary are GUC names in lower case, and the
values are instances of the pgconf.setting class.
The populate() method loads the dictionary with settings from a file.
The str(), bool(), int(), float(), kB(), and time() methods return a
value from the dictionary, converted to internal form.
"""
def populate(self, lines, filename='', recurLevel=0):
'''
Given a postgresql.conf input file (or a list of strings, or some
iterable object yielding lines), look for lines of the form
name[=][value][#comment]
For each one found, construct a pgconf.setting object and put it
into our dictionary.
'''
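        # For example (illustrative), a line such as
        #     port = 6543  # custom port
        # becomes self['port'] = setting('port', '6543', filename, lineno, pos).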
if recurLevel == MAX_RECURSION_LEVEL:
raise Exception('could not open configuration file "%s": maximum nesting depth exceeded' % filename)
linenumber = 0
for line in lines:
linenumber += 1
m = _setpat.match(line)
if m:
name, value, pos = m.group(1), m.group(3), m.start(3)
if name == 'include':
try:
# Remove the ' from the filename and then convert to abspath if needed.<|fim▁hole|> if not incfilename.startswith('/') and filename != '':
incfilename = '%s/%s' % (filename[0:filename.rfind('/')], incfilename)
fp = open(incfilename)
self.populate(fp, incfilename, recurLevel+1)
fp.close()
except IOError:
raise Exception('File %s included from %s:%d does not exist' % (incfilename, filename, linenumber))
else:
self[name.lower()] = setting(name, value, filename, linenumber, pos)
def str(self, name, default=None):
"""
Return string setting, or default if absent.
"""
v = self.get(name)
if v:
return v.str()
else:
return default
def bool(self, name, default=None):
"""
Return Boolean setting, or default if absent.
"""
v = self.get(name)
if v:
return v.bool()
else:
return default
def int(self, name, default=None):
"""
Return integer setting, or default if absent.
"""
v = self.get(name)
if v:
return v.int()
else:
return default
def float(self, name, default=None):
"""
Return floating-point setting, or default if absent.
"""
v = self.get(name)
if v:
return v.float()
else:
return default
def kB(self, name, default=None):
"""
Return memory setting in units of 1024 bytes, or default if absent.
"""
v = self.get(name)
if v:
return v.kB()
else:
return default
def time(self, name, unit='s', default=None):
"""
Return time setting, or default if absent.
Specify desired unit as 'ms', 's', or 'min'.
"""
v = self.get(name)
if v:
return v.time(unit)
else:
return default
class setting(object):
"""
Holds a GUC setting from a postgresql.conf file.
The str(), bool(), int(), float(), kB(), and time() methods return the
value converted to the requested internal form. pgconf.ConfigurationError
is raised if the conversion fails, i.e. the value does not conform to the
expected syntax.
"""
def __init__(self, name, value, filename='', linenumber=0, pos=0):
self.name = name
self.value = value
self.filename = filename
self.linenumber = linenumber
self.pos = pos # starting offset of value within the input line
def __repr__(self):
return repr(self.value)
def str(self):
"""
Return the value as a string.
"""
v = self.value
if v and v.endswith("'"):
# Single-quoted string. Remove the opening and closing quotes.
# Replace each escape sequence with the character it stands for.
i = v.index("'") + 1
v = _escapepat.sub(_escapefun, v[i:-1])
return v
def bool(self):
"""
Interpret the value as a Boolean. Returns True or False.
"""
s = self.value
if s:
s = s.lower()
n = len(s)
if (s == '1' or
s == 'on' or
s == 'true'[:n] or
s == 'yes'[:n]):
return True
if (s == '0' or
s == 'off'[:n] or
s == 'false'[:n] or
s == 'no'[:n]):
return False
raise self.ConfigurationError('Boolean value should be one of: 1, 0, '
'on, off, true, false, yes, no.')
def int(self):
"""
Interpret the value as an integer. Returns an int or long.
"""
try:
return int(self.value, 0)
except ValueError:
raise self.ConfigurationError('Value should be integer.')
def float(self):
"""
Interpret the value as floating point. Returns a float.
"""
try:
return float(self.value)
except ValueError:
raise self.ConfigurationError('Value should be floating point.')
def kB(self):
"""
Interpret the value as an amount of memory. Returns an int or long,
in units of 1024 bytes.
"""
try:
m = 1
t = re.split('(kB|MB|GB)', self.value)
if len(t) > 1:
i = ['kB', 'MB', 'GB'].index(t[1])
m = (1, 1024, 1024*1024)[i]
try:
return int(t[0], 0) * m
except ValueError:
pass
return int(float(t[0]) * m)
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer or float '
'with optional suffix kB, MB, or GB '
'(kB is default).')
def time(self, unit='s'):
"""
Interpret the value as a time. Returns an int or long.
Specify desired unit as 'ms', 's', or 'min'.
"""
u = ['ms', 's', 'min'].index(unit)
u = (1, 1000, 60*1000)[u]
try:
m = u
t = re.split('(ms|s|min|h|d)', self.value)
if len(t) > 1:
i = ['ms', 's', 'min', 'h', 'd'].index(t[1])
m = (1, 1000, 60*1000, 3600*1000, 24*3600*1000)[i]
return int(t[0], 0) * m / u
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer with '
'optional suffix ms, s, min, h, or d '
'(%s is default).' % unit)
def ConfigurationError(self, msg):
msg = '(%s = %s) %s' % (self.name, self.value, msg)
return ConfigurationError(msg, self.filename, self.linenumber)
class ConfigurationError(EnvironmentError):
def __init__(self, msg, filename='', linenumber=0):
self.msg = msg
self.filename = filename
self.linenumber = linenumber
if linenumber:
msg = '%s line %d: %s' % (filename, linenumber, msg)
elif filename:
msg = '%s: %s' % (filename, msg)
EnvironmentError.__init__(self, msg)
def __str__(self):
return self.message
#-------------------------------- private --------------------------------
_setpat = re.compile(r"\s*(\w+)\s*(=\s*)?" # name [=]
'('
r"[eE]?('((\\.)?[^\\']*)*')+|" # single-quoted string or
r"[^\s#']*" # token ending at whitespace or comment
')')
_escapepat = re.compile(r"''|" # pair of single quotes, or
r"\\(" # backslash followed by
r"[0-7][0-7]?[0-7]?|" # nnn (1 to 3 octal digits) or
r"x[0-9A-Fa-f][0-9A-Fa-f]?|" # xHH (1 or 2 hex digits) or
r".)") # one char
def _escapefun(matchobj):
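    # Illustrative examples: '' -> ', \n -> newline, \x41 -> 'A', \101 -> 'A';
    # any other backslash-escaped character maps to itself.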
"""Callback to interpret an escape sequence"""
s = matchobj.group()
c = s[1]
i = "bfnrt".find(c)
if i >= 0:
c = "\b\f\n\r\t"[i]
elif c == 'x':
c = chr(int(s[2:], 16))
elif c in '01234567':
c = chr(int(s[1:], 8))
return c<|fim▁end|> | incfilename = value.strip("'") |
<|file_name|>speedo.cpp<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2013, ROSSER ALPHA LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
<|fim▁hole|> names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "speedo.h"
void Speedometer::init(Settings* pSettings)
{
m_pSettings = pSettings;
}
void Speedometer::measure()
{
// nyi
}
void Speedometer::report()
{
// nyi
}<|fim▁end|> | * Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of ROSSER ALPHA LLC nor the
|
<|file_name|>025.py<|end_file_name|><|fim▁begin|>from fibonacci import Fibonacci<|fim▁hole|>
def ans():
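    # The first 1000-digit Fibonacci number is the first term exceeding the
    # largest 999-digit number, int('9' * 999); Fibonacci.after presumably
    # returns that term and Fibonacci.index its position in the sequence.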
return Fibonacci.index(Fibonacci.after(int('9' * 999)))
if __name__ == '__main__':
print(ans())<|fim▁end|> | |
<|file_name|>attention_graphs.py<|end_file_name|><|fim▁begin|>import gzip
import os
import pickle
import matplotlib<|fim▁hole|>matplotlib.use('TkAgg')  # the backend must be selected before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
def show_attention():
# Load attentions
print('Loading attentions to pickle file')
with gzip.open(
os.path.join('training_results', 'torch_train', 'attentions.pkl.gz'),
'r') as att_file:
attentions = pickle.load(att_file)
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(np.mean(np.array(attentions),axis=(0,1)), cmap='bone')
fig.colorbar(cax)
# # Set up axes
# ax.set_xticklabels([''] + input_sentence.split(' ') +
# ['<EOS>'], rotation=90)
# ax.set_yticklabels([''] + output_words)
#
# # Show label at every tick
# ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
# ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
show_attention()<|fim▁end|> | |
<|file_name|>views.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
def log_location(request):
"""
:params
:lat - latitude
:lon - longitude
    :user_agent - useful for IoT applications that need to log the client
    that sends the location
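    Example request (illustrative; the exact path depends on the urlconf):
        GET /log_location?lat=51.5&lon=-0.12&user_agent=sensor-01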
"""
if request.method == 'GET':
user_agent = request.GET.get('user_agent','test')
try:
lat = request.GET['lat']
lon = request.GET['lon']
GeoLocation.objects.create(user_agent=user_agent,lat=lat,lon=lon)
        except Exception:
return HttpResponse(0, status=500)
return HttpResponse(1, status=200)<|fim▁end|> | from django.shortcuts import render
from .models import GeoLocation
from django.http import HttpResponse |
<|file_name|>http.py<|end_file_name|><|fim▁begin|>import logging
import ssl
from typing import List # pylint: disable=unused-import
import aiohttp
import certifi
import trio_asyncio
from aiohttp.http_exceptions import HttpProcessingError
from .base import BufferedFree, Limit, Sink, Source
logger = logging.getLogger(__name__)
class AiohttpClientSessionMixin:
def init_client(self, client, headers={}):<|fim▁hole|> if client:
self.client_owned, self.client = False, client
else:
self.client_owned, self.client = True, aiohttp.ClientSession(
connector=conn,
headers=headers,
skip_auto_headers=["Content-Type", "User-Agent"],
)
async def close_client(self):
if self.client_owned and not self.client.closed:
await self.client.close()
DEFAULT_CHUNK_SIZE = 1024 * 10 * 16
class URLReader(Source, AiohttpClientSessionMixin):
def __init__(self, url, client=None):
super(URLReader, self).__init__()
self.url = url
self.response = None
self.init_client(client)
@trio_asyncio.aio_as_trio
async def read(self, count=-1):
if self._eof:
return b""
if self.response is None:
self.response = await self.client.get(self.url)
self.response.raise_for_status()
if count == -1:
count = DEFAULT_CHUNK_SIZE
buf = await self.response.content.read(count)
if len(buf) == 0:
await self._close()
return buf
async def _close(self):
self._eof = True
if not self.response is None:
await self.response.release()
self.response = None
await self.close_client()
@trio_asyncio.aio_as_trio
async def close(self):
await self._close()
class URLWriter(Sink, AiohttpClientSessionMixin):
def __init__(self, url, size=None, client=None):
super(URLWriter, self).__init__()
self.url = url
self._done = False
self.response = None
self.bytes_written = 0
self.size = size
self.etag = None
self.init_client(client)
@trio_asyncio.aio_as_trio
async def read(self, count=-1):
if self._done:
return b""
if self.response is None:
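            # The pipeline's input side runs under trio while aiohttp runs on
            # the asyncio loop, so reads are bridged back via trio_as_aio.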
@trio_asyncio.trio_as_aio
async def read_from_input():
assert self.input is not None
return (await self.input.read())
async def feed_http_upload():
while True:
buf = await read_from_input()
if len(buf) == 0:
break
yield buf
self.bytes_written += len(buf)
logger.debug('HTTP PUT %s', self.url)
self.response = await self.client.put(
self.url,
data=feed_http_upload(),
raise_for_status=True,
headers={} if self.size is None else {"Content-Length": str(self.size)},
)
content = await self.response.read()
await self.response.release()
            if self.response.status not in (200, 201, 202):
raise HttpProcessingError(
code=self.response.status,
message=self.response.reason,
headers=self.response.headers,
)
self._done = True
if "ETAG" in self.response.headers:
self.etag = self.response.headers["ETAG"][1:-1]
return content
@trio_asyncio.aio_as_trio
async def close(self):
self._done = True
if not self.response is None:
await self.response.release()
self.response = None
await self.close_client()
class ChunkedURLWriter(Sink, AiohttpClientSessionMixin):
"""
The ChunkedURLWriter will instantiate an URLWriter for each URL given to
it.
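    Minimal usage sketch (names illustrative), using the pipe operator seen
    elsewhere in this module:
        writer = reader >> ChunkedURLWriter(upload_urls, chunksize=8 * 2**20)
        await writer.readall()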
"""
def __init__(self, urls, chunksize, total_size=None, client=None):
super(ChunkedURLWriter, self).__init__()
self._urls = urls
self._chunksize = chunksize
self._url_idx = 0
self.init_client(client)
self.bytes_written = 0
self.total_size = total_size
self.etags = [] # type: List[str]
def add_input(self, input):
self.input = input >> BufferedFree()
async def read(self, count=-1):
assert self.input is not None
if self._url_idx >= len(self._urls):
return b""
url = self._urls[self._url_idx]
logger.debug("Uploading to: %s (max. %d bytes)", url, self._chunksize)
size = (
None
if self.total_size is None
else min(self.total_size - self.bytes_written, self._chunksize)
)
writer = (
self.input
>> Limit(self._chunksize)
>> URLWriter(url, size=size, client=self.client)
)
result = await writer.readall()
self.etags.append(writer.etag)
self.bytes_written += writer.bytes_written
self._url_idx += 1
return result or b"<empty response>"
@trio_asyncio.aio_as_trio
async def close(self):
await self.close_client()<|fim▁end|> | ssl_context = ssl.create_default_context(cafile=certifi.where())
conn = aiohttp.TCPConnector(ssl=ssl_context) |
<|file_name|>a_star_test.go<|end_file_name|><|fim▁begin|>// Copyright ©2014 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package path_test
import (
"math"
"reflect"
"testing"
"github.com/gonum/graph"
"github.com/gonum/graph/concrete"
"github.com/gonum/graph/path"
"github.com/gonum/graph/path/internal"
"github.com/gonum/graph/topo"
)
var aStarTests = []struct {
name string
g graph.Graph
s, t int
heuristic path.Heuristic
wantPath []int
}{
{
name: "simple path",
g: func() graph.Graph {
return internal.NewGridFrom(
"*..*",
"**.*",
"**.*",
"**.*",
)
}(),
s: 1, t: 14,
wantPath: []int{1, 2, 6, 10, 14},
},
{
name: "small open graph",
g: internal.NewGrid(3, 3, true),
s: 0, t: 8,
},
{
name: "large open graph",
g: internal.NewGrid(1000, 1000, true),
s: 0, t: 999*1000 + 999,
},
{
name: "no path",
g: func() graph.Graph {
tg := internal.NewGrid(5, 5, true)
// Create a complete "wall" across the middle row.
tg.Set(2, 0, false)
tg.Set(2, 1, false)
tg.Set(2, 2, false)
tg.Set(2, 3, false)
tg.Set(2, 4, false)
return tg
}(),
s: 2, t: 22,
},
{
name: "partially obstructed",
g: func() graph.Graph {
tg := internal.NewGrid(10, 10, true)
			// Create a partial "wall" across the middle
// row with a gap at the left-hand end.
tg.Set(4, 1, false)
tg.Set(4, 2, false)
tg.Set(4, 3, false)
tg.Set(4, 4, false)
tg.Set(4, 5, false)
tg.Set(4, 6, false)
tg.Set(4, 7, false)
tg.Set(4, 8, false)
tg.Set(4, 9, false)
return tg
}(),
s: 5, t: 9*10 + 9,
},
{
name: "partially obstructed with heuristic",
g: func() graph.Graph {
tg := internal.NewGrid(10, 10, true)
			// Create a partial "wall" across the middle
// row with a gap at the left-hand end.
tg.Set(4, 1, false)
tg.Set(4, 2, false)
tg.Set(4, 3, false)
tg.Set(4, 4, false)
tg.Set(4, 5, false)
tg.Set(4, 6, false)
tg.Set(4, 7, false)
tg.Set(4, 8, false)
tg.Set(4, 9, false)
<|fim▁hole|> // Manhattan Heuristic
heuristic: func(u, v graph.Node) float64 {
uid := u.ID()
cu := (uid % 10)
ru := (uid - cu) / 10
vid := v.ID()
cv := (vid % 10)
rv := (vid - cv) / 10
return math.Abs(float64(ru-rv)) + math.Abs(float64(cu-cv))
},
},
}
func TestAStar(t *testing.T) {
for _, test := range aStarTests {
pt, _ := path.AStar(concrete.Node(test.s), concrete.Node(test.t), test.g, test.heuristic)
p, cost := pt.To(concrete.Node(test.t))
if !topo.IsPathIn(test.g, p) {
			t.Errorf("got path that is not a path in input graph for %q", test.name)
}
bfp, ok := path.BellmanFordFrom(concrete.Node(test.s), test.g)
if !ok {
t.Fatalf("unexpected negative cycle in %q", test.name)
}
if want := bfp.WeightTo(concrete.Node(test.t)); cost != want {
t.Errorf("unexpected cost for %q: got:%v want:%v", test.name, cost, want)
}
var got = make([]int, 0, len(p))
for _, n := range p {
got = append(got, n.ID())
}
if test.wantPath != nil && !reflect.DeepEqual(got, test.wantPath) {
t.Errorf("unexpected result for %q:\ngot: %v\nwant:%v", test.name, got, test.wantPath)
}
}
}
func TestExhaustiveAStar(t *testing.T) {
g := concrete.NewGraph()
nodes := []locatedNode{
{id: 1, x: 0, y: 6},
{id: 2, x: 1, y: 0},
{id: 3, x: 8, y: 7},
{id: 4, x: 16, y: 0},
{id: 5, x: 17, y: 6},
{id: 6, x: 9, y: 8},
}
for _, n := range nodes {
g.AddNode(n)
}
edges := []weightedEdge{
{from: g.Node(1), to: g.Node(2), cost: 7},
{from: g.Node(1), to: g.Node(3), cost: 9},
{from: g.Node(1), to: g.Node(6), cost: 14},
{from: g.Node(2), to: g.Node(3), cost: 10},
{from: g.Node(2), to: g.Node(4), cost: 15},
{from: g.Node(3), to: g.Node(4), cost: 11},
{from: g.Node(3), to: g.Node(6), cost: 2},
{from: g.Node(4), to: g.Node(5), cost: 7},
{from: g.Node(5), to: g.Node(6), cost: 9},
}
for _, e := range edges {
g.SetEdge(e, e.cost)
}
heuristic := func(u, v graph.Node) float64 {
lu := u.(locatedNode)
lv := v.(locatedNode)
return math.Hypot(lu.x-lv.x, lu.y-lv.y)
}
if ok, edge, goal := isMonotonic(g, heuristic); !ok {
t.Fatalf("non-monotonic heuristic at edge:%v for goal:%v", edge, goal)
}
ps := path.DijkstraAllPaths(g)
for _, start := range g.Nodes() {
for _, goal := range g.Nodes() {
pt, _ := path.AStar(start, goal, g, heuristic)
gotPath, gotWeight := pt.To(goal)
wantPath, wantWeight, _ := ps.Between(start, goal)
if gotWeight != wantWeight {
				t.Errorf("unexpected path weight from %v to %v result: got:%f want:%f",
start, goal, gotWeight, wantWeight)
}
if !reflect.DeepEqual(gotPath, wantPath) {
t.Errorf("unexpected path from %v to %v result:\ngot: %v\nwant:%v",
start, goal, gotPath, wantPath)
}
}
}
}
type locatedNode struct {
id int
x, y float64
}
func (n locatedNode) ID() int { return n.id }
type weightedEdge struct {
from, to graph.Node
cost float64
}
func (e weightedEdge) From() graph.Node { return e.from }
func (e weightedEdge) To() graph.Node { return e.to }
type costEdgeListGraph interface {
graph.Weighter
path.EdgeListerGraph
}
func isMonotonic(g costEdgeListGraph, h path.Heuristic) (ok bool, at graph.Edge, goal graph.Node) {
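	// Consistency (monotonicity) check: the heuristic must satisfy
	// h(u, goal) <= w(u, v) + h(v, goal) for every edge (u, v) and goal,
	// which is what lets A* expand each node at most once.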
for _, goal := range g.Nodes() {
for _, edge := range g.Edges() {
from := edge.From()
to := edge.To()
if h(from, goal) > g.Weight(edge)+h(to, goal) {
return false, edge, goal
}
}
}
return true, nil, nil
}
func TestAStarNullHeuristic(t *testing.T) {
for _, test := range shortestPathTests {
g := test.g()
for _, e := range test.edges {
g.SetEdge(e, e.Cost)
}
var (
pt path.Shortest
panicked bool
)
func() {
defer func() {
panicked = recover() != nil
}()
pt, _ = path.AStar(test.query.From(), test.query.To(), g.(graph.Graph), nil)
}()
if panicked || test.negative {
if !test.negative {
t.Errorf("%q: unexpected panic", test.name)
}
if !panicked {
t.Errorf("%q: expected panic for negative edge weight", test.name)
}
continue
}
if pt.From().ID() != test.query.From().ID() {
			t.Fatalf("%q: unexpected from node ID: got:%d want:%d", test.name, pt.From().ID(), test.query.From().ID())
}
p, weight := pt.To(test.query.To())
if weight != test.weight {
t.Errorf("%q: unexpected weight from Between: got:%f want:%f",
test.name, weight, test.weight)
}
if weight := pt.WeightTo(test.query.To()); weight != test.weight {
t.Errorf("%q: unexpected weight from Weight: got:%f want:%f",
test.name, weight, test.weight)
}
var got []int
for _, n := range p {
got = append(got, n.ID())
}
ok := len(got) == 0 && len(test.want) == 0
for _, sp := range test.want {
if reflect.DeepEqual(got, sp) {
ok = true
break
}
}
if !ok {
t.Errorf("%q: unexpected shortest path:\ngot: %v\nwant from:%v",
test.name, p, test.want)
}
np, weight := pt.To(test.none.To())
if pt.From().ID() == test.none.From().ID() && (np != nil || !math.IsInf(weight, 1)) {
t.Errorf("%q: unexpected path:\ngot: path=%v weight=%f\nwant:path=<nil> weight=+Inf",
test.name, np, weight)
}
}
}<|fim▁end|> | return tg
}(),
s: 5, t: 9*10 + 9, |
<|file_name|>support.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Frontend for running support related commands on hyperic installations.
#
# Opens as an interactive shell if no parameters are passed, otherwise runs
# the command and parameters that have been passed on the command line.
#
# Enter help to see available commands.
#
#
# Adding a new command is done by just adding a do_<command>(self,line) method to the
# SupportCmd class. The following will apply:
# * line will contain a string with all the command parameters.
# * The method's documentation becomes the command's help (Python documents methods by
# using a """text....""" comment as the first line *inside* the method).
# * If you want a more complicated help (e.g. dynamic), then just implement a help_<command>(self,line)
# method which prints the help text.
#
# No need to reinvent the wheel - Use run_with_jython and run_java_jar functions in order to perform
# the things that you need.
#
# Global variables can be accessed using gd.getGlobalVariable.
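# For example, a hypothetical 'logs' command (assuming a support-logs.py
# script exists next to the other support-*.py scripts) would just be:
#
#   def do_logs(self,line):
#       """logs - Collect recent server logs"""
#       run_with_jython(self.get_cmd_script_name('logs'),line)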
import cmd
import subprocess
import os,sys
import global_data as gd
import hq_utils
from architectures import *
def run_with_jython(script_name,arg_str):
run_jython = gd.getGlobalData(gd.RUN_JYTHON)
s = "%s %s %s" % (run_jython,script_name,arg_str)
subprocess.call(s,shell=True)
def run_java_jar(jar_path,arg_str):
java_executable = gd.getGlobalData(gd.JAVA_EXECUTABLE)
s = "%s -jar %s %s" % (java_executable,jar_path,arg_str)
subprocess.call(s,shell=True)
class SupportCmd(cmd.Cmd):
DELEGATED_HELP_COMMANDS = ['package']
def __init__(self,base_folder):
self.base_folder = base_folder
cmd.Cmd.__init__(self)
def get_cmd_script_name(self,cmd_name):
return os.path.join(self.base_folder,'support-%s.py' % cmd_name)
def do_package(self,line):
run_with_jython(self.get_cmd_script_name('package'),line)
# Override the help command in order to delegate the help printing to the script
# If it is the one implementing the command
def do_help(self,arg):
if arg in SupportCmd.DELEGATED_HELP_COMMANDS:
run_with_jython(self.get_cmd_script_name(arg),'--help')
else:
cmd.Cmd.do_help(self,arg)
def do_get_variables(self,line):
"""get_variables - Retrieves the values of all support-related variables"""
for k in sorted(gd.globalData.keys()):
print "%s: %s" % (k,gd.globalData[k])
def do_sigar(self,line):
"""Run a sigar command. See help for details"""
if line.strip() != '':
sigar_jar = gd.getGlobalData(gd.SIGAR_JAR)
if not os.path.isfile(sigar_jar):
if gd.getGlobalData(gd.INSTALLATION_TYPE) == hq_utils.DEV:
print "Could not find sigar JAR file - Please build the project before using this command"
else:
print "Could not find sigar JAR file in. Expected location is %s" % sigar_jar
return
run_java_jar(sigar_jar,line)
else:
print "sigar command parameters are missing. Use 'sigar help' for details"
def help_sigar(self):
print "Run a sigar command\n"
sigar_jar = gd.getGlobalData(gd.SIGAR_JAR)
run_java_jar(sigar_jar,'help')
def do_EOF(self,line):
return True
def do_quit(self,line):
"""quit - Quit the frontend"""
return True
def postloop(self):
print
def emptyline(self):
pass
detectArchitecture()
# Find the path of the script
scripts_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
# NOTE: Assumption is that script is in $HQ_HOME/support/scripts/
hq_utils.detectHQInformation(os.path.abspath(os.path.join(scripts_path,'..','..')))
s = SupportCmd(scripts_path)
s.prompt = "hq>"
try:
if len(sys.argv) > 1:
s.onecmd(" ".join(sys.argv[1:]))
else:
print "Hyperic HQ Support Frontend. Enter help to see available commands."
print "HQ installation type is %s" % gd.getGlobalData(gd.INSTALLATION_TYPE)<|fim▁hole|> print "JRE folder %s" % os.path.abspath(gd.getGlobalData(gd.JRE))
print "Jython JAR location: %s" % os.path.abspath(gd.getGlobalData(gd.JYTHON_JAR_LOCATION))
s.cmdloop()
except KeyboardInterrupt,e:
pass<|fim▁end|> | |
<|file_name|>test_stock_change_qty_reason.py<|end_file_name|><|fim▁begin|># pylint: disable=import-error,protected-access,too-few-public-methods
# Copyright 2016-2017 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import SavepointCase
class TestStockQuantityChangeReason(SavepointCase):
@classmethod
def setUpClass(cls):
super(TestStockQuantityChangeReason, cls).setUpClass()
# MODELS
cls.stock_move = cls.env["stock.move"]
cls.product_product_model = cls.env["product.product"]
cls.product_category_model = cls.env["product.category"]
cls.wizard_model = cls.env["stock.change.product.qty"]
cls.preset_reason_id = cls.env["stock.inventory.line.reason"]
cls.stock_location = cls.env.ref("stock.stock_location_stock")
# INSTANCES
cls.category = cls.product_category_model.create({"name": "Physical (test)"})
def _create_product(self, name):
return self.product_product_model.create(
{"name": name, "categ_id": self.category.id, "type": "product"}
)
def _product_change_qty(self, product, new_qty):
values = {
"product_tmpl_id": product.product_tmpl_id.id,
"product_id": product.id,
"new_quantity": new_qty,
}
wizard = self.wizard_model.create(values)
wizard.change_product_qty()
def _create_reason(self, name, description=None):
return self.preset_reason_id.create({"name": name, "description": description})
def test_inventory_adjustment_onchange_reason_preset_reason(self):
"""Check that adding a reason or a preset reason explode to lines"""
product2 = self._create_product("product_product_2")
self._product_change_qty(product2, 50)
inventory = self.env["stock.inventory"].create(
{
"name": "remove product2",
"product_ids": [(4, product2.id)],
"location_ids": [(4, self.stock_location.id)],<|fim▁hole|> )
inventory.preset_reason_id = self._create_reason("Test 1", "Description Test 1")
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
inventory.reason = "Reason 2"
inventory.onchange_reason()
self.assertEqual(inventory.line_ids.reason, inventory.reason)
inventory.preset_reason_id = self._create_reason("Test 2", "Description Test 2")
inventory.onchange_preset_reason()
self.assertEqual(
inventory.line_ids.preset_reason_id, inventory.preset_reason_id
)
inventory.line_ids[0].write({"product_qty": 10})
inventory.action_validate()
move = self.stock_move.search(
[("product_id", "=", product2.id), ("preset_reason_id", "!=", False)]
)
self.assertEqual(len(move), 1)
self.assertEqual(move.origin, inventory.preset_reason_id.name)
self.assertEqual(move.preset_reason_id, inventory.preset_reason_id)<|fim▁end|> | } |
<|file_name|>unicastsockopt_test.go<|end_file_name|><|fim▁begin|>// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipv4_test
import (
"net"
"runtime"
"testing"
"github.com/djbarber/ipfs-hack/Godeps/_workspace/src/golang.org/x/net/internal/iana"
"github.com/djbarber/ipfs-hack/Godeps/_workspace/src/golang.org/x/net/ipv4"
"golang.org/x/net/internal/nettest"
)
func TestConnUnicastSocketOptions(t *testing.T) {
switch runtime.GOOS {
case "nacl", "plan9", "solaris":
t.Skipf("not supported on %s", runtime.GOOS)
}
ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
if ifi == nil {
t.Skipf("not available on %s", runtime.GOOS)
}
ln, err := net.Listen("tcp4", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer ln.Close()
done := make(chan bool)
go acceptor(t, ln, done)
c, err := net.Dial("tcp4", ln.Addr().String())
if err != nil {
t.Fatal(err)
}
defer c.Close()
testUnicastSocketOptions(t, ipv4.NewConn(c))
<-done
}
var packetConnUnicastSocketOptionTests = []struct {
net, proto, addr string
}{
{"udp4", "", "127.0.0.1:0"},
{"ip4", ":icmp", "127.0.0.1"},
}
func TestPacketConnUnicastSocketOptions(t *testing.T) {
switch runtime.GOOS {
case "nacl", "plan9", "solaris":
t.Skipf("not supported on %s", runtime.GOOS)
}
ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
if ifi == nil {
t.Skipf("not available on %s", runtime.GOOS)
}
m, ok := nettest.SupportsRawIPSocket()
for _, tt := range packetConnUnicastSocketOptionTests {
if tt.net == "ip4" && !ok {
t.Log(m)
continue
}
c, err := net.ListenPacket(tt.net+tt.proto, tt.addr)
if err != nil {
t.Fatal(err)
}
defer c.Close()
testUnicastSocketOptions(t, ipv4.NewPacketConn(c))
}
}
func TestRawConnUnicastSocketOptions(t *testing.T) {
switch runtime.GOOS {
case "nacl", "plan9", "solaris":
t.Skipf("not supported on %s", runtime.GOOS)
}
if m, ok := nettest.SupportsRawIPSocket(); !ok {
t.Skip(m)
}
ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
if ifi == nil {
t.Skipf("not available on %s", runtime.GOOS)
}
c, err := net.ListenPacket("ip4:icmp", "127.0.0.1")
if err != nil {
t.Fatal(err)
}
defer c.Close()
r, err := ipv4.NewRawConn(c)
if err != nil {
t.Fatal(err)
}
testUnicastSocketOptions(t, r)<|fim▁hole|>
type testIPv4UnicastConn interface {
TOS() (int, error)
SetTOS(int) error
TTL() (int, error)
SetTTL(int) error
}
func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) {
tos := iana.DiffServCS0 | iana.NotECNTransport
switch runtime.GOOS {
case "windows":
// IP_TOS option is supported on Windows 8 and beyond.
t.Skipf("not supported on %s", runtime.GOOS)
}
if err := c.SetTOS(tos); err != nil {
t.Fatal(err)
}
if v, err := c.TOS(); err != nil {
t.Fatal(err)
} else if v != tos {
t.Fatalf("got %v; want %v", v, tos)
}
const ttl = 255
if err := c.SetTTL(ttl); err != nil {
t.Fatal(err)
}
if v, err := c.TTL(); err != nil {
t.Fatal(err)
} else if v != ttl {
t.Fatalf("got %v; want %v", v, ttl)
}
}<|fim▁end|> | } |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.generic import (
generic_inlineformset_factory, GenericTabularInline)
from django.forms.models import ModelForm
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
# local test models
from .admin import MediaInline, MediaPermanentInline
from .models import (Episode, EpisodeExtra, EpisodeMaxNum, Media,
EpisodePermanent, Category)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericAdminViewTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
# set TEMPLATE_DEBUG to True to ensure {% include %} will raise
# exceptions since that is how inlines are rendered and #9498 will
# bubble up if it is an issue.
self.original_template_debug = settings.TEMPLATE_DEBUG
settings.TEMPLATE_DEBUG = True
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def tearDown(self):
self.client.logout()
settings.TEMPLATE_DEBUG = self.original_template_debug
def testBasicAddGet(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
self.assertEqual(response.status_code, 200)
def testBasicEditGet(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
self.assertEqual(response.status_code, 200)
def testBasicAddPost(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testBasicEditPost(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
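        # (Explanatory note, not part of the original test: the *-TOTAL_FORMS,
        # *-INITIAL_FORMS and *-MAX_NUM_FORMS keys above make up the formset
        # "management form"; the long prefix is derived from the Media inline's
        # content_type/object_id generic foreign key.)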
def testGenericInlineFormset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def testGenericInlineFormsetFactory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminParametersTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def testNoParam(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def testExtraParam(self):
"""
With extra=0, there should be one form.
"""
e = self._create_object(EpisodeExtra)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodeextra/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def testMaxNumParam(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
e = self._create_object(EpisodeMaxNum)
inline_form_data = '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" value="2" id="id_generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" value="1" id="id_generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" />'
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodemaxnum/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminWithUniqueTogetherTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def testAdd(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/contact/add/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/contact/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
class NoInlineDeletionTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
def test_no_deletion(self):
fake_site = object()
inline = MediaPermanentInline(EpisodePermanent, fake_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class GenericInlineModelAdminTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, None)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.<|fim▁hole|> model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
six.dictkeys(list(ma.get_formsets(request))[0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
six.dictkeys(list(ma.get_formsets(request))[0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
six.dictkeys(list(ma.get_formsets(request))[0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])<|fim▁end|> | """
class MediaForm(ModelForm):
class Meta: |
<|file_name|>debug.py<|end_file_name|><|fim▁begin|>import sys
from pyasn1.compat.octets import octs2ints
from pyasn1 import error
from pyasn1 import __version__
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Debug:
defaultPrinter = sys.stderr and sys.stderr.write or None
def __init__(self, *flags):
self._flags = flagNone
if not self.defaultPrinter:
raise error.PyAsn1Error('Null debug writer specified')
self._printer = self.defaultPrinter
self('running pyasn1 version %s' % __version__)
for f in flags:
if f not in flagMap:
raise error.PyAsn1Error('bad debug flag %s' % (f,))
self._flags = self._flags | flagMap[f]
self('debug category \'%s\' enabled' % f)
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer('DBG: %s\n' % msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(l):
global logger
logger = l
def hexdump(octets):
return ' '.join(
[ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
for n,x in zip(range(len(octets)), octs2ints(octets)) ]
)
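# Rough illustration (assumed output, for orientation only): hexdump('ab') yields
# '\n00000: 61 62' -- a new zero-padded offset label starts every 16 octets.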
class Scope:
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)<|fim▁hole|> def pop(self):
return self._list.pop()
scope = Scope()<|fim▁end|> |
def push(self, token):
self._list.append(token)
|
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>import { login } from './authenticate';
import { getUserName } from './model/User';
const emailInput = document.getElementById('email');
const passwordInput = document.getElementById('password');<|fim▁hole|>
if (!emailInput) {
throw new Error('Cannot find #email input.');
}
if (!passwordInput) {
throw new Error('Cannot find #password input.');
}
if (!loginForm) {
throw new Error('Cannot find #login form.');
}
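// Note: the throws above double as type guards — below this point the compiler
// narrows emailInput/passwordInput/loginForm from `HTMLElement | null` to
// `HTMLElement`, so no further null checks (or `!` assertions) are needed.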
let email = '';
let password = '';
emailInput.addEventListener('change', (event) => {
if (event.target instanceof HTMLInputElement) {
email = event.target.value;
}
});
passwordInput.addEventListener('change', (event) => {
if (event.target instanceof HTMLInputElement) {
password = event.target.value;
}
});
loginForm.addEventListener('submit', async (event) => {
  // stop the native form submission so the page does not reload before login finishes
  event.preventDefault();
  const user = await login(email, password);
if (user.role === 'admin') {
console.log(`Logged in as ${getUserName(user)} [admin].`);
} else {
console.log(`Logged in as ${getUserName(user)}`);
}
});<|fim▁end|> | const loginForm = document.getElementById('login'); |
<|file_name|>dummy.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
# Made by Kei Choi([email protected])
import os  # imported so files can be deleted
import kernel
#---------------------------------------------------------------------
# KavMain class
# Marks this module as a KICOM Anti-Virus engine module.
# Without this class the engine kernel will not load the module.
#---------------------------------------------------------------------
class KavMain :<|fim▁hole|> # init(self, plugins)
    # Performs the initialization work for the engine module.
#-----------------------------------------------------------------
    def init(self, plugins) :  # engine module initialization
        self.virus_name = 'Dummy-Test-File (not a virus)'  # name of the malware this engine detects
        # register the malware pattern
        self.dummy_pattern = 'Dummy Engine test file - KICOM Anti-Virus Project, 2012, Kei Choi'
return 0
#-----------------------------------------------------------------
# uninit(self)
    # Performs the cleanup work for the engine module.
#-----------------------------------------------------------------
    def uninit(self) :  # engine module cleanup
try :
del self.virus_name
del self.dummy_pattern
except :
pass
return 0
#-----------------------------------------------------------------
    # scan(self, mmhandle, scan_file_struct, format)
    # Scans for malware.
    # arguments : mmhandle - mmap handle of the file
    #           : scan_file_struct - file information structure
    #           : format - pre-analyzed file format
    # returns   : (malware found flag, malware name, malware ID), etc.
#-----------------------------------------------------------------
def scan(self, mmhandle, scan_file_struct, format) :
ret_value = {}
        ret_value['result'] = False  # whether a virus was found
        ret_value['virus_name'] = ''  # virus name
        ret_value['scan_state'] = kernel.NOT_FOUND  # 0: none, 1: infected, 2: suspicious, 3: warning
        ret_value['virus_id'] = -1  # virus ID
try :
            # Is the Dummy format among the pre-analyzed file formats?
            fformat = format['ff_dummy']
            # Does the pre-analyzed format report the expected size (the 65-byte pattern)?
            if fformat['size'] != len(self.dummy_pattern) :
                raise SystemError
            # Open the file and read as many bytes as the malware pattern is long.
            filename = scan_file_struct['real_filename']
            fp = open(filename)
            buf = fp.read(len(self.dummy_pattern))  # the pattern is 65 bytes long
            fp.close()
            # Compare the buffer against the malware pattern.
if buf == self.dummy_pattern :
                # If the pattern matches, fill in and return the result.
                ret_value['result'] = True  # whether a virus was found
                ret_value['virus_name'] = self.virus_name  # virus name
                ret_value['scan_state'] = kernel.INFECTED  # 0: none, 1: infected, 2: suspicious, 3: warning
                ret_value['virus_id'] = 0  # virus ID
return ret_value
except :
pass
        # Report that no malware was found.
return ret_value
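    # Illustrative sketch of how the engine kernel might drive this plugin
    # (hypothetical call sequence; names follow this module, not the real kernel):
    #
    #   plugin = KavMain()
    #   plugin.init(plugins_path)
    #   fmt = plugin.format(mm, filename) or {}
    #   ret = plugin.scan(mm, {'real_filename': filename}, fmt)
    #   if ret['result'] :
    #       plugin.disinfect(filename, ret['virus_id'])
    #   plugin.uninit()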
#-----------------------------------------------------------------
# disinfect(self, filename, malwareID)
    # Disinfects the malware.
    # arguments : filename - file name
    #           : malwareID - ID of the malware to disinfect
    # returns   : whether the disinfection succeeded
#-----------------------------------------------------------------
    def disinfect(self, filename, malwareID) :  # malware disinfection
        try :
            # Is the ID received from the scan result 0?
            if malwareID == 0 :
                os.remove(filename)  # delete the file
                return True  # report disinfection success
except :
pass
        return False  # report disinfection failure
#-----------------------------------------------------------------
# listvirus(self)
    # Reports the list of malware that can be detected/disinfected.
#-----------------------------------------------------------------
    def listvirus(self) :  # list of detectable malware
        vlist = []  # declare a list variable
        vlist.append(self.virus_name)  # register the name of the detected malware
return vlist
#-----------------------------------------------------------------
# getinfo(self)
    # Reports key information about the engine module (version, author, ...).
#-----------------------------------------------------------------
def getinfo(self) :
        info = {}  # declare a dict variable
        info['author'] = 'Kei Choi'  # author
        info['version'] = '1.0'  # version
        info['title'] = 'Dummy Scan Engine'  # engine description
        info['kmd_name'] = 'dummy'  # engine file name
        # if the pattern creation date and time are absent, they default to the build time
        info['date'] = 0  # pattern creation date
        info['time'] = 0  # pattern creation time
        info['sig_num'] = 1  # number of signatures
return info
#-----------------------------------------------------------------
# format(self, mmhandle, filename)
    # A format analyzer dedicated to the Dummy format.
#-----------------------------------------------------------------
def format(self, mmhandle, filename) :
try :
            fformat = {}  # space to hold the format information
            mm = mmhandle
            if mm[0:5] == 'Dummy' :  # header check
                fformat['size'] = len(mm)  # store the key format information
ret = {}
ret['ff_dummy'] = fformat
return ret
except :
pass
return None<|fim▁end|> | #----------------------------------------------------------------- |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for credentials.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from os.path import abspath, dirname
from sys import path
from django.core.wsgi import get_wsgi_application
SITE_ROOT = dirname(dirname(abspath(__file__)))<|fim▁hole|>os.environ.setdefault("DJANGO_SETTINGS_MODULE", "credentials.settings.local")
application = get_wsgi_application() # pylint: disable=invalid-name<|fim▁end|> | path.append(SITE_ROOT)
|
<|file_name|>from_sparse_tensor_slices_test.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []<|fim▁hole|> ], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlices(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with sparse tensor in the appropriate order.
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
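  # Worked example of the construction above (for reference, not executed):
  # slices = [[1., 2., 3.], [1.]] gives indices [[0,0],[0,1],[0,2],[1,0]],
  # values [1., 2., 3., 1.] and dense_shape [2, 4] (rows x (longest row + 1)).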
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlicesInReverse(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor` in reverse order."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlices(self):
"""Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = np.empty((0, 4), dtype=np.int64)
non_empty_values = [1, 2, 3, 4]
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
non_empty_values,
empty_dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid2(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = [[]]
empty_values = []
dense_shape = [1, 1]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
def testFromSparseTensorSlicesError(self):
with self.assertRaises(AttributeError):
dataset_ops.Dataset.from_sparse_tensor_slices(None)
class FromSparseTensorSlicesCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
def _build_sparse_tensor_slice_dataset(self, slices):
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
dtype=np.int64)
values = np.array([val for s in slices for val in s], dtype=np.float64)
# pylint: enable=g-complex-comprehension
dense_shape = np.array(
[len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)
@combinations.generate(
combinations.times(test_base.v1_only_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
verify_fn(
self,
lambda: self._build_sparse_tensor_slice_dataset(slices),
num_outputs=9,
sparse_tensors=True)
if __name__ == "__main__":
test.main()<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Meta Platforms, Inc. and affiliates.<|fim▁hole|> */
use common::FeatureFlag;
use fixture_tests::Fixture;
use graphql_test_helpers::apply_transform_for_test;
use relay_transforms::{apply_fragment_arguments, provided_variable_fragment_transform};
pub fn transform_fixture(fixture: &Fixture<'_>) -> Result<String, String> {
apply_transform_for_test(fixture, |program| {
let program = provided_variable_fragment_transform(program)?;
apply_fragment_arguments(&program, false, &FeatureFlag::Enabled, &Default::default())
})
}<|fim▁end|> | *
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree. |
<|file_name|>blackjack.py<|end_file_name|><|fim▁begin|>"""
A program for playing Blackjack (a.k.a. "Oczko") in Python using the PyGame library
Course credit project - Scripting Languages, Computer Science and Econometrics, year 1, WZ, AGH
Authors: Joanna Jeziorek, Mateusz Koziestański, Katarzyna Maciocha
March 2016
"""
import random as rd
import os
import sys
import pygame
from pygame import *
pygame.font.init()
pygame.mixer.init()
screen = pygame.display.set_mode((800, 480))
clock = pygame.time.Clock()
# the variables below have to be pre-declared here, otherwise errors are raised later in the methods.
display_font = pygame.font.Font(None, 28)
aces = ['ki_a', 'ka_a', 'pi_a', 'tr_a']
player_hand, dealer_hand = [], []
def load_image(imgname, card):
"""
    Helper for loading image files.
    :param imgname: name of the png file
    :param card: card object
    :return: the loaded image and its bounding rectangle
"""
if card == 1:
fullname = os.path.join("obrazy/karty", imgname)
else:
fullname = os.path.join('obrazy', imgname)
try:
imgname = pygame.image.load(fullname)
except pygame.error as message:
        print('Cannot load image:', imgname)
imgname = imgname.convert()
return imgname, imgname.get_rect()
def display(font, sentence):
""" Wyswietlacz tekstu na dole ekranu. Tekst sluży do informowania gracza o tym co sie dzieje."""
display_font = pygame.font.Font.render(font, sentence, 1, (255, 255, 255), (0, 0, 0))
return display_font
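# For instance, display(textFont, "Player busts!") returns a white-on-black
# Surface which the main loop blits near the bottom of the window (at (10, 455)).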
# =============Game logic functions==================
def game_over():
"""
    If the player runs out of money, shows the end screen. The player can only quit the game.
"""
while 1:
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN and event.key == K_ESCAPE:
sys.exit()
        # Black screen
screen.fill((0, 0, 0))
        # "Game over" caption
oFont = pygame.font.Font(None, 50)
display_font = pygame.font.Font.render(oFont, "Koniec gry! Skonczyly ci sie pieniadze!", 1, (255, 255, 255),
(0, 0, 0))
screen.blit(display_font, (125, 220))
pygame.display.flip()
def create_deck():
"""
    Creates a deck of cards named with the convention [first two letters of the suit]_[card],
    then returns the deck
    a = ace, k = king (krol), d = queen (dama), w = jack (walet)
"""
deck = ['ki_a', 'ki_k', 'ki_d', 'ki_w',
'ka_a', 'ka_k', 'ka_d', 'ka_w',
'tr_a', 'tr_k', 'tr_d', 'tr_w',
'pi_a', 'pi_k', 'pi_d', 'pi_w']
for x in range(2, 11):
kier = 'ki_' + str(x)
karo = 'ka_' + str(x)
trefl = 'tr_' + str(x)
pik = 'pi_' + str(x)
for kolor in [kier, karo, trefl, pik]:
deck.append(kolor)
return deck<|fim▁hole|> rd.shuffle(deck)
return deck
def return_played(deck, played_deck):
    # Moves the played cards back into the main deck.
    # Returns the shuffled deck and an empty pile of played cards.
for card in played_deck:
deck.append(played_deck.pop())
shuffle(deck)
return deck, played_deck
def deck_deal(deck, played_deck):
    # If the deck is not empty, deals the first four cards alternately to the player and the dealer.
    # Returns, in order: the deck, the played pile, the player's hand and the dealer's hand
dealer_hand, player_hand = [], []
shuffle(deck)
if len(deck) < 5:
deck, played_deck = return_played(deck, played_deck)
        # returning cards to the deck once it is already empty still needs some polishing.
dealer_hand.append(deck.pop(0))
played_deck.append(dealer_hand[-1])
player_hand.append(deck.pop(0))
played_deck.append(player_hand[-1])
dealer_hand.append(deck.pop(0))
played_deck.append(dealer_hand[-1])
player_hand.append(deck.pop(0))
played_deck.append(player_hand[-1])
return deck, played_deck, player_hand, dealer_hand
def hit(deck, played_deck, hand):
    # If the deck is not empty, gives the player a card.
if len(deck) < 2:
deck, played_deck = return_played(deck, played_deck)
hand.append(deck.pop(0))
played_deck.append(hand[-1])
return deck, played_deck, hand
def value(hand):
    # Computes the value of the cards in a hand.
    # If the hand holds an ace and the total exceeds 21, the ace's value drops from 11 to 1.
value_total = 0
for card in hand:
if card[3] == 'a':
value_total += 11
elif card[3] in ['k', 'd', 'w', '1']:
value_total += 10
else:
value_total += int(card[3])
if value_total > 21:
for card in hand:
if card[3] == 'a':
value_total -= 10
if value_total <= 21:
break
else:
continue
return value_total
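# Quick sanity check of the scoring above (hypothetical hands):
#   value(['ki_a', 'ki_k'])          -> 21 (ace counted as 11)
#   value(['ki_a', 'ki_9', 'pi_5'])  -> 15 (25 would bust, so the ace drops to 1)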
def round_end(deck, player_hand, dealer_hand, played_deck, funds, money_gain, money_loss, dealer_cards, CardSprite):
if len(player_hand) == 2 and player_hand[:1] in aces:
money_gain += (money_gain * 3 / 2)
dealer_cards.empty()
dealer_card_position = (50, 70)
for x in dealer_hand:
card = CardSprite(x, dealer_card_position)
dealer_card_position = (dealer_card_position[0] + 80, dealer_card_position[1])
dealer_cards.add(card)
if not dealer_hand:
for card in player_hand:
played_deck.append(card)
player_hand.pop()
for card in dealer_hand:
played_deck.append(card)
dealer_hand.pop()
funds += money_gain
funds -= money_loss
display_font = pygame.font.Font(None, 28)
if funds <= 0:
game_over()
end_round = 1
return deck, player_hand, dealer_hand, played_deck, funds, end_round
def bust(deck, player_hand, dealer_hand, played_deck, funds, money_gain, money_loss, dealer_cards, CardSprite):
font = pygame.font.Font(None, 28)
display_font = display(font, "Gracz przebił! Przegrana: $%.1f." % money_loss)
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck, funds,
money_gain, money_loss, dealer_cards,
CardSprite)
return deck, player_hand, dealer_hand, played_deck, funds, end_round, display_font
def compare(deck, played_deck, player_hand, dealer_hand, funds, bet, dealer_cards, CardSprite):
pv, dv = value(player_hand), value(dealer_hand)
display_font = pygame.font.Font(None, 28)
while dv < 17:
deck, played_deck, dealer_hand = hit(deck, played_deck, dealer_hand)
dv = value(dealer_hand)
if dv < pv <= 21:
        # Player wins
funds += 2 * bet
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck, funds, bet, 0,
dealer_cards,
CardSprite)
display_font = display(display_font, "Wygrana: $%.1f." % bet)
elif pv == dv and pv <= 21:
        # Tie
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck, funds, 0, 0,
dealer_cards,
CardSprite)
display_font = display(display_font, "Remis!")
elif dv > 21 >= pv:
        # Dealer busts while the player does not
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck, funds, bet, 0,
dealer_cards,
CardSprite)
display_font = display(display_font, "Krupier przebił! Wygrana: $%.1f." % bet)
else:
        # In every other case the dealer wins
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck, funds, 0, bet,
dealer_cards,
CardSprite)
display_font = display(display_font, "Krupier wygrywa! Przegrana $%.1f." % bet)
return deck, played_deck, end_round, funds, display_font
def blackJack(deck, played_deck, player_hand, dealer_hand, funds, bet, dealer_cards, CardSprite):
""" Metoda sprawdzająca, czy któryś z graczy ma blackjack (BJ) """
textFont = pygame.font.Font(None, 28)
pv = value(player_hand)
dv = value(dealer_hand)
if pv == 21 and dv == 21:
        # Both the player and the dealer have BJ; it is a tie and nobody loses money.
display_font = display(textFont, "Blackjack! Krupier także go ma, więc jest remis!")
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck,
funds, 0, bet, dealer_cards,
CardSprite)
elif pv == 21 and dv != 21:
        # The dealer loses, the player has BJ
display_font = display(textFont, "Blackjack! Wygrana: $%.1f." % (bet * 1.5))
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck,
funds, bet, 0, dealer_cards,
CardSprite)
elif dv == 21 and pv != 21:
        # The player loses and the dealer has BJ
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck,
funds, 0, bet, dealer_cards,
CardSprite)
display_font = display(textFont, "Krupier ma blackjack! Przegrana: $%.1f." % bet)
return display_font, player_hand, dealer_hand, played_deck, funds, end_round
# ==============End of game logic===============
class CardSprite(pygame.sprite.Sprite):
""" Sprite wyświetlający określoną kartę. """
def __init__(self, card, position):
pygame.sprite.Sprite.__init__(self)
card_image = card + ".png"
self.image, self.rect = load_image(card_image, 1)
self.position = position
def update(self):
self.rect.center = self.position
# the update method in each button is essentially the instruction for what that button does when clicked
class BetButtonUp(pygame.sprite.Sprite):
""" Guzik zwiększający zakład """
# noinspection PyTypeChecker
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("arrow_up.png", 0)
self.position = (710, 225)
def update(self, mX, mY, bet, funds, click, end_round):
self.image, self.rect = load_image("arrow_up.png", 0)
self.position = (710, 225)
self.rect.center = self.position
if self.rect.collidepoint(mX, mY) == 1 and click == 1 and end_round == 1:
if bet < funds:
bet += 5.0
if bet % 5 != 0:
while bet % 5 != 0:
bet -= 1
click = 0
return bet, click
class BetButtonDown(pygame.sprite.Sprite):
""" Guzik zmniejszający zakład """
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("arrow_down.png", 0)
self.position = (710, 225)
def update(self, mX, mY, bet, click, end_round):
self.image, self.rect = load_image("arrow_down.png", 0)
self.position = (760, 225)
self.rect.center = self.position
if self.rect.collidepoint(mX, mY) == 1 and click == 1 and end_round == 1:
if bet > 5:
bet -= 5.0
if bet % 5 != 0:
while bet % 5 != 0:
bet += 1
click = 0
return bet, click
class HitButton(pygame.sprite.Sprite):
""" Guzik pozwalający graczowi dobrać kartę z talii. """
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("hit.png", 0)
self.position = (735, 390)
def update(self, mX, mY, deck, played_deck, player_hand, dealer_cards, player_card_position, end_round, CardSprite,
click):
self.image, self.rect = load_image("hit.png", 0)
self.position = (735, 390)
self.rect.center = self.position
if self.rect.collidepoint(mX, mY) == 1 and click == 1:
if end_round == 0:
deck, played_deck, player_hand = hit(deck, played_deck, player_hand)
current_card = len(player_hand) - 1
card = CardSprite(player_hand[current_card], player_card_position)
dealer_cards.add(card)
player_card_position = (player_card_position[0] - 80, player_card_position[1])
click = 0
return deck, played_deck, player_hand, player_card_position, click
class StandButton(pygame.sprite.Sprite):
""" Guzik umożliwiający graczowi zostanie przy obecnej liczbie kart. """
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("stand.png", 0)
self.position = (735, 350)
def update(self, mX, mY, deck, played_deck, player_hand, dealer_hand, dealer_cards, player_card_position, end_round,
CardSprite, funds,
bet, display_font):
self.image, self.rect = load_image("stand.png", 0)
self.position = (735, 350)
self.rect.center = self.position
if self.rect.collidepoint(mX, mY) == 1:
if end_round == 0:
deck, played_deck, end_round, funds, display_font = compare(deck, played_deck, player_hand, dealer_hand,
funds, bet, dealer_cards, CardSprite)
return deck, played_deck, end_round, funds, player_hand, played_deck, player_card_position, display_font
class DoubleButton(pygame.sprite.Sprite):
""" Guzik umożliwiający graczowi podwojenie zakładu i wzięcie jedynej dodatkowej karty."""
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("double.png", 0)
self.position = (735, 305)
def update(self, mX, mY, deck, played_deck, player_hand, dealer_hand, playerCards, dealer_cards,
player_card_position,
end_round,
CardSprite, funds, bet, display_font):
self.image, self.rect = load_image("double.png", 0)
self.position = (735, 305)
self.rect.center = self.position
if self.rect.collidepoint(mX, mY) == 1:
if end_round == 0 and funds >= bet * 2 and len(player_hand) == 2:
bet *= 2
deck, played_deck, player_hand = hit(deck, played_deck, player_hand)
current_card = len(player_hand) - 1
card = CardSprite(player_hand[current_card], player_card_position)
playerCards.add(card)
player_card_position = (player_card_position[0] - 80, player_card_position[1])
deck, played_deck, end_round, funds, display_font = compare(deck, played_deck, player_hand, dealer_hand,
funds, bet, dealer_cards, CardSprite)
bet /= 2
return deck, played_deck, end_round, funds, player_hand, played_deck, player_card_position, display_font, bet
class DealButton(pygame.sprite.Sprite):
""" Guzik umożliwiający rozpoczęcie nowej rundy / rozdania """
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("deal.png", 0)
self.position = (735, 430)
def update(self, mX, mY, deck, played_deck, end_round, CardSprite, dealer_cards, player_hand, dealer_hand,
dealer_card_posit,
player_card_position, display_font, playerCards, click, handsPlayed) -> object:
textFont = pygame.font.Font(None, 28)
self.image, self.rect = load_image("deal.png", 0)
self.position = (735, 430)
self.rect.center = self.position
if self.rect.collidepoint(mX, mY) == 1:
if end_round == 1 and click == 1:
display_font = display(textFont, "")
dealer_cards.empty()
playerCards.empty()
deck, played_deck, player_hand, dealer_hand = deck_deal(deck, played_deck)
dealer_card_posit = (50, 70)
player_card_position = (540, 370)
for x in player_hand:
card = CardSprite(x, player_card_position)
player_card_position = (player_card_position[0] - 80, player_card_position[1])
playerCards.add(card)
faceDownCard = CardSprite("back", dealer_card_posit)
dealer_card_posit = (dealer_card_posit[0] + 80, dealer_card_posit[1])
dealer_cards.add(faceDownCard)
card = CardSprite(dealer_hand[0], dealer_card_posit)
dealer_cards.add(card)
end_round = 0
click = 0
handsPlayed += 1
return deck, played_deck, player_hand, dealer_hand, dealer_card_posit, player_card_position, end_round, display_font, click, handsPlayed
# font used on the right-hand side of the screen (funds, bet, etc.)
textFont = pygame.font.Font(None, 28)
# set the background/board image file
background, backgroundRect = load_image("plansza.png", 0)
# sprite group for the dealer's card images
dealer_cards = pygame.sprite.Group()
# same as above, but for the player
player_cards = pygame.sprite.Group()
# create instances of all the buttons
bet_up = BetButtonUp()
bet_down = BetButtonDown()
stand_button = StandButton()
deal_butt = DealButton()
hit_butt = HitButton()
dbl_butt = DoubleButton()
# group holding all the buttons
buttons = pygame.sprite.Group(bet_up, bet_down, hit_butt, stand_button, deal_butt, dbl_butt)
# create the deck
deck = create_deck()
# define an empty pile of used cards
played_deck = []
dealer_card_position, player_card_position = (), ()
mX, mY = 0, 0
click = 0
# starting values of the bet and the bank.
funds = 100.0
bet = 10.0
# how many rounds have been played - variable initialization
handsPlayed = 0
# variable used to mark the end of a round. Equals 0, except between rounds, when it is 1.
end_round = 1
firstTime = 1
while 1:
screen.blit(background, backgroundRect)
if bet > funds:
bet = funds
if end_round == 1 and firstTime == 1:
display_font = display(textFont,
"Klikaj w strzałki, aby określić stawkę. Potem wciśnij Deal aby rozpocząć grę.")
firstTime = 0
screen.blit(display_font, (10, 455))
fundsFont = pygame.font.Font.render(textFont, "Bank: $%.1f" % funds, 1, (255, 255, 255), (0, 0, 0))
screen.blit(fundsFont, (658, 175))
betFont = pygame.font.Font.render(textFont, "Stawka: $%.1f" % bet, 1, (255, 255, 255), (0, 0, 0))
screen.blit(betFont, (658, 259))
hpFont = pygame.font.Font.render(textFont, "Runda: %i " % handsPlayed, 1, (255, 255, 255), (0, 0, 0))
screen.blit(hpFont, (658, 150))
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
elif event.type == MOUSEBUTTONDOWN:
if event.button == 1:
mX, mY = pygame.mouse.get_pos()
click = 1
elif event.type == MOUSEBUTTONUP:
mX, mY = 0, 0
click = 0
    # initial check whether, after the first two cards are dealt, somebody has blackjack.
    # since I do not know how to program an "insurance bet", if the dealer has BJ right away, he wins immediately.
if end_round == 0:
        # what happens during a round
pv = value(player_hand)
dv = value(dealer_hand)
if pv == 21 and len(player_hand) == 2:
            # if the player has BJ
display_font, player_hand, dealer_hand, played_deck, funds, end_round = blackJack(deck, played_deck,
player_hand,
dealer_hand, funds, bet,
dealer_cards,
CardSprite)
if dv == 21 and len(dealer_hand) == 2:
            # if the dealer has BJ
display_font, player_hand, dealer_hand, played_deck, funds, end_round = blackJack(deck, played_deck,
player_hand,
dealer_hand, funds, bet,
dealer_cards,
CardSprite)
if pv > 21:
            # if the player busts
deck, player_hand, dealer_hand, played_deck, funds, end_round, display_font = bust(deck, player_hand,
dealer_hand,
played_deck, funds, 0,
bet, dealer_cards,
CardSprite)
    # update the buttons
# deal
deck, played_deck, player_hand, dealer_hand, dealer_card_position, player_card_position, end_round, display_font, click, handsPlayed = deal_butt.update(
mX, mY, deck, played_deck, end_round, CardSprite, dealer_cards, player_hand, dealer_hand, dealer_card_position,
player_card_position, display_font,
player_cards, click, handsPlayed)
# hit
deck, played_deck, player_hand, player_card_position, click = hit_butt.update(mX, mY, deck, played_deck,
player_hand,
player_cards,
player_card_position, end_round,
CardSprite, click)
# stand
deck, played_deck, end_round, funds, player_hand, played_deck, player_card_position, display_font = stand_button.update(
mX,
mY,
deck,
played_deck,
player_hand,
dealer_hand,
dealer_cards,
player_card_position,
end_round,
CardSprite,
funds,
bet,
display_font)
# double
deck, played_deck, end_round, funds, player_hand, played_deck, player_card_position, display_font, bet = dbl_butt.update(
mX,
mY,
deck,
played_deck,
player_hand,
dealer_hand,
player_cards,
dealer_cards,
player_card_position,
end_round,
CardSprite,
funds,
bet,
display_font)
    # bet buttons
bet, click = bet_up.update(mX, mY, bet, funds, click, end_round)
bet, click = bet_down.update(mX, mY, bet, click, end_round)
    # draw them onto the screen.
buttons.draw(screen)
    # if there are cards on screen, draw them there
if dealer_cards:
player_cards.update()
player_cards.draw(screen)
dealer_cards.update()
dealer_cards.draw(screen)
    # update the game window
pygame.display.flip()<|fim▁end|> |
def shuffle(deck):
# Przyjmuje talię jako argument i zwraca potasowaną talię. Tasowanie metodą random.shuffle(). |
<|file_name|>test_hive_partition.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from unittest.mock import patch
from airflow.providers.apache.hive.sensors.hive_partition import HivePartitionSensor
from tests.providers.apache.hive import DEFAULT_DATE, TestHiveEnvironment
from tests.test_utils.mock_hooks import MockHiveMetastoreHook<|fim▁hole|>@unittest.skipIf('AIRFLOW_RUNALL_TESTS' not in os.environ, "Skipped because AIRFLOW_RUNALL_TESTS is not set")
@patch(
'airflow.providers.apache.hive.sensors.hive_partition.HiveMetastoreHook',
side_effect=MockHiveMetastoreHook,
)
class TestHivePartitionSensor(TestHiveEnvironment):
def test_hive_partition_sensor(self, mock_hive_metastore_hook):
op = HivePartitionSensor(
task_id='hive_partition_check', table='airflow.static_babynames_partitioned', dag=self.dag
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)<|fim▁end|> | |
<|file_name|>post_render.js<|end_file_name|><|fim▁begin|>var util = require('hexo-util');
var code = [
'if tired && night:',
' sleep()'
].join('\n');
<|fim▁hole|> '```',
'some content',
'',
'## Another title',
'{% blockquote %}',
'quote content',
'{% endblockquote %}',
'',
'{% quote Hello World %}',
'quote content',
'{% endquote %}'
].join('\n');
exports.content = content;
exports.expected = [
'<h1 id="Title"><a href="#Title" class="headerlink" title="Title"></a>Title</h1>',
util.highlight(code, {lang: 'python'}),
'\n<p>some content</p>\n',
'<h2 id="Another-title"><a href="#Another-title" class="headerlink" title="Another title"></a>Another title</h2>',
'<blockquote>',
'<p>quote content</p>\n',
'</blockquote>\n',
'<blockquote><p>quote content</p>\n',
'<footer><strong>Hello World</strong></footer></blockquote>'
].join('');<|fim▁end|> | var content = [
'# Title',
'``` python',
code, |
<|file_name|>Execution.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.executiongraph;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.Archiveable;
import org.apache.flink.api.common.InputDependencyConstraint;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.runtime.JobException;
import org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.checkpoint.CheckpointType;
import org.apache.flink.runtime.checkpoint.JobManagerTaskRestore;
import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.clusterframework.types.SlotProfile;
import org.apache.flink.runtime.concurrent.ComponentMainThreadExecutor;
import org.apache.flink.runtime.concurrent.FutureUtils;
import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.instance.SlotSharingGroupId;
import org.apache.flink.runtime.io.network.partition.PartitionTracker;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.LocationPreferenceConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException;
import org.apache.flink.runtime.jobmanager.scheduler.ScheduledUnit;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway;
import org.apache.flink.runtime.jobmaster.LogicalSlot;
import org.apache.flink.runtime.jobmaster.SlotRequestId;
import org.apache.flink.runtime.jobmaster.slotpool.SlotProvider;
import org.apache.flink.runtime.messages.Acknowledge;
import org.apache.flink.runtime.messages.StackTraceSampleResponse;
import org.apache.flink.runtime.shuffle.PartitionDescriptor;
import org.apache.flink.runtime.shuffle.ProducerDescriptor;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;
import org.apache.flink.runtime.state.KeyGroupRangeAssignment;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.OptionalFailure;
import org.apache.flink.util.function.ThrowingRunnable;
import org.slf4j.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory.getConsumedPartitionShuffleDescriptor;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELED;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELING;
import static org.apache.flink.runtime.execution.ExecutionState.CREATED;
import static org.apache.flink.runtime.execution.ExecutionState.DEPLOYING;
import static org.apache.flink.runtime.execution.ExecutionState.FAILED;
import static org.apache.flink.runtime.execution.ExecutionState.FINISHED;
import static org.apache.flink.runtime.execution.ExecutionState.RUNNING;
import static org.apache.flink.runtime.execution.ExecutionState.SCHEDULED;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* A single execution of a vertex. While an {@link ExecutionVertex} can be executed multiple times
* (for recovery, re-computation, re-configuration), this class tracks the state of a single execution
* of that vertex and the resources.
*
* <h2>Lock free state transitions</h2>
*
* <p>In several points of the code, we need to deal with possible concurrent state changes and actions.
* For example, while the call to deploy a task (send it to the TaskManager) happens, the task gets cancelled.
*
* <p>We could lock the entire portion of the code (decision to deploy, deploy, set state to running) such that
* it is guaranteed that any "cancel command" will only pick up after deployment is done and that the "cancel
* command" call will never overtake the deploying call.
*
 * <p>This blocks the threads big time, because the remote calls may take long. Depending on their locking behavior, it
* may even result in distributed deadlocks (unless carefully avoided). We therefore use atomic state updates and
* occasional double-checking to ensure that the state after a completed call is as expected, and trigger correcting
* actions if it is not. Many actions are also idempotent (like canceling).
*/
public class Execution implements AccessExecution, Archiveable<ArchivedExecution>, LogicalSlot.Payload {
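// Illustrative sketch (not part of the original source): the "lock free state
// transitions" described above amount to a CAS followed by a double-check, with an
// idempotent corrective action on a miss. The helper sendDeployCall() is hypothetical:
//
//   if (STATE_UPDATER.compareAndSet(this, SCHEDULED, DEPLOYING)) {
//       sendDeployCall();                              // potentially long remote call
//       if (state != DEPLOYING) {                      // double-check: concurrent cancel?
//           sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);  // corrective, idempotent
//       }
//   }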
private static final AtomicReferenceFieldUpdater<Execution, ExecutionState> STATE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(Execution.class, ExecutionState.class, "state");
private static final AtomicReferenceFieldUpdater<Execution, LogicalSlot> ASSIGNED_SLOT_UPDATER = AtomicReferenceFieldUpdater.newUpdater(
Execution.class,
LogicalSlot.class,
"assignedResource");
private static final Logger LOG = ExecutionGraph.LOG;
private static final int NUM_CANCEL_CALL_TRIES = 3;
private static final int NUM_STOP_CALL_TRIES = 3;
// --------------------------------------------------------------------------------------------
/** The executor which is used to execute futures. */
private final Executor executor;
/** The execution vertex whose task this execution executes. */
private final ExecutionVertex vertex;
/** The unique ID marking the specific execution instant of the task. */
private final ExecutionAttemptID attemptId;
/** Gets the global modification version of the execution graph when this execution was created.
* This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions. */
private final long globalModVersion;
/** The timestamps when state transitions occurred, indexed by {@link ExecutionState#ordinal()}. */
private final long[] stateTimestamps;
private final int attemptNumber;
private final Time rpcTimeout;
private final Collection<PartitionInfo> partitionInfos;
/** A future that completes once the Execution reaches a terminal ExecutionState. */
private final CompletableFuture<ExecutionState> terminalStateFuture;
private final CompletableFuture<?> releaseFuture;
private final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture;
private volatile ExecutionState state = CREATED;
private volatile LogicalSlot assignedResource;
private volatile Throwable failureCause; // once assigned, never changes
/** Information to restore the task on recovery, such as checkpoint id and task state snapshot. */
@Nullable
private volatile JobManagerTaskRestore taskRestore;
/** This field holds the allocation id once it was assigned successfully. */
@Nullable
private volatile AllocationID assignedAllocationID;
// ------------------------ Accumulators & Metrics ------------------------
/** Lock for updating the accumulators atomically.
 * Prevents final accumulators from being overwritten by partial accumulators on a late heartbeat. */
private final Object accumulatorLock = new Object();
/* Continuously updated map of user-defined accumulators */
private volatile Map<String, Accumulator<?, ?>> userAccumulators;
private volatile IOMetrics ioMetrics;
private Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions;
// --------------------------------------------------------------------------------------------
/**
* Creates a new Execution attempt.
*
* @param executor
* The executor used to dispatch callbacks from futures and asynchronous RPC calls.
* @param vertex
* The execution vertex to which this Execution belongs
* @param attemptNumber
* The execution attempt number.
* @param globalModVersion
* The global modification version of the execution graph when this execution was created
* @param startTimestamp
* The timestamp that marks the creation of this Execution
* @param rpcTimeout
* The rpcTimeout for RPC calls like deploy/cancel/stop.
*/
public Execution(
Executor executor,
ExecutionVertex vertex,
int attemptNumber,
long globalModVersion,
long startTimestamp,
Time rpcTimeout) {
this.executor = checkNotNull(executor);
this.vertex = checkNotNull(vertex);
this.attemptId = new ExecutionAttemptID();
this.rpcTimeout = checkNotNull(rpcTimeout);
this.globalModVersion = globalModVersion;
this.attemptNumber = attemptNumber;
this.stateTimestamps = new long[ExecutionState.values().length];
markTimestamp(CREATED, startTimestamp);
this.partitionInfos = new ArrayList<>(16);
this.producedPartitions = Collections.emptyMap();
this.terminalStateFuture = new CompletableFuture<>();
this.releaseFuture = new CompletableFuture<>();
this.taskManagerLocationFuture = new CompletableFuture<>();
this.assignedResource = null;
}
// --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
public ExecutionVertex getVertex() {
return vertex;
}
@Override
public ExecutionAttemptID getAttemptId() {
return attemptId;
}
@Override
public int getAttemptNumber() {
return attemptNumber;
}
@Override
public ExecutionState getState() {
return state;
}
@Nullable
public AllocationID getAssignedAllocationID() {
return assignedAllocationID;
}
/**
* Gets the global modification version of the execution graph when this execution was created.
*
* <p>This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions.
*/
public long getGlobalModVersion() {
return globalModVersion;
}
public CompletableFuture<TaskManagerLocation> getTaskManagerLocationFuture() {
return taskManagerLocationFuture;
}
public LogicalSlot getAssignedResource() {
return assignedResource;
}
public Optional<ResultPartitionDeploymentDescriptor> getResultPartitionDeploymentDescriptor(
IntermediateResultPartitionID id) {
return Optional.ofNullable(producedPartitions.get(id));
}
/**
* Tries to assign the given slot to the execution. The assignment works only if the
* Execution is in state SCHEDULED. Returns true, if the resource could be assigned.
*
* @param logicalSlot to assign to this execution
* @return true if the slot could be assigned to the execution, otherwise false
*/
@VisibleForTesting
boolean tryAssignResource(final LogicalSlot logicalSlot) {
assertRunningInJobMasterMainThread();
checkNotNull(logicalSlot);
// only allow setting the assigned resource in state SCHEDULED or CREATED
// note: we also accept resource assignment when being in state CREATED for testing purposes
if (state == SCHEDULED || state == CREATED) {
if (ASSIGNED_SLOT_UPDATER.compareAndSet(this, null, logicalSlot)) {
if (logicalSlot.tryAssignPayload(this)) {
// check for concurrent modification (e.g. cancelling call)
if ((state == SCHEDULED || state == CREATED) && !taskManagerLocationFuture.isDone()) {
taskManagerLocationFuture.complete(logicalSlot.getTaskManagerLocation());
assignedAllocationID = logicalSlot.getAllocationId();
return true;
} else {
// free assigned resource and return false
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
// this execution already has another slot assigned
return false;
}
} else {
// do not allow resource assignment unless we are in state SCHEDULED or CREATED
return false;
}
}
public InputSplit getNextInputSplit() {
final LogicalSlot slot = this.getAssignedResource();
final String host = slot != null ? slot.getTaskManagerLocation().getHostname() : null;
return this.vertex.getNextInputSplit(host);
}
@Override
public TaskManagerLocation getAssignedResourceLocation() {
// returns non-null only when a location is already assigned
final LogicalSlot currentAssignedResource = assignedResource;
return currentAssignedResource != null ? currentAssignedResource.getTaskManagerLocation() : null;
}
public Throwable getFailureCause() {
return failureCause;
}
@Override
public String getFailureCauseAsString() {
return ExceptionUtils.stringifyException(getFailureCause());
}
@Override
public long[] getStateTimestamps() {
return stateTimestamps;
}
@Override
public long getStateTimestamp(ExecutionState state) {
return this.stateTimestamps[state.ordinal()];
}
public boolean isFinished() {
return state.isTerminal();
}
@Nullable
public JobManagerTaskRestore getTaskRestore() {
return taskRestore;
}
/**
* Sets the initial state for the execution. The serialized state is then shipped via the
* {@link TaskDeploymentDescriptor} to the TaskManagers.
*
* @param taskRestore information to restore the state
*/
public void setInitialState(@Nullable JobManagerTaskRestore taskRestore) {
this.taskRestore = taskRestore;
}
/**
* Gets a future that completes once the task execution reaches a terminal state.
* The future will be completed with specific state that the execution reached.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the execution reaches a terminal state
*/
@Override
public CompletableFuture<ExecutionState> getTerminalStateFuture() {
return terminalStateFuture;
}
/**
* Gets the release future which is completed once the execution reaches a terminal
* state and the assigned resource has been released.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the assigned resource has been released
*/
public CompletableFuture<?> getReleaseFuture() {
return releaseFuture;
}
// --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
public CompletableFuture<Void> scheduleForExecution() {
final ExecutionGraph executionGraph = getVertex().getExecutionGraph();
final SlotProvider resourceProvider = executionGraph.getSlotProvider();
final boolean allowQueued = executionGraph.isQueuedSchedulingAllowed();
return scheduleForExecution(
resourceProvider,
allowQueued,
LocationPreferenceConstraint.ANY,
Collections.emptySet());
}
/**
 * NOTE: This method only throws exceptions if it is in an illegal state to be scheduled, or if the task needs
 * to be scheduled immediately and no resource is available. If the task is accepted by the scheduler, any
* error sets the vertex state to failed and triggers the recovery logic.
*
* @param slotProvider The slot provider to use to allocate slot for this execution attempt.
* @param queued Flag to indicate whether the scheduler may queue this task if it cannot
* immediately deploy it.
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @return Future which is completed once the Execution has been deployed
*/
public CompletableFuture<Void> scheduleForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds) {
assertRunningInJobMasterMainThread();
final ExecutionGraph executionGraph = vertex.getExecutionGraph();
final Time allocationTimeout = executionGraph.getAllocationTimeout();
try {
final CompletableFuture<Execution> allocationFuture = allocateResourcesForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout);
final CompletableFuture<Void> deploymentFuture;
if (allocationFuture.isDone() || queued) {
deploymentFuture = allocationFuture.thenRun(ThrowingRunnable.unchecked(this::deploy));
} else {
deploymentFuture = FutureUtils.completedExceptionally(
new IllegalArgumentException("The slot allocation future has not been completed yet."));
}
deploymentFuture.whenComplete(
(Void ignored, Throwable failure) -> {
if (failure != null) {
final Throwable stripCompletionException = ExceptionUtils.stripCompletionException(failure);
final Throwable schedulingFailureCause;
if (stripCompletionException instanceof TimeoutException) {
schedulingFailureCause = new NoResourceAvailableException(
"Could not allocate enough slots within timeout of " + allocationTimeout + " to run the job. " +
"Please make sure that the cluster has enough resources.");
} else {
schedulingFailureCause = stripCompletionException;
}
markFailed(schedulingFailureCause);
}
});
return deploymentFuture;
} catch (IllegalExecutionStateException e) {
return FutureUtils.completedExceptionally(e);
}
}
/**
* Allocates resources for the execution.
*
* <p>Allocates following resources:
* <ol>
* <li>slot obtained from the slot provider</li>
* <li>registers produced partitions with the {@link org.apache.flink.runtime.shuffle.ShuffleMaster}</li>
* </ol>
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with this execution once the slot has been assigned
* or with an exception if an error occurred.
*/
CompletableFuture<Execution> allocateResourcesForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
return allocateAndAssignSlotForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout)
.thenCompose(slot -> registerProducedPartitions(slot.getTaskManagerLocation()));
}
/**
* Allocates and assigns a slot obtained from the slot provider to the execution.
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with the allocated slot once it has been assigned
* or with an exception if an error occurred.
*/
private CompletableFuture<LogicalSlot> allocateAndAssignSlotForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
checkNotNull(slotProvider);
assertRunningInJobMasterMainThread();
final SlotSharingGroup sharingGroup = vertex.getJobVertex().getSlotSharingGroup();
final CoLocationConstraint locationConstraint = vertex.getLocationConstraint();
// sanity check
if (locationConstraint != null && sharingGroup == null) {
throw new IllegalStateException(
"Trying to schedule with co-location constraint but without slot sharing allowed.");
}
// this method only works if the execution is in the state 'CREATED'
if (transitionState(CREATED, SCHEDULED)) {
final SlotSharingGroupId slotSharingGroupId = sharingGroup != null ? sharingGroup.getSlotSharingGroupId() : null;
ScheduledUnit toSchedule = locationConstraint == null ?
new ScheduledUnit(this, slotSharingGroupId) :
new ScheduledUnit(this, slotSharingGroupId, locationConstraint);
// try to extract previous allocation ids, if applicable, so that we can reschedule to the same slot
ExecutionVertex executionVertex = getVertex();
AllocationID lastAllocation = executionVertex.getLatestPriorAllocation();
Collection<AllocationID> previousAllocationIDs =
lastAllocation != null ? Collections.singletonList(lastAllocation) : Collections.emptyList();
// calculate the preferred locations
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture =
calculatePreferredLocations(locationPreferenceConstraint);
final SlotRequestId slotRequestId = new SlotRequestId();
final CompletableFuture<LogicalSlot> logicalSlotFuture =
preferredLocationsFuture.thenCompose(
(Collection<TaskManagerLocation> preferredLocations) ->
slotProvider.allocateSlot(
slotRequestId,
toSchedule,
new SlotProfile(
ResourceProfile.UNKNOWN,
preferredLocations,
previousAllocationIDs,
allPreviousExecutionGraphAllocationIds),
queued,
allocationTimeout));
// register a callback to cancel the slot request in case the execution gets canceled
releaseFuture.whenComplete(
(Object ignored, Throwable throwable) -> {
if (logicalSlotFuture.cancel(false)) {
slotProvider.cancelSlotRequest(
slotRequestId,
slotSharingGroupId,
new FlinkException("Execution " + this + " was released."));
}
});
// This forces calls to the slot pool back into the main thread, for normal and exceptional completion
return logicalSlotFuture.handle(
(LogicalSlot logicalSlot, Throwable failure) -> {
if (failure != null) {
throw new CompletionException(failure);
}
if (tryAssignResource(logicalSlot)) {
return logicalSlot;
} else {
// release the slot
logicalSlot.releaseSlot(new FlinkException("Could not assign logical slot to execution " + this + '.'));
throw new CompletionException(
new FlinkException(
"Could not assign slot " + logicalSlot + " to execution " + this + " because it has already been assigned "));
}
});
} else {
// call race, already deployed, or already done
throw new IllegalExecutionStateException(this, CREATED, state);
}
}
@VisibleForTesting
CompletableFuture<Execution> registerProducedPartitions(TaskManagerLocation location) {
assertRunningInJobMasterMainThread();
return FutureUtils.thenApplyAsyncIfNotDone(
registerProducedPartitions(vertex, location, attemptId),
vertex.getExecutionGraph().getJobMasterMainThreadExecutor(),
producedPartitionsCache -> {
producedPartitions = producedPartitionsCache;
startTrackingPartitions(location.getResourceID(), producedPartitionsCache.values());
return this;
});
}
@VisibleForTesting
static CompletableFuture<Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor>> registerProducedPartitions(
ExecutionVertex vertex,
TaskManagerLocation location,
ExecutionAttemptID attemptId) {
ProducerDescriptor producerDescriptor = ProducerDescriptor.create(location, attemptId);
boolean lazyScheduling = vertex.getExecutionGraph().getScheduleMode().allowLazyDeployment();
Collection<IntermediateResultPartition> partitions = vertex.getProducedPartitions().values();
Collection<CompletableFuture<ResultPartitionDeploymentDescriptor>> partitionRegistrations =
new ArrayList<>(partitions.size());
for (IntermediateResultPartition partition : partitions) {
PartitionDescriptor partitionDescriptor = PartitionDescriptor.from(partition);
int maxParallelism = getPartitionMaxParallelism(partition);
CompletableFuture<? extends ShuffleDescriptor> shuffleDescriptorFuture = vertex
.getExecutionGraph()
.getShuffleMaster()
.registerPartitionWithProducer(partitionDescriptor, producerDescriptor);
final boolean releasePartitionOnConsumption =
vertex.getExecutionGraph().isForcePartitionReleaseOnConsumption()
|| !partitionDescriptor.getPartitionType().isBlocking();
CompletableFuture<ResultPartitionDeploymentDescriptor> partitionRegistration = shuffleDescriptorFuture
.thenApply(shuffleDescriptor -> new ResultPartitionDeploymentDescriptor(
partitionDescriptor,
shuffleDescriptor,
maxParallelism,
lazyScheduling,
releasePartitionOnConsumption
? ShuffleDescriptor.ReleaseType.AUTO
: ShuffleDescriptor.ReleaseType.MANUAL));
partitionRegistrations.add(partitionRegistration);
}
return FutureUtils.combineAll(partitionRegistrations).thenApply(rpdds -> {
Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions =
new LinkedHashMap<>(partitions.size());
rpdds.forEach(rpdd -> producedPartitions.put(rpdd.getPartitionId(), rpdd));
return producedPartitions;
});
}
private static int getPartitionMaxParallelism(IntermediateResultPartition partition) {
// TODO consumers.isEmpty() only exists for test, currently there has to be exactly one consumer in real jobs!
final List<List<ExecutionEdge>> consumers = partition.getConsumers();
int maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM;
if (!consumers.isEmpty()) {
List<ExecutionEdge> consumer = consumers.get(0);
ExecutionJobVertex consumerVertex = consumer.get(0).getTarget().getJobVertex();
maxParallelism = consumerVertex.getMaxParallelism();
}
return maxParallelism;
}
/**
* Deploys the execution to the previously assigned resource.
*
* @throws JobException if the execution cannot be deployed to the assigned resource
*/
public void deploy() throws JobException {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
checkNotNull(slot, "In order to deploy the execution we first have to assign a resource via tryAssignResource.");
// Check if the TaskManager died in the meantime
// This only speeds up the response to TaskManagers failing concurrently to deployments.
// The more general check is the rpcTimeout of the deployment call
if (!slot.isAlive()) {
throw new JobException("Target slot (TaskManager) for deployment is no longer alive.");
}
// make sure exactly one deployment call happens from the correct state
// note: the transition from CREATED to DEPLOYING is for testing purposes only
ExecutionState previous = this.state;
if (previous == SCHEDULED || previous == CREATED) {
if (!transitionState(previous, DEPLOYING)) {
// race condition, someone else beat us to the deploying call.
// this should actually not happen and indicates a race somewhere else
throw new IllegalStateException("Cannot deploy task: Concurrent deployment call race.");
}
}
else {
// vertex may have been cancelled, or it was already scheduled
throw new IllegalStateException("The vertex must be in CREATED or SCHEDULED state to be deployed. Found state " + previous);
}
if (this != slot.getPayload()) {
throw new IllegalStateException(
String.format("The execution %s has not been assigned to the assigned slot.", this));
}
try {
// race double check, did we fail/cancel and do we need to release the slot?
if (this.state != DEPLOYING) {
slot.releaseSlot(new FlinkException("Actual state of execution " + this + " (" + state + ") does not match expected state DEPLOYING."));
return;
}
if (LOG.isInfoEnabled()) {
LOG.info(String.format("Deploying %s (attempt #%d) to %s", vertex.getTaskNameWithSubtaskIndex(),
attemptNumber, getAssignedResourceLocation()));
}
final TaskDeploymentDescriptor deployment = TaskDeploymentDescriptorFactory
.fromExecutionVertex(vertex, attemptNumber)
.createDeploymentDescriptor(
slot.getAllocationId(),
slot.getPhysicalSlotNumber(),
taskRestore,
producedPartitions.values());
// null taskRestore to let it be GC'ed
taskRestore = null;
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
vertex.getExecutionGraph().getJobMasterMainThreadExecutor();
// We run the submission in the future executor so that the serialization of large TDDs does not block
// the main thread and sync back to the main thread once submission is completed.
CompletableFuture.supplyAsync(() -> taskManagerGateway.submitTask(deployment, rpcTimeout), executor)
.thenCompose(Function.identity())
.whenCompleteAsync(
(ack, failure) -> {
// only respond to the failure case
if (failure != null) {
if (failure instanceof TimeoutException) {
String taskname = vertex.getTaskNameWithSubtaskIndex() + " (" + attemptId + ')';
markFailed(new Exception(
"Cannot deploy task " + taskname + " - TaskManager (" + getAssignedResourceLocation()
+ ") not responding after a rpcTimeout of " + rpcTimeout, failure));
} else {
markFailed(failure);
}
}
},
jobMasterMainThreadExecutor);
}
catch (Throwable t) {
markFailed(t);
ExceptionUtils.rethrow(t);
}
}
public void cancel() {
// depending on the previous state, we go directly to cancelled (no cancel call necessary)
// -- or to canceling (cancel call needs to be sent to the task manager)
// because of several possibly previous states, we need to again loop until we make a
// successful atomic state transition
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == CANCELING || current == CANCELED) {
// already taken care of, no need to cancel again
return;
}
// these two are the common cases where we need to send a cancel call
else if (current == RUNNING || current == DEPLOYING) {
// try to transition to canceling, if successful, send the cancel call
if (startCancelling(NUM_CANCEL_CALL_TRIES)) {
return;
}
// else: fall through the loop
}
else if (current == FINISHED || current == FAILED) {
// nothing to do any more. finished/failed before it could be cancelled.
// in any case, the task is removed from the TaskManager already
return;
}
else if (current == CREATED || current == SCHEDULED) {
// from here, we can directly switch to cancelled, because no task has been deployed
if (cancelAtomically()) {
return;
}
// else: fall through the loop
}
else {
throw new IllegalStateException(current.name());
}
}
}
public CompletableFuture<?> suspend() {
switch(state) {
case RUNNING:
case DEPLOYING:
case CREATED:
case SCHEDULED:
if (!cancelAtomically()) {
throw new IllegalStateException(
String.format("Could not directly go to %s from %s.", CANCELED.name(), state.name()));
}
break;
case CANCELING:
completeCancelling();
break;
case FINISHED:
case FAILED:
case CANCELED:
break;
default:
throw new IllegalStateException(state.name());
}
return releaseFuture;
}
private void scheduleConsumer(ExecutionVertex consumerVertex) {
try {
final ExecutionGraph executionGraph = consumerVertex.getExecutionGraph();
consumerVertex.scheduleForExecution(
executionGraph.getSlotProvider(),
executionGraph.isQueuedSchedulingAllowed(),
LocationPreferenceConstraint.ANY, // there must be at least one known location
Collections.emptySet());
} catch (Throwable t) {
consumerVertex.fail(new IllegalStateException("Could not schedule consumer " +
"vertex " + consumerVertex, t));
}
}
void scheduleOrUpdateConsumers(List<List<ExecutionEdge>> allConsumers) {
assertRunningInJobMasterMainThread();
final int numConsumers = allConsumers.size();
if (numConsumers > 1) {
fail(new IllegalStateException("Currently, only a single consumer group per partition is supported."));
} else if (numConsumers == 0) {
return;
}
for (ExecutionEdge edge : allConsumers.get(0)) {
final ExecutionVertex consumerVertex = edge.getTarget();
final Execution consumer = consumerVertex.getCurrentExecutionAttempt();
final ExecutionState consumerState = consumer.getState();
// ----------------------------------------------------------------
// Consumer is created => try to schedule it and the partition info
// is known during deployment
// ----------------------------------------------------------------
if (consumerState == CREATED) {
// Schedule the consumer vertex if its inputs constraint is satisfied, otherwise skip the scheduling.
// A shortcut of input constraint check is added for InputDependencyConstraint.ANY since
// at least one of the consumer vertex's inputs is consumable here. This is to avoid the
// O(N) complexity introduced by input constraint check for InputDependencyConstraint.ANY,
// as we do not want the default scheduling performance to be affected.
if (consumerVertex.getInputDependencyConstraint() == InputDependencyConstraint.ANY ||
consumerVertex.checkInputDependencyConstraints()) {
scheduleConsumer(consumerVertex);
}
}
// ----------------------------------------------------------------
// Consumer is running => send update message now
// Consumer is deploying => cache the partition info which would be
// sent after switching to running
// ----------------------------------------------------------------
else if (consumerState == DEPLOYING || consumerState == RUNNING) {
final PartitionInfo partitionInfo = createPartitionInfo(edge);
if (consumerState == DEPLOYING) {
consumerVertex.cachePartitionInfo(partitionInfo);
} else {
consumer.sendUpdatePartitionInfoRpcCall(Collections.singleton(partitionInfo));
}
}
}
}
private static PartitionInfo createPartitionInfo(ExecutionEdge executionEdge) {
IntermediateDataSetID intermediateDataSetID = executionEdge.getSource().getIntermediateResult().getId();
ShuffleDescriptor shuffleDescriptor = getConsumedPartitionShuffleDescriptor(executionEdge, false);
return new PartitionInfo(intermediateDataSetID, shuffleDescriptor);
}
/**
* This method fails the vertex due to an external condition. The task will move to state FAILED.
* If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to the TaskManager.
*
* @param t The exception that caused the task to fail.
*/
@Override
public void fail(Throwable t) {
processFail(t, false);
}
/**
* Request a stack trace sample from the task of this execution.
*
* @param sampleId of the stack trace sample
* @param numSamples the sample should contain
* @param delayBetweenSamples to wait
* @param maxStackTraceDepth of the samples
* @param timeout until the request times out
* @return Future stack trace sample response
*/
public CompletableFuture<StackTraceSampleResponse> requestStackTraceSample(
int sampleId,
int numSamples,
Time delayBetweenSamples,
int maxStackTraceDepth,
Time timeout) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
return taskManagerGateway.requestStackTraceSample(
attemptId,
sampleId,
numSamples,
delayBetweenSamples,
maxStackTraceDepth,
timeout);
} else {
return FutureUtils.completedExceptionally(new Exception("The execution has no slot assigned."));
}
}
/**
* Notify the task of this execution about a completed checkpoint.
*
* @param checkpointId of the completed checkpoint
* @param timestamp of the completed checkpoint
*/
public void notifyCheckpointComplete(long checkpointId, long timestamp) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.notifyCheckpointComplete(attemptId, getVertex().getJobId(), checkpointId, timestamp);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is " +
"no longer running.");
}
}
/**
* Trigger a new checkpoint on the task of this execution.
*
 * @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
*/
public void triggerCheckpoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, false);
}
/**
* Trigger a new checkpoint on the task of this execution.
*
 * @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
* @param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
* to fire any registered event-time timers
*/
public void triggerSynchronousSavepoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
}
private void triggerCheckpointHelper(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
final CheckpointType checkpointType = checkpointOptions.getCheckpointType();
if (advanceToEndOfEventTime && !(checkpointType.isSynchronous() && checkpointType.isSavepoint())) {
throw new IllegalArgumentException("Only synchronous savepoints are allowed to advance the watermark to MAX.");
}
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.triggerCheckpoint(attemptId, getVertex().getJobId(), checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is no longer running.");
}
}
// --------------------------------------------------------------------------------------------
// Callbacks
// --------------------------------------------------------------------------------------------
/**
 * This method marks the task as failed, but will make no attempt to remove the task execution from the task manager.
 * It is intended for cases where the task is known not to be running, or when the TaskManager reports failure
* (in which case it has already removed the task).
*
* @param t The exception that caused the task to fail.
*/
void markFailed(Throwable t) {
processFail(t, true);
}
void markFailed(Throwable t, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
processFail(t, true, userAccumulators, metrics);
}
@VisibleForTesting
void markFinished() {
markFinished(null, null);
}
void markFinished(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
assertRunningInJobMasterMainThread();
// this call usually comes during RUNNING, but may also come while still in deploying (very fast tasks!)
while (true) {
ExecutionState current = this.state;
if (current == RUNNING || current == DEPLOYING) {
if (transitionState(current, FINISHED)) {
try {
for (IntermediateResultPartition finishedPartition
: getVertex().finishAllBlockingPartitions()) {
IntermediateResultPartition[] allPartitions = finishedPartition
.getIntermediateResult().getPartitions();
for (IntermediateResultPartition partition : allPartitions) {
scheduleOrUpdateConsumers(partition.getConsumers());
}
}
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(null);
vertex.getExecutionGraph().deregisterExecution(this);
}
finally {
vertex.executionFinished(this);
}
return;
}
}
else if (current == CANCELING) {
// we sent a cancel call, and the task manager finished before it arrived. We
// will never get a CANCELED call back from the task manager
completeCancelling(userAccumulators, metrics);
return;
}
else if (current == CANCELED || current == FAILED) {
if (LOG.isDebugEnabled()) {
LOG.debug("Task FINISHED, but concurrently went to state " + state);
}
return;
}
else {
// this should not happen, we need to fail this
markFailed(new Exception("Vertex received FINISHED message while being in state " + state));
return;
}
}
}
private boolean cancelAtomically() {
if (startCancelling(0)) {
completeCancelling();
return true;
} else {
return false;
}
}
private boolean startCancelling(int numberCancelRetries) {
if (transitionState(state, CANCELING)) {
taskManagerLocationFuture.cancel(false);
sendCancelRpcCall(numberCancelRetries);
return true;
} else {
return false;
}
}
void completeCancelling() {
completeCancelling(null, null);
}
void completeCancelling(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
// the taskmanagers can themselves cancel tasks without an external trigger, if they find that the
// network stack is canceled (for example by a failing / canceling receiver or sender).
// this is an artifact of the old network runtime, but for now we need to support task transitions
// from running directly to canceled
while (true) {
ExecutionState current = this.state;
if (current == CANCELED) {
return;
}
else if (current == CANCELING || current == RUNNING || current == DEPLOYING) {
updateAccumulatorsAndMetrics(userAccumulators, metrics);
if (transitionState(current, CANCELED)) {
finishCancellation();
return;
}
// else fall through the loop
}
else {
// failing in the meantime may happen and is no problem.
// anything else is a serious problem !!!
if (current != FAILED) {
String message = String.format("Asynchronous race: Found %s in state %s after successful cancel call.", vertex.getTaskNameWithSubtaskIndex(), state);
LOG.error(message);
vertex.getExecutionGraph().failGlobal(new Exception(message));
}
return;
}
}
}
private void finishCancellation() {
releaseAssignedResource(new FlinkException("Execution " + this + " was cancelled."));
vertex.getExecutionGraph().deregisterExecution(this);
// release partitions on TM in case the Task finished while we were already CANCELING
stopTrackingAndReleasePartitions();
}
void cachePartitionInfo(PartitionInfo partitionInfo) {
partitionInfos.add(partitionInfo);
}
private void sendPartitionInfos() {
if (!partitionInfos.isEmpty()) {
sendUpdatePartitionInfoRpcCall(new ArrayList<>(partitionInfos));
partitionInfos.clear();
}
}
// --------------------------------------------------------------------------------------------
// Internal Actions
// --------------------------------------------------------------------------------------------
private boolean processFail(Throwable t, boolean isCallback) {
return processFail(t, isCallback, null, null);
}
private boolean processFail(Throwable t, boolean isCallback, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
// damn, we failed. This means only that we keep our books and notify our parent JobExecutionVertex
// the actual computation on the task manager is cleaned up by the TaskManager that noticed the failure
// we may need to loop multiple times (in the presence of concurrent calls) in order to
// atomically switch to failed
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == FAILED) {
// already failed. It is enough to remember once that we failed (it's sad enough)
return false;
}
if (current == CANCELED || current == FINISHED) {
// we are already aborting or are already aborted or we are already finished
if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring transition of vertex {} to {} while being {}.", getVertexWithAttempt(), FAILED, current);
}
return false;
}
if (current == CANCELING) {
completeCancelling(userAccumulators, metrics);
return false;
}
if (transitionState(current, FAILED, t)) {
// success (in a manner of speaking)
this.failureCause = t;
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(t);
vertex.getExecutionGraph().deregisterExecution(this);
stopTrackingAndReleasePartitions();
if (!isCallback && (current == RUNNING || current == DEPLOYING)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Sending out cancel request, to remove task execution from TaskManager.");
}
try {
if (assignedResource != null) {
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
} catch (Throwable tt) {
// no reason this should ever happen, but log it to be safe
LOG.error("Error triggering cancel call while marking task {} as failed.", getVertex().getTaskNameWithSubtaskIndex(), tt);
}
}
// leave the loop
return true;
}
}
}
boolean switchToRunning() {
if (transitionState(DEPLOYING, RUNNING)) {
sendPartitionInfos();
return true;
}
else {
// something happened while the call was in progress.
// it can mean:
// - canceling, while deployment was in progress. state is now canceling, or canceled, if the response overtook
// - finishing (execution and finished call overtook the deployment answer, which is possible and happens for fast tasks)
// - failed (execution, failure, and failure message overtook the deployment answer)
ExecutionState currentState = this.state;
if (currentState == FINISHED || currentState == CANCELED) {
// do nothing, the task was really fast (nice)
// or it was canceled really fast
}
else if (currentState == CANCELING || currentState == FAILED) {
if (LOG.isDebugEnabled()) {
// this log statement is guarded because the 'getVertexWithAttempt()' method
// performs string concatenations
LOG.debug("Concurrent canceling/failing of {} while deployment was in progress.", getVertexWithAttempt());
}
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
else {
String message = String.format("Concurrent unexpected state transition of task %s to %s while deployment was in progress.",
getVertexWithAttempt(), currentState);
if (LOG.isDebugEnabled()) {
LOG.debug(message);
}
// undo the deployment
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
// record the failure
markFailed(new Exception(message));
}
return false;
}
}
/**
* This method sends a CancelTask message to the instance of the assigned slot.
*
* <p>The sending is tried up to NUM_CANCEL_CALL_TRIES times.
*/
private void sendCancelRpcCall(int numberRetries) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
CompletableFuture<Acknowledge> cancelResultFuture = FutureUtils.retry(
() -> taskManagerGateway.cancelTask(attemptId, rpcTimeout),
numberRetries,
jobMasterMainThreadExecutor);
cancelResultFuture.whenComplete(
(ack, failure) -> {
if (failure != null) {
fail(new Exception("Task could not be canceled.", failure));
}
});
}
}
private void startTrackingPartitions(final ResourceID taskExecutorId, final Collection<ResultPartitionDeploymentDescriptor> partitions) {
PartitionTracker partitionTracker = vertex.getExecutionGraph().getPartitionTracker();
for (ResultPartitionDeploymentDescriptor partition : partitions) {
partitionTracker.startTrackingPartition(
taskExecutorId,
partition);
}
}
void stopTrackingAndReleasePartitions() {
LOG.info("Discarding the results produced by task execution {}.", attemptId);
if (producedPartitions != null && producedPartitions.size() > 0) {
final PartitionTracker partitionTracker = getVertex().getExecutionGraph().getPartitionTracker();
final List<ResultPartitionID> producedPartitionIds = producedPartitions.values().stream()
.map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
.map(ShuffleDescriptor::getResultPartitionID)
.collect(Collectors.toList());
partitionTracker.stopTrackingAndReleasePartitions(producedPartitionIds);
}
}
/**
* Update the partition infos on the assigned resource.
*
* @param partitionInfos for the remote task
*/
private void sendUpdatePartitionInfoRpcCall(
final Iterable<PartitionInfo> partitionInfos) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final TaskManagerLocation taskManagerLocation = slot.getTaskManagerLocation();
CompletableFuture<Acknowledge> updatePartitionsResultFuture = taskManagerGateway.updatePartitions(attemptId, partitionInfos, rpcTimeout);
updatePartitionsResultFuture.whenCompleteAsync(
(ack, failure) -> {
// fail if there was a failure
if (failure != null) {
fail(new IllegalStateException("Update task on TaskManager " + taskManagerLocation +
" failed due to:", failure));
}
}, getVertex().getExecutionGraph().getJobMasterMainThreadExecutor());
}
}
/**
* Releases the assigned resource and completes the release future
* once the assigned resource has been successfully released.
*
* @param cause for the resource release, null if none
*/
private void releaseAssignedResource(@Nullable Throwable cause) {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
if (slot != null) {
ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
slot.releaseSlot(cause)
.whenComplete((Object ignored, Throwable throwable) -> {
jobMasterMainThreadExecutor.assertRunningInMainThread();
if (throwable != null) {
releaseFuture.completeExceptionally(throwable);
} else {
releaseFuture.complete(null);
}
});
} else {
// no assigned resource --> we can directly complete the release future
releaseFuture.complete(null);
}
}
// --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
/**
* Calculates the preferred locations based on the location preference constraint.
*
* @param locationPreferenceConstraint constraint for the location preference
* @return Future containing the collection of preferred locations. This might not be completed if not all inputs
 * have had a resource assigned.
*/
@VisibleForTesting
public CompletableFuture<Collection<TaskManagerLocation>> calculatePreferredLocations(LocationPreferenceConstraint locationPreferenceConstraint) {
final Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures = getVertex().getPreferredLocations();
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture;
switch(locationPreferenceConstraint) {
case ALL:
preferredLocationsFuture = FutureUtils.combineAll(preferredLocationFutures);
break;
case ANY:
final ArrayList<TaskManagerLocation> completedTaskManagerLocations = new ArrayList<>(preferredLocationFutures.size());
for (CompletableFuture<TaskManagerLocation> preferredLocationFuture : preferredLocationFutures) {
if (preferredLocationFuture.isDone() && !preferredLocationFuture.isCompletedExceptionally()) {
final TaskManagerLocation taskManagerLocation = preferredLocationFuture.getNow(null);
if (taskManagerLocation == null) {
throw new FlinkRuntimeException("TaskManagerLocationFuture was completed with null. This indicates a programming bug.");
}
completedTaskManagerLocations.add(taskManagerLocation);
}
}
preferredLocationsFuture = CompletableFuture.completedFuture(completedTaskManagerLocations);
break;
default:
throw new RuntimeException("Unknown LocationPreferenceConstraint " + locationPreferenceConstraint + '.');
}
return preferredLocationsFuture;
}
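// Illustrative example (not from the original source): with two input location futures,
// one already completed with tm1 and one still pending, ALL yields a future that only
// completes once the pending location is known, whereas ANY completes immediately with
// just [tm1].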
private boolean transitionState(ExecutionState currentState, ExecutionState targetState) {
return transitionState(currentState, targetState, null);
}
private boolean transitionState(ExecutionState currentState, ExecutionState targetState, Throwable error) {
// sanity check
if (currentState.isTerminal()) {
throw new IllegalStateException("Cannot leave terminal state " + currentState + " to transition to " + targetState + '.');
}
if (STATE_UPDATER.compareAndSet(this, currentState, targetState)) {
markTimestamp(targetState);
if (error == null) {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState);
} else {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState, error);
}
if (targetState.isTerminal()) {
// complete the terminal state future
terminalStateFuture.complete(targetState);
}
// make sure that the state transition completes normally.
// potential errors in listeners must not affect the main logic
try {
vertex.notifyStateTransition(this, targetState, error);
}
catch (Throwable t) {
LOG.error("Error while notifying execution graph of execution state transition.", t);
}
return true;
} else {
return false;
}
}
private void markTimestamp(ExecutionState state) {
markTimestamp(state, System.currentTimeMillis());
}
private void markTimestamp(ExecutionState state, long timestamp) {
this.stateTimestamps[state.ordinal()] = timestamp;
}
public String getVertexWithAttempt() {
return vertex.getTaskNameWithSubtaskIndex() + " - execution #" + attemptNumber;
}
// ------------------------------------------------------------------------
// Accumulators
// ------------------------------------------------------------------------
/**
* Update accumulators (discarded when the Execution has already been terminated).
* @param userAccumulators the user accumulators
*/
public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) {
synchronized (accumulatorLock) {
if (!state.isTerminal()) {
this.userAccumulators = userAccumulators;
}
}
}
public Map<String, Accumulator<?, ?>> getUserAccumulators() {
return userAccumulators;
}
@Override
public StringifiedAccumulatorResult[] getUserAccumulatorsStringified() {
Map<String, OptionalFailure<Accumulator<?, ?>>> accumulators =
userAccumulators == null ?
null :
userAccumulators.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> OptionalFailure.of(entry.getValue())));
return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulators);
}
@Override
public int getParallelSubtaskIndex() {
return getVertex().getParallelSubtaskIndex();
}
@Override
public IOMetrics getIOMetrics() {
return ioMetrics;
}
private void updateAccumulatorsAndMetrics(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
if (userAccumulators != null) {
synchronized (accumulatorLock) {
this.userAccumulators = userAccumulators;
}
}
if (metrics != null) {
this.ioMetrics = metrics;
}
}
// ------------------------------------------------------------------------
// Standard utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
final LogicalSlot slot = assignedResource;
return String.format("Attempt #%d (%s) @ %s - [%s]", attemptNumber, vertex.getTaskNameWithSubtaskIndex(),
(slot == null ? "(unassigned)" : slot), state);
}
@Override
public ArchivedExecution archive() {
return new ArchivedExecution(this);
}
private void assertRunningInJobMasterMainThread() {
vertex.getExecutionGraph().assertRunningInJobMasterMainThread();
}
}
# File: datamodel_pb.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
class AggregateRpcStatsProto(ProtocolBuffer.ProtocolMessage):
has_service_call_name_ = 0
service_call_name_ = ""
has_total_amount_of_calls_ = 0
total_amount_of_calls_ = 0
has_total_cost_of_calls_microdollars_ = 0
total_cost_of_calls_microdollars_ = 0
def __init__(self, contents=None):
self.total_billed_ops_ = []
if contents is not None: self.MergeFromString(contents)
def service_call_name(self): return self.service_call_name_
def set_service_call_name(self, x):
self.has_service_call_name_ = 1
self.service_call_name_ = x
def clear_service_call_name(self):
if self.has_service_call_name_:
self.has_service_call_name_ = 0
self.service_call_name_ = ""
def has_service_call_name(self): return self.has_service_call_name_
def total_amount_of_calls(self): return self.total_amount_of_calls_
def set_total_amount_of_calls(self, x):
self.has_total_amount_of_calls_ = 1
self.total_amount_of_calls_ = x
def clear_total_amount_of_calls(self):
if self.has_total_amount_of_calls_:
self.has_total_amount_of_calls_ = 0
self.total_amount_of_calls_ = 0
def has_total_amount_of_calls(self): return self.has_total_amount_of_calls_
def total_cost_of_calls_microdollars(self): return self.total_cost_of_calls_microdollars_
def set_total_cost_of_calls_microdollars(self, x):
self.has_total_cost_of_calls_microdollars_ = 1
self.total_cost_of_calls_microdollars_ = x
def clear_total_cost_of_calls_microdollars(self):
if self.has_total_cost_of_calls_microdollars_:
self.has_total_cost_of_calls_microdollars_ = 0
self.total_cost_of_calls_microdollars_ = 0
def has_total_cost_of_calls_microdollars(self): return self.has_total_cost_of_calls_microdollars_
def total_billed_ops_size(self): return len(self.total_billed_ops_)
def total_billed_ops_list(self): return self.total_billed_ops_
def total_billed_ops(self, i):
return self.total_billed_ops_[i]
def mutable_total_billed_ops(self, i):
return self.total_billed_ops_[i]
def add_total_billed_ops(self):
x = BilledOpProto()
self.total_billed_ops_.append(x)
return x
def clear_total_billed_ops(self):
self.total_billed_ops_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_service_call_name()): self.set_service_call_name(x.service_call_name())
if (x.has_total_amount_of_calls()): self.set_total_amount_of_calls(x.total_amount_of_calls())
if (x.has_total_cost_of_calls_microdollars()): self.set_total_cost_of_calls_microdollars(x.total_cost_of_calls_microdollars())
for i in xrange(x.total_billed_ops_size()): self.add_total_billed_ops().CopyFrom(x.total_billed_ops(i))
def Equals(self, x):
if x is self: return 1
if self.has_service_call_name_ != x.has_service_call_name_: return 0
if self.has_service_call_name_ and self.service_call_name_ != x.service_call_name_: return 0
if self.has_total_amount_of_calls_ != x.has_total_amount_of_calls_: return 0
if self.has_total_amount_of_calls_ and self.total_amount_of_calls_ != x.total_amount_of_calls_: return 0
if self.has_total_cost_of_calls_microdollars_ != x.has_total_cost_of_calls_microdollars_: return 0
if self.has_total_cost_of_calls_microdollars_ and self.total_cost_of_calls_microdollars_ != x.total_cost_of_calls_microdollars_: return 0
if len(self.total_billed_ops_) != len(x.total_billed_ops_): return 0
for e1, e2 in zip(self.total_billed_ops_, x.total_billed_ops_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_service_call_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: service_call_name not set.')
if (not self.has_total_amount_of_calls_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: total_amount_of_calls not set.')
for p in self.total_billed_ops_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.service_call_name_))
n += self.lengthVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_): n += 1 + self.lengthVarInt64(self.total_cost_of_calls_microdollars_)
n += 1 * len(self.total_billed_ops_)
for i in xrange(len(self.total_billed_ops_)): n += self.lengthString(self.total_billed_ops_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_service_call_name_):
n += 1
n += self.lengthString(len(self.service_call_name_))
if (self.has_total_amount_of_calls_):
n += 1
n += self.lengthVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_): n += 1 + self.lengthVarInt64(self.total_cost_of_calls_microdollars_)
n += 1 * len(self.total_billed_ops_)
for i in xrange(len(self.total_billed_ops_)): n += self.lengthString(self.total_billed_ops_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_service_call_name()
self.clear_total_amount_of_calls()
self.clear_total_cost_of_calls_microdollars()
self.clear_total_billed_ops()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
out.putVarInt32(24)
out.putVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_):
out.putVarInt32(32)
out.putVarInt64(self.total_cost_of_calls_microdollars_)
for i in xrange(len(self.total_billed_ops_)):
out.putVarInt32(42)
out.putVarInt32(self.total_billed_ops_[i].ByteSize())
self.total_billed_ops_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_service_call_name_):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
if (self.has_total_amount_of_calls_):
out.putVarInt32(24)
out.putVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_):
out.putVarInt32(32)
out.putVarInt64(self.total_cost_of_calls_microdollars_)
for i in xrange(len(self.total_billed_ops_)):
out.putVarInt32(42)
out.putVarInt32(self.total_billed_ops_[i].ByteSizePartial())
self.total_billed_ops_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_service_call_name(d.getPrefixedString())
continue
if tt == 24:
self.set_total_amount_of_calls(d.getVarInt64())
continue
if tt == 32:
self.set_total_cost_of_calls_microdollars(d.getVarInt64())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_total_billed_ops().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_service_call_name_: res+=prefix+("service_call_name: %s\n" % self.DebugFormatString(self.service_call_name_))
if self.has_total_amount_of_calls_: res+=prefix+("total_amount_of_calls: %s\n" % self.DebugFormatInt64(self.total_amount_of_calls_))
if self.has_total_cost_of_calls_microdollars_: res+=prefix+("total_cost_of_calls_microdollars: %s\n" % self.DebugFormatInt64(self.total_cost_of_calls_microdollars_))
cnt=0
for e in self.total_billed_ops_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("total_billed_ops%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kservice_call_name = 1
ktotal_amount_of_calls = 3
ktotal_cost_of_calls_microdollars = 4
ktotal_billed_ops = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "service_call_name",
3: "total_amount_of_calls",
4: "total_cost_of_calls_microdollars",
5: "total_billed_ops",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.AggregateRpcStatsProto'
class KeyValProto(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = ""
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def set_key(self, x):
self.has_key_ = 1
self.key_ = x
def clear_key(self):
if self.has_key_:
self.has_key_ = 0
self.key_ = ""
def has_key(self): return self.has_key_
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.set_key(x.key())
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.key_))
n += self.lengthString(len(self.value_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(len(self.key_))
if (self.has_value_):
n += 1
n += self.lengthString(len(self.value_))
return n
def Clear(self):
self.clear_key()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
out.putVarInt32(18)
out.putPrefixedString(self.value_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
if (self.has_value_):
out.putVarInt32(18)
out.putPrefixedString(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_key(d.getPrefixedString())
continue
if tt == 18:
self.set_value(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 1
kvalue = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "key",
2: "value",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.KeyValProto'
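# Illustrative sketch (not part of the generated API surface): shows the
# typical set_*/has_*/clear_* accessor pattern on KeyValProto. Assumes the
# ProtocolBuffer.ProtocolMessage base class (App Engine SDK runtime) supplies
# Encode() and MergeFromString(), as the constructor above implies.
def _example_key_val_usage():
  kv = KeyValProto()
  kv.set_key('HTTP_HOST')
  kv.set_value('example.com')
  assert kv.has_key() and kv.has_value()
  data = kv.Encode()             # serializes; raises if required fields are unset
  roundtrip = KeyValProto(data)  # constructor merges from a serialized string
  assert roundtrip.key() == 'HTTP_HOST'
  return roundtrip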
class StackFrameProto(ProtocolBuffer.ProtocolMessage):
has_class_or_file_name_ = 0
class_or_file_name_ = ""
has_line_number_ = 0
line_number_ = 0
has_function_name_ = 0
function_name_ = ""
def __init__(self, contents=None):
self.variables_ = []
if contents is not None: self.MergeFromString(contents)
def class_or_file_name(self): return self.class_or_file_name_
def set_class_or_file_name(self, x):
self.has_class_or_file_name_ = 1
self.class_or_file_name_ = x
def clear_class_or_file_name(self):
if self.has_class_or_file_name_:
self.has_class_or_file_name_ = 0
self.class_or_file_name_ = ""
def has_class_or_file_name(self): return self.has_class_or_file_name_
def line_number(self): return self.line_number_
def set_line_number(self, x):
self.has_line_number_ = 1
self.line_number_ = x
def clear_line_number(self):
if self.has_line_number_:
self.has_line_number_ = 0
self.line_number_ = 0
def has_line_number(self): return self.has_line_number_
def function_name(self): return self.function_name_
def set_function_name(self, x):
self.has_function_name_ = 1
self.function_name_ = x
def clear_function_name(self):
if self.has_function_name_:
self.has_function_name_ = 0
self.function_name_ = ""
def has_function_name(self): return self.has_function_name_
def variables_size(self): return len(self.variables_)
def variables_list(self): return self.variables_
def variables(self, i):
return self.variables_[i]
def mutable_variables(self, i):
return self.variables_[i]
def add_variables(self):
x = KeyValProto()
self.variables_.append(x)
return x
def clear_variables(self):
self.variables_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_class_or_file_name()): self.set_class_or_file_name(x.class_or_file_name())
if (x.has_line_number()): self.set_line_number(x.line_number())
if (x.has_function_name()): self.set_function_name(x.function_name())
for i in xrange(x.variables_size()): self.add_variables().CopyFrom(x.variables(i))
def Equals(self, x):
if x is self: return 1
if self.has_class_or_file_name_ != x.has_class_or_file_name_: return 0
if self.has_class_or_file_name_ and self.class_or_file_name_ != x.class_or_file_name_: return 0
if self.has_line_number_ != x.has_line_number_: return 0
if self.has_line_number_ and self.line_number_ != x.line_number_: return 0
if self.has_function_name_ != x.has_function_name_: return 0
if self.has_function_name_ and self.function_name_ != x.function_name_: return 0
if len(self.variables_) != len(x.variables_): return 0
for e1, e2 in zip(self.variables_, x.variables_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_class_or_file_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: class_or_file_name not set.')
if (not self.has_function_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: function_name not set.')
for p in self.variables_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.class_or_file_name_))
if (self.has_line_number_): n += 1 + self.lengthVarInt64(self.line_number_)
n += self.lengthString(len(self.function_name_))
n += 1 * len(self.variables_)
for i in xrange(len(self.variables_)): n += self.lengthString(self.variables_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_class_or_file_name_):
n += 1
n += self.lengthString(len(self.class_or_file_name_))
if (self.has_line_number_): n += 1 + self.lengthVarInt64(self.line_number_)
if (self.has_function_name_):
n += 1
n += self.lengthString(len(self.function_name_))
n += 1 * len(self.variables_)
for i in xrange(len(self.variables_)): n += self.lengthString(self.variables_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_class_or_file_name()
self.clear_line_number()
self.clear_function_name()
self.clear_variables()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.class_or_file_name_)
if (self.has_line_number_):
out.putVarInt32(16)
out.putVarInt32(self.line_number_)
out.putVarInt32(26)
out.putPrefixedString(self.function_name_)
for i in xrange(len(self.variables_)):
out.putVarInt32(34)
out.putVarInt32(self.variables_[i].ByteSize())
self.variables_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_class_or_file_name_):
out.putVarInt32(10)
out.putPrefixedString(self.class_or_file_name_)
if (self.has_line_number_):
out.putVarInt32(16)
out.putVarInt32(self.line_number_)
if (self.has_function_name_):
out.putVarInt32(26)
out.putPrefixedString(self.function_name_)
for i in xrange(len(self.variables_)):
out.putVarInt32(34)
out.putVarInt32(self.variables_[i].ByteSizePartial())
self.variables_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_class_or_file_name(d.getPrefixedString())
continue
if tt == 16:
self.set_line_number(d.getVarInt32())
continue
if tt == 26:
self.set_function_name(d.getPrefixedString())
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_variables().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_class_or_file_name_: res+=prefix+("class_or_file_name: %s\n" % self.DebugFormatString(self.class_or_file_name_))
if self.has_line_number_: res+=prefix+("line_number: %s\n" % self.DebugFormatInt32(self.line_number_))
if self.has_function_name_: res+=prefix+("function_name: %s\n" % self.DebugFormatString(self.function_name_))
cnt=0
for e in self.variables_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("variables%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kclass_or_file_name = 1
kline_number = 2
kfunction_name = 3
kvariables = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "class_or_file_name",
2: "line_number",
3: "function_name",
4: "variables",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.StackFrameProto'
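# Illustrative sketch: builds one StackFrameProto with a nested KeyValProto
# variable via the generated add_*() pattern (add_variables() appends a new
# child message and returns it for in-place mutation). The file name, line
# number, and variable values below are hypothetical; Encode() is assumed from
# the ProtocolMessage base class.
def _example_stack_frame_usage():
  frame = StackFrameProto()
  frame.set_class_or_file_name('handlers/main.py')
  frame.set_line_number(42)
  frame.set_function_name('get')
  var = frame.add_variables()
  var.set_key('user_id')
  var.set_value('1234')
  return frame.Encode()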
class BilledOpProto(ProtocolBuffer.ProtocolMessage):
DATASTORE_READ = 0
DATASTORE_WRITE = 1
DATASTORE_SMALL = 2
MAIL_RECIPIENT = 3
CHANNEL_OPEN = 4
XMPP_STANZA = 5
_BilledOp_NAMES = {
0: "DATASTORE_READ",
1: "DATASTORE_WRITE",
2: "DATASTORE_SMALL",
3: "MAIL_RECIPIENT",
4: "CHANNEL_OPEN",
5: "XMPP_STANZA",
}
def BilledOp_Name(cls, x): return cls._BilledOp_NAMES.get(x, "")
BilledOp_Name = classmethod(BilledOp_Name)
has_op_ = 0
op_ = 0
has_num_ops_ = 0
num_ops_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def op(self): return self.op_
def set_op(self, x):
self.has_op_ = 1
self.op_ = x
def clear_op(self):
if self.has_op_:
self.has_op_ = 0
self.op_ = 0
def has_op(self): return self.has_op_
def num_ops(self): return self.num_ops_
def set_num_ops(self, x):
self.has_num_ops_ = 1
self.num_ops_ = x
def clear_num_ops(self):
if self.has_num_ops_:
self.has_num_ops_ = 0
self.num_ops_ = 0
def has_num_ops(self): return self.has_num_ops_
def MergeFrom(self, x):
assert x is not self
if (x.has_op()): self.set_op(x.op())
if (x.has_num_ops()): self.set_num_ops(x.num_ops())
def Equals(self, x):
if x is self: return 1
if self.has_op_ != x.has_op_: return 0
if self.has_op_ and self.op_ != x.op_: return 0
if self.has_num_ops_ != x.has_num_ops_: return 0
if self.has_num_ops_ and self.num_ops_ != x.num_ops_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_op_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: op not set.')
if (not self.has_num_ops_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: num_ops not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.op_)
n += self.lengthVarInt64(self.num_ops_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_op_):
n += 1
n += self.lengthVarInt64(self.op_)
if (self.has_num_ops_):
n += 1
n += self.lengthVarInt64(self.num_ops_)
return n
def Clear(self):
self.clear_op()
self.clear_num_ops()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.op_)
out.putVarInt32(16)
out.putVarInt32(self.num_ops_)
def OutputPartial(self, out):
if (self.has_op_):
out.putVarInt32(8)
out.putVarInt32(self.op_)
if (self.has_num_ops_):
out.putVarInt32(16)
out.putVarInt32(self.num_ops_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_op(d.getVarInt32())
continue
if tt == 16:
self.set_num_ops(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_op_: res+=prefix+("op: %s\n" % self.DebugFormatInt32(self.op_))
if self.has_num_ops_: res+=prefix+("num_ops: %s\n" % self.DebugFormatInt32(self.num_ops_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kop = 1
knum_ops = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "op",
2: "num_ops",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.BilledOpProto'
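# Illustrative sketch: the BilledOp enum constants are plain class attributes,
# and BilledOp_Name() maps a numeric value back to its symbolic name. Encode()
# is assumed from the ProtocolMessage base class.
def _example_billed_op_usage():
  op = BilledOpProto()
  op.set_op(BilledOpProto.DATASTORE_READ)
  op.set_num_ops(3)
  assert BilledOpProto.BilledOp_Name(op.op()) == 'DATASTORE_READ'
  return op.Encode()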
class DatastoreCallDetailsProto(ProtocolBuffer.ProtocolMessage):
has_query_kind_ = 0
query_kind_ = ""
has_query_ancestor_ = 0
query_ancestor_ = None
has_query_thiscursor_ = 0
query_thiscursor_ = 0
has_query_nextcursor_ = 0
query_nextcursor_ = 0
def __init__(self, contents=None):
self.get_successful_fetch_ = []
self.keys_read_ = []
self.keys_written_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def query_kind(self): return self.query_kind_
def set_query_kind(self, x):
self.has_query_kind_ = 1
self.query_kind_ = x
def clear_query_kind(self):
if self.has_query_kind_:
self.has_query_kind_ = 0
self.query_kind_ = ""
def has_query_kind(self): return self.has_query_kind_
def query_ancestor(self):
if self.query_ancestor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.query_ancestor_ is None: self.query_ancestor_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.query_ancestor_
def mutable_query_ancestor(self): self.has_query_ancestor_ = 1; return self.query_ancestor()
def clear_query_ancestor(self):
if self.has_query_ancestor_:
self.has_query_ancestor_ = 0;
if self.query_ancestor_ is not None: self.query_ancestor_.Clear()
def has_query_ancestor(self): return self.has_query_ancestor_
def query_thiscursor(self): return self.query_thiscursor_
def set_query_thiscursor(self, x):
self.has_query_thiscursor_ = 1
self.query_thiscursor_ = x
def clear_query_thiscursor(self):
if self.has_query_thiscursor_:
self.has_query_thiscursor_ = 0
self.query_thiscursor_ = 0
def has_query_thiscursor(self): return self.has_query_thiscursor_
def query_nextcursor(self): return self.query_nextcursor_
def set_query_nextcursor(self, x):
self.has_query_nextcursor_ = 1
self.query_nextcursor_ = x
def clear_query_nextcursor(self):
if self.has_query_nextcursor_:
self.has_query_nextcursor_ = 0
self.query_nextcursor_ = 0
def has_query_nextcursor(self): return self.has_query_nextcursor_
def get_successful_fetch_size(self): return len(self.get_successful_fetch_)
def get_successful_fetch_list(self): return self.get_successful_fetch_
def get_successful_fetch(self, i):
return self.get_successful_fetch_[i]
def set_get_successful_fetch(self, i, x):
self.get_successful_fetch_[i] = x
def add_get_successful_fetch(self, x):
self.get_successful_fetch_.append(x)
def clear_get_successful_fetch(self):
self.get_successful_fetch_ = []
def keys_read_size(self): return len(self.keys_read_)
def keys_read_list(self): return self.keys_read_
def keys_read(self, i):
return self.keys_read_[i]
def mutable_keys_read(self, i):
return self.keys_read_[i]
def add_keys_read(self):
x = Reference()
self.keys_read_.append(x)
return x
def clear_keys_read(self):
self.keys_read_ = []
def keys_written_size(self): return len(self.keys_written_)
def keys_written_list(self): return self.keys_written_
def keys_written(self, i):
return self.keys_written_[i]
def mutable_keys_written(self, i):
return self.keys_written_[i]
def add_keys_written(self):
x = Reference()
self.keys_written_.append(x)
return x
def clear_keys_written(self):
self.keys_written_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_query_kind()): self.set_query_kind(x.query_kind())
if (x.has_query_ancestor()): self.mutable_query_ancestor().MergeFrom(x.query_ancestor())
if (x.has_query_thiscursor()): self.set_query_thiscursor(x.query_thiscursor())
if (x.has_query_nextcursor()): self.set_query_nextcursor(x.query_nextcursor())
for i in xrange(x.get_successful_fetch_size()): self.add_get_successful_fetch(x.get_successful_fetch(i))
for i in xrange(x.keys_read_size()): self.add_keys_read().CopyFrom(x.keys_read(i))
for i in xrange(x.keys_written_size()): self.add_keys_written().CopyFrom(x.keys_written(i))
def Equals(self, x):
if x is self: return 1
if self.has_query_kind_ != x.has_query_kind_: return 0
if self.has_query_kind_ and self.query_kind_ != x.query_kind_: return 0
if self.has_query_ancestor_ != x.has_query_ancestor_: return 0
if self.has_query_ancestor_ and self.query_ancestor_ != x.query_ancestor_: return 0
if self.has_query_thiscursor_ != x.has_query_thiscursor_: return 0
if self.has_query_thiscursor_ and self.query_thiscursor_ != x.query_thiscursor_: return 0
if self.has_query_nextcursor_ != x.has_query_nextcursor_: return 0
if self.has_query_nextcursor_ and self.query_nextcursor_ != x.query_nextcursor_: return 0
if len(self.get_successful_fetch_) != len(x.get_successful_fetch_): return 0
for e1, e2 in zip(self.get_successful_fetch_, x.get_successful_fetch_):
if e1 != e2: return 0
if len(self.keys_read_) != len(x.keys_read_): return 0
for e1, e2 in zip(self.keys_read_, x.keys_read_):
if e1 != e2: return 0
if len(self.keys_written_) != len(x.keys_written_): return 0
for e1, e2 in zip(self.keys_written_, x.keys_written_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_query_ancestor_ and not self.query_ancestor_.IsInitialized(debug_strs)): initialized = 0
for p in self.keys_read_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.keys_written_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_query_kind_): n += 1 + self.lengthString(len(self.query_kind_))
if (self.has_query_ancestor_): n += 1 + self.lengthString(self.query_ancestor_.ByteSize())
if (self.has_query_thiscursor_): n += 9
if (self.has_query_nextcursor_): n += 9
n += 2 * len(self.get_successful_fetch_)
n += 1 * len(self.keys_read_)
for i in xrange(len(self.keys_read_)): n += self.lengthString(self.keys_read_[i].ByteSize())
n += 1 * len(self.keys_written_)
for i in xrange(len(self.keys_written_)): n += self.lengthString(self.keys_written_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_query_kind_): n += 1 + self.lengthString(len(self.query_kind_))
if (self.has_query_ancestor_): n += 1 + self.lengthString(self.query_ancestor_.ByteSizePartial())
if (self.has_query_thiscursor_): n += 9
if (self.has_query_nextcursor_): n += 9
n += 2 * len(self.get_successful_fetch_)
n += 1 * len(self.keys_read_)
for i in xrange(len(self.keys_read_)): n += self.lengthString(self.keys_read_[i].ByteSizePartial())
n += 1 * len(self.keys_written_)
for i in xrange(len(self.keys_written_)): n += self.lengthString(self.keys_written_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_query_kind()
self.clear_query_ancestor()
self.clear_query_thiscursor()
self.clear_query_nextcursor()
self.clear_get_successful_fetch()
self.clear_keys_read()
self.clear_keys_written()
def OutputUnchecked(self, out):
if (self.has_query_kind_):
out.putVarInt32(10)
out.putPrefixedString(self.query_kind_)
if (self.has_query_ancestor_):
out.putVarInt32(18)
out.putVarInt32(self.query_ancestor_.ByteSize())
self.query_ancestor_.OutputUnchecked(out)
if (self.has_query_thiscursor_):
out.putVarInt32(25)
out.put64(self.query_thiscursor_)
if (self.has_query_nextcursor_):
out.putVarInt32(33)
out.put64(self.query_nextcursor_)
for i in xrange(len(self.get_successful_fetch_)):
out.putVarInt32(40)
out.putBoolean(self.get_successful_fetch_[i])
for i in xrange(len(self.keys_read_)):
out.putVarInt32(50)
out.putVarInt32(self.keys_read_[i].ByteSize())
self.keys_read_[i].OutputUnchecked(out)
for i in xrange(len(self.keys_written_)):
out.putVarInt32(58)
out.putVarInt32(self.keys_written_[i].ByteSize())
self.keys_written_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_query_kind_):
out.putVarInt32(10)
out.putPrefixedString(self.query_kind_)
if (self.has_query_ancestor_):
out.putVarInt32(18)
out.putVarInt32(self.query_ancestor_.ByteSizePartial())
self.query_ancestor_.OutputPartial(out)
if (self.has_query_thiscursor_):
out.putVarInt32(25)
out.put64(self.query_thiscursor_)
if (self.has_query_nextcursor_):
out.putVarInt32(33)
out.put64(self.query_nextcursor_)
for i in xrange(len(self.get_successful_fetch_)):
out.putVarInt32(40)
out.putBoolean(self.get_successful_fetch_[i])
for i in xrange(len(self.keys_read_)):
out.putVarInt32(50)
out.putVarInt32(self.keys_read_[i].ByteSizePartial())
self.keys_read_[i].OutputPartial(out)
for i in xrange(len(self.keys_written_)):
out.putVarInt32(58)
out.putVarInt32(self.keys_written_[i].ByteSizePartial())
self.keys_written_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_query_kind(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_query_ancestor().TryMerge(tmp)
continue
if tt == 25:
self.set_query_thiscursor(d.get64())
continue
if tt == 33:
self.set_query_nextcursor(d.get64())
continue
if tt == 40:
self.add_get_successful_fetch(d.getBoolean())
continue
if tt == 50:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_keys_read().TryMerge(tmp)
continue
if tt == 58:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_keys_written().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_query_kind_: res+=prefix+("query_kind: %s\n" % self.DebugFormatString(self.query_kind_))
if self.has_query_ancestor_:
res+=prefix+"query_ancestor <\n"
res+=self.query_ancestor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_query_thiscursor_: res+=prefix+("query_thiscursor: %s\n" % self.DebugFormatFixed64(self.query_thiscursor_))
if self.has_query_nextcursor_: res+=prefix+("query_nextcursor: %s\n" % self.DebugFormatFixed64(self.query_nextcursor_))
cnt=0
for e in self.get_successful_fetch_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("get_successful_fetch%s: %s\n" % (elm, self.DebugFormatBool(e)))
cnt+=1
cnt=0
for e in self.keys_read_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("keys_read%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.keys_written_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("keys_written%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kquery_kind = 1
kquery_ancestor = 2
kquery_thiscursor = 3
kquery_nextcursor = 4
kget_successful_fetch = 5
kkeys_read = 6
kkeys_written = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "query_kind",
2: "query_ancestor",
3: "query_thiscursor",
4: "query_nextcursor",
5: "get_successful_fetch",
6: "keys_read",
7: "keys_written",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.DOUBLE,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.STRING,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DatastoreCallDetailsProto'
class IndividualRpcStatsProto(ProtocolBuffer.ProtocolMessage):
has_service_call_name_ = 0
service_call_name_ = ""
has_request_data_summary_ = 0
request_data_summary_ = ""
has_response_data_summary_ = 0
response_data_summary_ = ""
has_api_mcycles_ = 0
api_mcycles_ = 0
has_api_milliseconds_ = 0
api_milliseconds_ = 0
has_start_offset_milliseconds_ = 0
start_offset_milliseconds_ = 0
has_duration_milliseconds_ = 0
duration_milliseconds_ = 0
has_namespace_ = 0
namespace_ = ""
has_was_successful_ = 0
was_successful_ = 1
has_datastore_details_ = 0
datastore_details_ = None
has_call_cost_microdollars_ = 0
call_cost_microdollars_ = 0
def __init__(self, contents=None):
self.call_stack_ = []
self.billed_ops_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def service_call_name(self): return self.service_call_name_
def set_service_call_name(self, x):
self.has_service_call_name_ = 1
self.service_call_name_ = x
def clear_service_call_name(self):
if self.has_service_call_name_:
self.has_service_call_name_ = 0
self.service_call_name_ = ""
def has_service_call_name(self): return self.has_service_call_name_
def request_data_summary(self): return self.request_data_summary_
def set_request_data_summary(self, x):
self.has_request_data_summary_ = 1
self.request_data_summary_ = x
def clear_request_data_summary(self):
if self.has_request_data_summary_:
self.has_request_data_summary_ = 0
self.request_data_summary_ = ""
def has_request_data_summary(self): return self.has_request_data_summary_
def response_data_summary(self): return self.response_data_summary_
def set_response_data_summary(self, x):
self.has_response_data_summary_ = 1
self.response_data_summary_ = x
def clear_response_data_summary(self):
if self.has_response_data_summary_:
self.has_response_data_summary_ = 0
self.response_data_summary_ = ""
def has_response_data_summary(self): return self.has_response_data_summary_
def api_mcycles(self): return self.api_mcycles_
def set_api_mcycles(self, x):
self.has_api_mcycles_ = 1
self.api_mcycles_ = x
def clear_api_mcycles(self):
if self.has_api_mcycles_:
self.has_api_mcycles_ = 0
self.api_mcycles_ = 0
def has_api_mcycles(self): return self.has_api_mcycles_
def api_milliseconds(self): return self.api_milliseconds_
def set_api_milliseconds(self, x):
self.has_api_milliseconds_ = 1
self.api_milliseconds_ = x
def clear_api_milliseconds(self):
if self.has_api_milliseconds_:
self.has_api_milliseconds_ = 0
self.api_milliseconds_ = 0
def has_api_milliseconds(self): return self.has_api_milliseconds_
def start_offset_milliseconds(self): return self.start_offset_milliseconds_
def set_start_offset_milliseconds(self, x):
self.has_start_offset_milliseconds_ = 1
self.start_offset_milliseconds_ = x
def clear_start_offset_milliseconds(self):
if self.has_start_offset_milliseconds_:
self.has_start_offset_milliseconds_ = 0
self.start_offset_milliseconds_ = 0
def has_start_offset_milliseconds(self): return self.has_start_offset_milliseconds_
def duration_milliseconds(self): return self.duration_milliseconds_
def set_duration_milliseconds(self, x):
self.has_duration_milliseconds_ = 1
self.duration_milliseconds_ = x
def clear_duration_milliseconds(self):
if self.has_duration_milliseconds_:
self.has_duration_milliseconds_ = 0
self.duration_milliseconds_ = 0
def has_duration_milliseconds(self): return self.has_duration_milliseconds_
def namespace(self): return self.namespace_
def set_namespace(self, x):
self.has_namespace_ = 1
self.namespace_ = x
def clear_namespace(self):
if self.has_namespace_:
self.has_namespace_ = 0
self.namespace_ = ""
def has_namespace(self): return self.has_namespace_
def was_successful(self): return self.was_successful_
def set_was_successful(self, x):
self.has_was_successful_ = 1
self.was_successful_ = x
def clear_was_successful(self):
if self.has_was_successful_:
self.has_was_successful_ = 0
self.was_successful_ = 1
def has_was_successful(self): return self.has_was_successful_
def call_stack_size(self): return len(self.call_stack_)
def call_stack_list(self): return self.call_stack_
def call_stack(self, i):
return self.call_stack_[i]
def mutable_call_stack(self, i):
return self.call_stack_[i]
def add_call_stack(self):
x = StackFrameProto()
self.call_stack_.append(x)
return x
def clear_call_stack(self):
self.call_stack_ = []
def datastore_details(self):
if self.datastore_details_ is None:
self.lazy_init_lock_.acquire()
try:
if self.datastore_details_ is None: self.datastore_details_ = DatastoreCallDetailsProto()
finally:
self.lazy_init_lock_.release()
return self.datastore_details_
def mutable_datastore_details(self): self.has_datastore_details_ = 1; return self.datastore_details()
def clear_datastore_details(self):
if self.has_datastore_details_:
self.has_datastore_details_ = 0;
if self.datastore_details_ is not None: self.datastore_details_.Clear()
def has_datastore_details(self): return self.has_datastore_details_
def call_cost_microdollars(self): return self.call_cost_microdollars_
def set_call_cost_microdollars(self, x):
self.has_call_cost_microdollars_ = 1
self.call_cost_microdollars_ = x
def clear_call_cost_microdollars(self):
if self.has_call_cost_microdollars_:
self.has_call_cost_microdollars_ = 0
self.call_cost_microdollars_ = 0
def has_call_cost_microdollars(self): return self.has_call_cost_microdollars_
def billed_ops_size(self): return len(self.billed_ops_)
def billed_ops_list(self): return self.billed_ops_
def billed_ops(self, i):
return self.billed_ops_[i]
def mutable_billed_ops(self, i):
return self.billed_ops_[i]
def add_billed_ops(self):
x = BilledOpProto()
self.billed_ops_.append(x)
return x
def clear_billed_ops(self):
self.billed_ops_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_service_call_name()): self.set_service_call_name(x.service_call_name())
if (x.has_request_data_summary()): self.set_request_data_summary(x.request_data_summary())
if (x.has_response_data_summary()): self.set_response_data_summary(x.response_data_summary())
if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles())
if (x.has_api_milliseconds()): self.set_api_milliseconds(x.api_milliseconds())
if (x.has_start_offset_milliseconds()): self.set_start_offset_milliseconds(x.start_offset_milliseconds())
if (x.has_duration_milliseconds()): self.set_duration_milliseconds(x.duration_milliseconds())
if (x.has_namespace()): self.set_namespace(x.namespace())
if (x.has_was_successful()): self.set_was_successful(x.was_successful())
for i in xrange(x.call_stack_size()): self.add_call_stack().CopyFrom(x.call_stack(i))
if (x.has_datastore_details()): self.mutable_datastore_details().MergeFrom(x.datastore_details())
if (x.has_call_cost_microdollars()): self.set_call_cost_microdollars(x.call_cost_microdollars())
for i in xrange(x.billed_ops_size()): self.add_billed_ops().CopyFrom(x.billed_ops(i))
def Equals(self, x):
if x is self: return 1
if self.has_service_call_name_ != x.has_service_call_name_: return 0
if self.has_service_call_name_ and self.service_call_name_ != x.service_call_name_: return 0
if self.has_request_data_summary_ != x.has_request_data_summary_: return 0
if self.has_request_data_summary_ and self.request_data_summary_ != x.request_data_summary_: return 0
if self.has_response_data_summary_ != x.has_response_data_summary_: return 0
if self.has_response_data_summary_ and self.response_data_summary_ != x.response_data_summary_: return 0
if self.has_api_mcycles_ != x.has_api_mcycles_: return 0
if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0
if self.has_api_milliseconds_ != x.has_api_milliseconds_: return 0
if self.has_api_milliseconds_ and self.api_milliseconds_ != x.api_milliseconds_: return 0
if self.has_start_offset_milliseconds_ != x.has_start_offset_milliseconds_: return 0
if self.has_start_offset_milliseconds_ and self.start_offset_milliseconds_ != x.start_offset_milliseconds_: return 0
if self.has_duration_milliseconds_ != x.has_duration_milliseconds_: return 0
if self.has_duration_milliseconds_ and self.duration_milliseconds_ != x.duration_milliseconds_: return 0
if self.has_namespace_ != x.has_namespace_: return 0
if self.has_namespace_ and self.namespace_ != x.namespace_: return 0
if self.has_was_successful_ != x.has_was_successful_: return 0
if self.has_was_successful_ and self.was_successful_ != x.was_successful_: return 0
if len(self.call_stack_) != len(x.call_stack_): return 0
for e1, e2 in zip(self.call_stack_, x.call_stack_):
if e1 != e2: return 0
if self.has_datastore_details_ != x.has_datastore_details_: return 0
if self.has_datastore_details_ and self.datastore_details_ != x.datastore_details_: return 0
if self.has_call_cost_microdollars_ != x.has_call_cost_microdollars_: return 0
if self.has_call_cost_microdollars_ and self.call_cost_microdollars_ != x.call_cost_microdollars_: return 0
if len(self.billed_ops_) != len(x.billed_ops_): return 0
for e1, e2 in zip(self.billed_ops_, x.billed_ops_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_service_call_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: service_call_name not set.')
if (not self.has_start_offset_milliseconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start_offset_milliseconds not set.')
for p in self.call_stack_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_datastore_details_ and not self.datastore_details_.IsInitialized(debug_strs)): initialized = 0
for p in self.billed_ops_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.service_call_name_))
if (self.has_request_data_summary_): n += 1 + self.lengthString(len(self.request_data_summary_))
if (self.has_response_data_summary_): n += 1 + self.lengthString(len(self.response_data_summary_))
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_api_milliseconds_): n += 1 + self.lengthVarInt64(self.api_milliseconds_)
n += self.lengthVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_): n += 1 + self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_was_successful_): n += 2
n += 1 * len(self.call_stack_)
for i in xrange(len(self.call_stack_)): n += self.lengthString(self.call_stack_[i].ByteSize())
if (self.has_datastore_details_): n += 1 + self.lengthString(self.datastore_details_.ByteSize())
if (self.has_call_cost_microdollars_): n += 1 + self.lengthVarInt64(self.call_cost_microdollars_)
n += 1 * len(self.billed_ops_)
for i in xrange(len(self.billed_ops_)): n += self.lengthString(self.billed_ops_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_service_call_name_):
n += 1
n += self.lengthString(len(self.service_call_name_))
if (self.has_request_data_summary_): n += 1 + self.lengthString(len(self.request_data_summary_))
if (self.has_response_data_summary_): n += 1 + self.lengthString(len(self.response_data_summary_))
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_api_milliseconds_): n += 1 + self.lengthVarInt64(self.api_milliseconds_)
if (self.has_start_offset_milliseconds_):
n += 1
n += self.lengthVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_): n += 1 + self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_was_successful_): n += 2
n += 1 * len(self.call_stack_)
for i in xrange(len(self.call_stack_)): n += self.lengthString(self.call_stack_[i].ByteSizePartial())
if (self.has_datastore_details_): n += 1 + self.lengthString(self.datastore_details_.ByteSizePartial())
if (self.has_call_cost_microdollars_): n += 1 + self.lengthVarInt64(self.call_cost_microdollars_)
n += 1 * len(self.billed_ops_)
for i in xrange(len(self.billed_ops_)): n += self.lengthString(self.billed_ops_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_service_call_name()
self.clear_request_data_summary()
self.clear_response_data_summary()
self.clear_api_mcycles()
self.clear_api_milliseconds()
self.clear_start_offset_milliseconds()
self.clear_duration_milliseconds()
self.clear_namespace()
self.clear_was_successful()
self.clear_call_stack()
self.clear_datastore_details()
self.clear_call_cost_microdollars()
self.clear_billed_ops()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
if (self.has_request_data_summary_):
out.putVarInt32(26)
out.putPrefixedString(self.request_data_summary_)
if (self.has_response_data_summary_):
out.putVarInt32(34)
out.putPrefixedString(self.response_data_summary_)
if (self.has_api_mcycles_):
out.putVarInt32(40)
out.putVarInt64(self.api_mcycles_)
out.putVarInt32(48)
out.putVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_):
out.putVarInt32(56)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_namespace_):
out.putVarInt32(66)
out.putPrefixedString(self.namespace_)
if (self.has_was_successful_):
out.putVarInt32(72)
out.putBoolean(self.was_successful_)
for i in xrange(len(self.call_stack_)):
out.putVarInt32(82)
out.putVarInt32(self.call_stack_[i].ByteSize())
self.call_stack_[i].OutputUnchecked(out)
if (self.has_api_milliseconds_):
out.putVarInt32(88)
out.putVarInt64(self.api_milliseconds_)
if (self.has_datastore_details_):
out.putVarInt32(98)
out.putVarInt32(self.datastore_details_.ByteSize())
self.datastore_details_.OutputUnchecked(out)
if (self.has_call_cost_microdollars_):
out.putVarInt32(104)
out.putVarInt64(self.call_cost_microdollars_)
for i in xrange(len(self.billed_ops_)):
out.putVarInt32(114)
out.putVarInt32(self.billed_ops_[i].ByteSize())
self.billed_ops_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_service_call_name_):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
if (self.has_request_data_summary_):
out.putVarInt32(26)
out.putPrefixedString(self.request_data_summary_)
if (self.has_response_data_summary_):
out.putVarInt32(34)
out.putPrefixedString(self.response_data_summary_)
if (self.has_api_mcycles_):
out.putVarInt32(40)
out.putVarInt64(self.api_mcycles_)
if (self.has_start_offset_milliseconds_):
out.putVarInt32(48)
out.putVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_):
out.putVarInt32(56)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_namespace_):
out.putVarInt32(66)
out.putPrefixedString(self.namespace_)
if (self.has_was_successful_):
out.putVarInt32(72)
out.putBoolean(self.was_successful_)
for i in xrange(len(self.call_stack_)):
out.putVarInt32(82)
out.putVarInt32(self.call_stack_[i].ByteSizePartial())
self.call_stack_[i].OutputPartial(out)
if (self.has_api_milliseconds_):
out.putVarInt32(88)
out.putVarInt64(self.api_milliseconds_)
if (self.has_datastore_details_):
out.putVarInt32(98)
out.putVarInt32(self.datastore_details_.ByteSizePartial())
self.datastore_details_.OutputPartial(out)
if (self.has_call_cost_microdollars_):
out.putVarInt32(104)
out.putVarInt64(self.call_cost_microdollars_)
for i in xrange(len(self.billed_ops_)):
out.putVarInt32(114)
out.putVarInt32(self.billed_ops_[i].ByteSizePartial())
self.billed_ops_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_service_call_name(d.getPrefixedString())
continue
if tt == 26:
self.set_request_data_summary(d.getPrefixedString())
continue
if tt == 34:
self.set_response_data_summary(d.getPrefixedString())
continue
if tt == 40:
self.set_api_mcycles(d.getVarInt64())
continue
if tt == 48:
self.set_start_offset_milliseconds(d.getVarInt64())
continue
if tt == 56:
self.set_duration_milliseconds(d.getVarInt64())
continue
if tt == 66:
self.set_namespace(d.getPrefixedString())
continue
if tt == 72:
self.set_was_successful(d.getBoolean())
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_call_stack().TryMerge(tmp)
continue
if tt == 88:
self.set_api_milliseconds(d.getVarInt64())
continue
if tt == 98:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_datastore_details().TryMerge(tmp)
continue
if tt == 104:
self.set_call_cost_microdollars(d.getVarInt64())
continue
if tt == 114:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_billed_ops().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_service_call_name_: res+=prefix+("service_call_name: %s\n" % self.DebugFormatString(self.service_call_name_))
if self.has_request_data_summary_: res+=prefix+("request_data_summary: %s\n" % self.DebugFormatString(self.request_data_summary_))
if self.has_response_data_summary_: res+=prefix+("response_data_summary: %s\n" % self.DebugFormatString(self.response_data_summary_))
if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_))
if self.has_api_milliseconds_: res+=prefix+("api_milliseconds: %s\n" % self.DebugFormatInt64(self.api_milliseconds_))
if self.has_start_offset_milliseconds_: res+=prefix+("start_offset_milliseconds: %s\n" % self.DebugFormatInt64(self.start_offset_milliseconds_))
if self.has_duration_milliseconds_: res+=prefix+("duration_milliseconds: %s\n" % self.DebugFormatInt64(self.duration_milliseconds_))
if self.has_namespace_: res+=prefix+("namespace: %s\n" % self.DebugFormatString(self.namespace_))
if self.has_was_successful_: res+=prefix+("was_successful: %s\n" % self.DebugFormatBool(self.was_successful_))
cnt=0
for e in self.call_stack_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("call_stack%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_datastore_details_:
res+=prefix+"datastore_details <\n"
res+=self.datastore_details_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_call_cost_microdollars_: res+=prefix+("call_cost_microdollars: %s\n" % self.DebugFormatInt64(self.call_cost_microdollars_))
cnt=0
for e in self.billed_ops_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("billed_ops%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kservice_call_name = 1
krequest_data_summary = 3
kresponse_data_summary = 4
kapi_mcycles = 5
kapi_milliseconds = 11
kstart_offset_milliseconds = 6
kduration_milliseconds = 7
knamespace = 8
kwas_successful = 9
kcall_stack = 10
kdatastore_details = 12
kcall_cost_microdollars = 13
kbilled_ops = 14
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "service_call_name",
3: "request_data_summary",
4: "response_data_summary",
5: "api_mcycles",
6: "start_offset_milliseconds",
7: "duration_milliseconds",
8: "namespace",
9: "was_successful",
10: "call_stack",
11: "api_milliseconds",
12: "datastore_details",
13: "call_cost_microdollars",
14: "billed_ops",
}, 14)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.NUMERIC,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.NUMERIC,
12: ProtocolBuffer.Encoder.STRING,
13: ProtocolBuffer.Encoder.NUMERIC,
14: ProtocolBuffer.Encoder.STRING,
}, 14, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndividualRpcStatsProto'
class RequestStatProto(ProtocolBuffer.ProtocolMessage):
has_start_timestamp_milliseconds_ = 0
start_timestamp_milliseconds_ = 0
has_http_method_ = 0
http_method_ = "GET"
has_http_path_ = 0
http_path_ = "/"
has_http_query_ = 0
http_query_ = ""
has_http_status_ = 0
http_status_ = 200
has_duration_milliseconds_ = 0
duration_milliseconds_ = 0
has_api_mcycles_ = 0
api_mcycles_ = 0
has_processor_mcycles_ = 0
processor_mcycles_ = 0
has_overhead_walltime_milliseconds_ = 0
overhead_walltime_milliseconds_ = 0
has_user_email_ = 0
user_email_ = ""
has_is_admin_ = 0
is_admin_ = 0
def __init__(self, contents=None):
self.rpc_stats_ = []
self.cgi_env_ = []
self.individual_stats_ = []
if contents is not None: self.MergeFromString(contents)
def start_timestamp_milliseconds(self): return self.start_timestamp_milliseconds_
def set_start_timestamp_milliseconds(self, x):
self.has_start_timestamp_milliseconds_ = 1
self.start_timestamp_milliseconds_ = x
def clear_start_timestamp_milliseconds(self):
if self.has_start_timestamp_milliseconds_:
self.has_start_timestamp_milliseconds_ = 0
self.start_timestamp_milliseconds_ = 0
def has_start_timestamp_milliseconds(self): return self.has_start_timestamp_milliseconds_
def http_method(self): return self.http_method_
def set_http_method(self, x):
self.has_http_method_ = 1
self.http_method_ = x
def clear_http_method(self):
if self.has_http_method_:
self.has_http_method_ = 0
self.http_method_ = "GET"
def has_http_method(self): return self.has_http_method_
def http_path(self): return self.http_path_
def set_http_path(self, x):
self.has_http_path_ = 1
self.http_path_ = x
def clear_http_path(self):
if self.has_http_path_:
self.has_http_path_ = 0
self.http_path_ = "/"
def has_http_path(self): return self.has_http_path_
def http_query(self): return self.http_query_
def set_http_query(self, x):
self.has_http_query_ = 1
self.http_query_ = x
def clear_http_query(self):
if self.has_http_query_:
self.has_http_query_ = 0
self.http_query_ = ""
def has_http_query(self): return self.has_http_query_
def http_status(self): return self.http_status_
def set_http_status(self, x):
self.has_http_status_ = 1
self.http_status_ = x
def clear_http_status(self):
if self.has_http_status_:
self.has_http_status_ = 0
self.http_status_ = 200
def has_http_status(self): return self.has_http_status_
def duration_milliseconds(self): return self.duration_milliseconds_
def set_duration_milliseconds(self, x):
self.has_duration_milliseconds_ = 1
self.duration_milliseconds_ = x
def clear_duration_milliseconds(self):
if self.has_duration_milliseconds_:
self.has_duration_milliseconds_ = 0
self.duration_milliseconds_ = 0
def has_duration_milliseconds(self): return self.has_duration_milliseconds_
def api_mcycles(self): return self.api_mcycles_
def set_api_mcycles(self, x):
self.has_api_mcycles_ = 1
self.api_mcycles_ = x
def clear_api_mcycles(self):
if self.has_api_mcycles_:
self.has_api_mcycles_ = 0
self.api_mcycles_ = 0
def has_api_mcycles(self): return self.has_api_mcycles_
def processor_mcycles(self): return self.processor_mcycles_
def set_processor_mcycles(self, x):
self.has_processor_mcycles_ = 1
self.processor_mcycles_ = x
def clear_processor_mcycles(self):
if self.has_processor_mcycles_:
self.has_processor_mcycles_ = 0
self.processor_mcycles_ = 0
def has_processor_mcycles(self): return self.has_processor_mcycles_
def rpc_stats_size(self): return len(self.rpc_stats_)
def rpc_stats_list(self): return self.rpc_stats_
def rpc_stats(self, i):
return self.rpc_stats_[i]
def mutable_rpc_stats(self, i):
return self.rpc_stats_[i]
def add_rpc_stats(self):
x = AggregateRpcStatsProto()
self.rpc_stats_.append(x)
return x
def clear_rpc_stats(self):
self.rpc_stats_ = []
def cgi_env_size(self): return len(self.cgi_env_)
def cgi_env_list(self): return self.cgi_env_
def cgi_env(self, i):
return self.cgi_env_[i]
def mutable_cgi_env(self, i):
return self.cgi_env_[i]
def add_cgi_env(self):
x = KeyValProto()
self.cgi_env_.append(x)
return x
def clear_cgi_env(self):
self.cgi_env_ = []
def overhead_walltime_milliseconds(self): return self.overhead_walltime_milliseconds_
def set_overhead_walltime_milliseconds(self, x):
self.has_overhead_walltime_milliseconds_ = 1
self.overhead_walltime_milliseconds_ = x
def clear_overhead_walltime_milliseconds(self):
if self.has_overhead_walltime_milliseconds_:
self.has_overhead_walltime_milliseconds_ = 0
self.overhead_walltime_milliseconds_ = 0
def has_overhead_walltime_milliseconds(self): return self.has_overhead_walltime_milliseconds_
def user_email(self): return self.user_email_
def set_user_email(self, x):
self.has_user_email_ = 1
self.user_email_ = x
def clear_user_email(self):
if self.has_user_email_:
self.has_user_email_ = 0
self.user_email_ = ""
def has_user_email(self): return self.has_user_email_
def is_admin(self): return self.is_admin_
def set_is_admin(self, x):
self.has_is_admin_ = 1
self.is_admin_ = x
def clear_is_admin(self):
if self.has_is_admin_:
self.has_is_admin_ = 0
self.is_admin_ = 0
def has_is_admin(self): return self.has_is_admin_
def individual_stats_size(self): return len(self.individual_stats_)
def individual_stats_list(self): return self.individual_stats_
def individual_stats(self, i):
return self.individual_stats_[i]
def mutable_individual_stats(self, i):
return self.individual_stats_[i]
def add_individual_stats(self):
x = IndividualRpcStatsProto()
self.individual_stats_.append(x)
return x
def clear_individual_stats(self):
self.individual_stats_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_start_timestamp_milliseconds()): self.set_start_timestamp_milliseconds(x.start_timestamp_milliseconds())
if (x.has_http_method()): self.set_http_method(x.http_method())
if (x.has_http_path()): self.set_http_path(x.http_path())
if (x.has_http_query()): self.set_http_query(x.http_query())
if (x.has_http_status()): self.set_http_status(x.http_status())
if (x.has_duration_milliseconds()): self.set_duration_milliseconds(x.duration_milliseconds())
if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles())
if (x.has_processor_mcycles()): self.set_processor_mcycles(x.processor_mcycles())
for i in xrange(x.rpc_stats_size()): self.add_rpc_stats().CopyFrom(x.rpc_stats(i))
for i in xrange(x.cgi_env_size()): self.add_cgi_env().CopyFrom(x.cgi_env(i))
if (x.has_overhead_walltime_milliseconds()): self.set_overhead_walltime_milliseconds(x.overhead_walltime_milliseconds())
if (x.has_user_email()): self.set_user_email(x.user_email())
if (x.has_is_admin()): self.set_is_admin(x.is_admin())
for i in xrange(x.individual_stats_size()): self.add_individual_stats().CopyFrom(x.individual_stats(i))
def Equals(self, x):
if x is self: return 1
if self.has_start_timestamp_milliseconds_ != x.has_start_timestamp_milliseconds_: return 0
if self.has_start_timestamp_milliseconds_ and self.start_timestamp_milliseconds_ != x.start_timestamp_milliseconds_: return 0
if self.has_http_method_ != x.has_http_method_: return 0
if self.has_http_method_ and self.http_method_ != x.http_method_: return 0
if self.has_http_path_ != x.has_http_path_: return 0
if self.has_http_path_ and self.http_path_ != x.http_path_: return 0
if self.has_http_query_ != x.has_http_query_: return 0
if self.has_http_query_ and self.http_query_ != x.http_query_: return 0
    if self.has_http_status_ != x.has_http_status_: return 0
    if self.has_http_status_ and self.http_status_ != x.http_status_: return 0
    if self.has_duration_milliseconds_ != x.has_duration_milliseconds_: return 0
if self.has_duration_milliseconds_ and self.duration_milliseconds_ != x.duration_milliseconds_: return 0
if self.has_api_mcycles_ != x.has_api_mcycles_: return 0
if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0
if self.has_processor_mcycles_ != x.has_processor_mcycles_: return 0
if self.has_processor_mcycles_ and self.processor_mcycles_ != x.processor_mcycles_: return 0
if len(self.rpc_stats_) != len(x.rpc_stats_): return 0
for e1, e2 in zip(self.rpc_stats_, x.rpc_stats_):
if e1 != e2: return 0
if len(self.cgi_env_) != len(x.cgi_env_): return 0
for e1, e2 in zip(self.cgi_env_, x.cgi_env_):
if e1 != e2: return 0
if self.has_overhead_walltime_milliseconds_ != x.has_overhead_walltime_milliseconds_: return 0
if self.has_overhead_walltime_milliseconds_ and self.overhead_walltime_milliseconds_ != x.overhead_walltime_milliseconds_: return 0
if self.has_user_email_ != x.has_user_email_: return 0
if self.has_user_email_ and self.user_email_ != x.user_email_: return 0
if self.has_is_admin_ != x.has_is_admin_: return 0
if self.has_is_admin_ and self.is_admin_ != x.is_admin_: return 0
if len(self.individual_stats_) != len(x.individual_stats_): return 0
for e1, e2 in zip(self.individual_stats_, x.individual_stats_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_start_timestamp_milliseconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start_timestamp_milliseconds not set.')
if (not self.has_duration_milliseconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: duration_milliseconds not set.')
for p in self.rpc_stats_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.cgi_env_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.individual_stats_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_): n += 1 + self.lengthString(len(self.http_method_))
if (self.has_http_path_): n += 1 + self.lengthString(len(self.http_path_))
if (self.has_http_query_): n += 1 + self.lengthString(len(self.http_query_))
if (self.has_http_status_): n += 1 + self.lengthVarInt64(self.http_status_)
n += self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_): n += 1 + self.lengthVarInt64(self.processor_mcycles_)
n += 1 * len(self.rpc_stats_)
for i in xrange(len(self.rpc_stats_)): n += self.lengthString(self.rpc_stats_[i].ByteSize())
n += 2 * len(self.cgi_env_)
for i in xrange(len(self.cgi_env_)): n += self.lengthString(self.cgi_env_[i].ByteSize())
if (self.has_overhead_walltime_milliseconds_): n += 2 + self.lengthVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_): n += 2 + self.lengthString(len(self.user_email_))
if (self.has_is_admin_): n += 3
n += 2 * len(self.individual_stats_)
for i in xrange(len(self.individual_stats_)): n += self.lengthString(self.individual_stats_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_start_timestamp_milliseconds_):
n += 1
n += self.lengthVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_): n += 1 + self.lengthString(len(self.http_method_))
if (self.has_http_path_): n += 1 + self.lengthString(len(self.http_path_))
if (self.has_http_query_): n += 1 + self.lengthString(len(self.http_query_))
if (self.has_http_status_): n += 1 + self.lengthVarInt64(self.http_status_)
if (self.has_duration_milliseconds_):
n += 1
n += self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_): n += 1 + self.lengthVarInt64(self.processor_mcycles_)
n += 1 * len(self.rpc_stats_)
for i in xrange(len(self.rpc_stats_)): n += self.lengthString(self.rpc_stats_[i].ByteSizePartial())
n += 2 * len(self.cgi_env_)
for i in xrange(len(self.cgi_env_)): n += self.lengthString(self.cgi_env_[i].ByteSizePartial())
if (self.has_overhead_walltime_milliseconds_): n += 2 + self.lengthVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_): n += 2 + self.lengthString(len(self.user_email_))
if (self.has_is_admin_): n += 3
n += 2 * len(self.individual_stats_)
for i in xrange(len(self.individual_stats_)): n += self.lengthString(self.individual_stats_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_start_timestamp_milliseconds()
self.clear_http_method()
self.clear_http_path()
self.clear_http_query()
self.clear_http_status()
self.clear_duration_milliseconds()
self.clear_api_mcycles()
self.clear_processor_mcycles()
self.clear_rpc_stats()
self.clear_cgi_env()
self.clear_overhead_walltime_milliseconds()
self.clear_user_email()
self.clear_is_admin()
self.clear_individual_stats()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_):
out.putVarInt32(18)
out.putPrefixedString(self.http_method_)
if (self.has_http_path_):
out.putVarInt32(26)
out.putPrefixedString(self.http_path_)
if (self.has_http_query_):
out.putVarInt32(34)
out.putPrefixedString(self.http_query_)
if (self.has_http_status_):
out.putVarInt32(40)
out.putVarInt32(self.http_status_)
out.putVarInt32(48)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_):
out.putVarInt32(56)
out.putVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_):
out.putVarInt32(64)
out.putVarInt64(self.processor_mcycles_)
for i in xrange(len(self.rpc_stats_)):
out.putVarInt32(74)
out.putVarInt32(self.rpc_stats_[i].ByteSize())
self.rpc_stats_[i].OutputUnchecked(out)
for i in xrange(len(self.cgi_env_)):
out.putVarInt32(810)
out.putVarInt32(self.cgi_env_[i].ByteSize())
self.cgi_env_[i].OutputUnchecked(out)
if (self.has_overhead_walltime_milliseconds_):
out.putVarInt32(816)
out.putVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_):
out.putVarInt32(826)
out.putPrefixedString(self.user_email_)
if (self.has_is_admin_):
out.putVarInt32(832)
out.putBoolean(self.is_admin_)
for i in xrange(len(self.individual_stats_)):
out.putVarInt32(858)
out.putVarInt32(self.individual_stats_[i].ByteSize())
self.individual_stats_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_start_timestamp_milliseconds_):
out.putVarInt32(8)
out.putVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_):
out.putVarInt32(18)
out.putPrefixedString(self.http_method_)
if (self.has_http_path_):
out.putVarInt32(26)
out.putPrefixedString(self.http_path_)
if (self.has_http_query_):
out.putVarInt32(34)
out.putPrefixedString(self.http_query_)
if (self.has_http_status_):
out.putVarInt32(40)
out.putVarInt32(self.http_status_)
if (self.has_duration_milliseconds_):
out.putVarInt32(48)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_):
out.putVarInt32(56)
out.putVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_):
out.putVarInt32(64)
out.putVarInt64(self.processor_mcycles_)
for i in xrange(len(self.rpc_stats_)):
out.putVarInt32(74)
out.putVarInt32(self.rpc_stats_[i].ByteSizePartial())
self.rpc_stats_[i].OutputPartial(out)
for i in xrange(len(self.cgi_env_)):
out.putVarInt32(810)
out.putVarInt32(self.cgi_env_[i].ByteSizePartial())
self.cgi_env_[i].OutputPartial(out)
if (self.has_overhead_walltime_milliseconds_):
out.putVarInt32(816)
out.putVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_):
out.putVarInt32(826)
out.putPrefixedString(self.user_email_)
if (self.has_is_admin_):
out.putVarInt32(832)
out.putBoolean(self.is_admin_)
for i in xrange(len(self.individual_stats_)):
out.putVarInt32(858)
out.putVarInt32(self.individual_stats_[i].ByteSizePartial())
self.individual_stats_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_start_timestamp_milliseconds(d.getVarInt64())
continue
if tt == 18:
self.set_http_method(d.getPrefixedString())
continue
if tt == 26:
self.set_http_path(d.getPrefixedString())
continue
if tt == 34:
self.set_http_query(d.getPrefixedString())
continue
if tt == 40:
self.set_http_status(d.getVarInt32())
continue
if tt == 48:
self.set_duration_milliseconds(d.getVarInt64())
continue
if tt == 56:
self.set_api_mcycles(d.getVarInt64())
continue
if tt == 64:
self.set_processor_mcycles(d.getVarInt64())
continue
if tt == 74:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_rpc_stats().TryMerge(tmp)
continue
if tt == 810:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_cgi_env().TryMerge(tmp)
continue
if tt == 816:
self.set_overhead_walltime_milliseconds(d.getVarInt64())
continue
if tt == 826:
self.set_user_email(d.getPrefixedString())
continue
if tt == 832:
self.set_is_admin(d.getBoolean())
continue
if tt == 858:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_individual_stats().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_start_timestamp_milliseconds_: res+=prefix+("start_timestamp_milliseconds: %s\n" % self.DebugFormatInt64(self.start_timestamp_milliseconds_))
if self.has_http_method_: res+=prefix+("http_method: %s\n" % self.DebugFormatString(self.http_method_))
if self.has_http_path_: res+=prefix+("http_path: %s\n" % self.DebugFormatString(self.http_path_))
if self.has_http_query_: res+=prefix+("http_query: %s\n" % self.DebugFormatString(self.http_query_))
if self.has_http_status_: res+=prefix+("http_status: %s\n" % self.DebugFormatInt32(self.http_status_))
if self.has_duration_milliseconds_: res+=prefix+("duration_milliseconds: %s\n" % self.DebugFormatInt64(self.duration_milliseconds_))
if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_))
if self.has_processor_mcycles_: res+=prefix+("processor_mcycles: %s\n" % self.DebugFormatInt64(self.processor_mcycles_))
cnt=0
for e in self.rpc_stats_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("rpc_stats%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.cgi_env_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("cgi_env%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_overhead_walltime_milliseconds_: res+=prefix+("overhead_walltime_milliseconds: %s\n" % self.DebugFormatInt64(self.overhead_walltime_milliseconds_))
if self.has_user_email_: res+=prefix+("user_email: %s\n" % self.DebugFormatString(self.user_email_))
if self.has_is_admin_: res+=prefix+("is_admin: %s\n" % self.DebugFormatBool(self.is_admin_))
cnt=0
for e in self.individual_stats_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("individual_stats%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstart_timestamp_milliseconds = 1
khttp_method = 2
khttp_path = 3
khttp_query = 4
khttp_status = 5
kduration_milliseconds = 6
kapi_mcycles = 7
kprocessor_mcycles = 8
krpc_stats = 9
kcgi_env = 101
koverhead_walltime_milliseconds = 102
kuser_email = 103
kis_admin = 104
kindividual_stats = 107
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "start_timestamp_milliseconds",
2: "http_method",
3: "http_path",
4: "http_query",
5: "http_status",
6: "duration_milliseconds",
7: "api_mcycles",
8: "processor_mcycles",
9: "rpc_stats",
101: "cgi_env",
102: "overhead_walltime_milliseconds",
103: "user_email",
104: "is_admin",
107: "individual_stats",
}, 107)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.STRING,
101: ProtocolBuffer.Encoder.STRING,
102: ProtocolBuffer.Encoder.NUMERIC,
103: ProtocolBuffer.Encoder.STRING,
104: ProtocolBuffer.Encoder.NUMERIC,
107: ProtocolBuffer.Encoder.STRING,
}, 107, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.RequestStatProto'
if _extension_runtime:
pass
__all__ = ['AggregateRpcStatsProto','KeyValProto','StackFrameProto','BilledOpProto','DatastoreCallDetailsProto','IndividualRpcStatsProto','RequestStatProto']<|fim▁end|> | if self.has_http_status_ and self.http_status_ != x.http_status_: return 0 |
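# Editor's note (illustrative sketch, not part of the generated file above):
# the magic numbers in OutputUnchecked/TryMerge are protobuf wire tags,
# computed as (field_number << 3) | wire_type, where wire type 0 = varint and
# 2 = length-delimited. A few lines of standalone Python confirm the constants:

def _wire_tag(field_number, wire_type):
    return (field_number << 3) | wire_type

assert _wire_tag(1, 0) == 8      # start_timestamp_milliseconds, varint
assert _wire_tag(2, 2) == 18     # http_method, length-delimited string
assert _wire_tag(9, 2) == 74     # rpc_stats, embedded message
assert _wire_tag(101, 2) == 810  # cgi_env
assert _wire_tag(102, 0) == 816  # overhead_walltime_milliseconds
assert _wire_tag(104, 0) == 832  # is_admin (bool encodes as varint)
assert _wire_tag(107, 2) == 858  # individual_stats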
<|file_name|>subp_main.py<|end_file_name|><|fim▁begin|># Copyright 2009 Noam Yorav-Raphael
#
# This file is part of DreamPie.
#
# DreamPie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DreamPie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DreamPie. If not, see <http://www.gnu.org/licenses/>.
# This file is a script (not a module) run by the DreamPie GUI.
# It expects one argument: the port to connect to.
# It imports a package called dreampielib from the subp-py2 or subp-py3
# directory (which is expected to be in the directory of __file__),
# and runs dreampielib.subprocess.main(port).
import sys<|fim▁hole|>def main():
port = int(sys.argv[1])
py_ver = sys.version_info[0]
lib_name = abspath(join(dirname(__file__), 'subp-py%d' % py_ver))
sys.path.insert(0, lib_name)
from dreampielib.subprocess import main as subprocess_main
del sys.path[0]
if sys.version_info[:2] == (3, 0):
sys.stderr.write("Warning: DreamPie doesn't support Python 3.0. \n"
"Please upgrade to Python 3.1.\n")
subprocess_main(port)
if __name__ == '__main__':
main()<|fim▁end|> |
from os.path import abspath, join, dirname
|
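# Editor's sketch (illustrative, standalone): the interpreter-version switch in
# main() above boils down to this pure function; the directory names are the
# ones the script assumes (subp-py2 / subp-py3 next to subp_main.py).

from os.path import abspath, dirname, join

def lib_dir_for(version_info, script_path):
    # Python 2.x -> <dir>/subp-py2 ; Python 3.x -> <dir>/subp-py3
    return abspath(join(dirname(script_path), 'subp-py%d' % version_info[0]))

assert lib_dir_for((2, 7, 0), '/opt/dreampie/subp_main.py').endswith('subp-py2')
assert lib_dir_for((3, 1, 0), '/opt/dreampie/subp_main.py').endswith('subp-py3')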
<|file_name|>chat.client.routes.js<|end_file_name|><|fim▁begin|>(function () {
'use strict';
// var matcher = require('../../lib/matchUsers');
angular
.module('chat.routes')
.config(routeConfig);
routeConfig.$inject = ['$stateProvider'];<|fim▁hole|> $stateProvider
.state('chat', {
url: c,
templateUrl: '/modules/chat/client/views/chat.client.view.html',
controller: 'ChatController',
controllerAs: 'vm',
data: {
roles: ['user', 'admin'],
pageTitle: 'Chat'
}
});
}
}());<|fim▁end|> | var c = "/room";// need to make this somehow return the correct room
function routeConfig($stateProvider) { |
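// Editor's sketch (illustrative, not in the original): the TODO above — making
// the state resolve the correct room instead of the hard-coded "/room" — is
// usually handled by putting the room id in the ui-router URL:
//
//   .state('chat', {
//     url: '/room/:roomId',
//     templateUrl: '/modules/chat/client/views/chat.client.view.html',
//     controller: 'ChatController',
//     controllerAs: 'vm'
//   });
//
// ChatController can then read $stateParams.roomId to join the right room.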
<|file_name|>index.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node
var express = require('express'),
package = require('./package.json'),
program = require('commander'),
_ = require('underscore'),
Assets = require('./lib/assets.js');
program
.version(package.version)
.option('-s, --store <store>', 'Location of storage')
.option('-u, --url <url>', 'Base url', '/store')
.parse(process.argv);
if (!program.store)
program.help();
var assets = new Assets(program.store),
app = express();
app.get(program.url + '/:id',
/**
* GET asset meta information
*
* @param {Object} req
* @prop {Object} params
* @prop {string} params.id The asset ID
*
* @returns {json} Meta information about asset
*/
function(req, res) {<|fim▁hole|> assets
.get(req.params.id)
.done(function(asset) {
res.json(asset);
}, function() {
res.json(404, {error: 'Asset not found'});
});
}
);
app.get(program.url + '/data/:id',
/**
* GET asset data
*
* @param {Object} req
* @prop {Object} params
* @prop {string} params.id The asset ID
*
* @returns {file|redirect} File associated with asset or redirect to server
* that has the file
*/
function(req, res) {
assets
.getData(req.params.id)
.done(function(file) {
res.download(file.path, file.name);
}, function() {
res.json(404, {error: 'Asset not found'});
});
}
);
app.post(program.url,
[express.json(), express.multipart()],
/**
* POST asset meta information and data
*
* @param {Object} req
* @prop {Object} files Uploaded files
*
   * @returns {string} Empty body on success; JSON error object on failure
*/
function(req, res) {
assets
.fromUploads(req.files)
.done(function() {
res.send('');
}, function(err) {
res.send(500, {error: 'Upload failed', raw_error: err});
});
}
);
app.put(program.url + '/:id',
[express.json(), express.multipart()],
/**
* PUT asset meta information
*
* @param {Object} req
* @prop {Object} params
* @prop {string} params.id The asset ID
* @prop {Object} files Uploaded files
*
* @returns {json} Meta information about asset
*/
function(req, res) {
      // `file` was undefined here and the request was never answered; pass the
      // uploaded files instead, assuming Assets#put follows the same promise
      // convention as get()/fromUploads() above.
      assets
        .put(req.params.id, req.files)
        .done(function(asset) {
          res.json(asset);
        }, function(err) {
          res.json(500, {error: 'Update failed', raw_error: err});
        });
}
);
app.delete(program.url + '/:id',
/**
* DELETE asset
*
* @param {Object} req
* @prop {Object} params
* @prop {string} params.id The asset ID
*
* @returns {json} Meta information about asset
*/
function(req, res) {
      // Answer the request (the original sent no response), assuming
      // Assets#delete follows the same promise convention as get() above.
      assets
        .delete(req.params.id)
        .done(function() {
          res.send('');
        }, function() {
          res.json(404, {error: 'Asset not found'});
        });
}
);
assets
.init(true)
.then(function() {
app.listen(3001);
console.log('Listening on port 3001');
});<|fim▁end|> | |
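// Editor's sketch (illustrative): a minimal client for the GET endpoints above,
// assuming the server is started locally (node index.js --store ./data) and the
// --url option is left at its '/store' default. The asset id is hypothetical.

var http = require('http');

http.get('http://localhost:3001/store/some-asset-id', function(res) {
  var body = '';
  res.on('data', function(chunk) { body += chunk; });
  res.on('end', function() {
    // 200 -> asset metadata; 404 -> {error: 'Asset not found'}
    console.log(res.statusCode, JSON.parse(body));
  });
});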
<|file_name|>local.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use alloc::owned::Box;
use local_ptr;
use task::Task;
/// Encapsulates some task-local data.
pub trait Local<Borrowed> {
fn put(value: Box<Self>);
fn take() -> Box<Self>;
fn try_take() -> Option<Box<Self>>;
fn exists(unused_value: Option<Self>) -> bool;
fn borrow(unused_value: Option<Self>) -> Borrowed;
unsafe fn unsafe_take() -> Box<Self>;
unsafe fn unsafe_borrow() -> *mut Self;
unsafe fn try_unsafe_borrow() -> Option<*mut Self>;
}
#[allow(visible_private_types)]
impl Local<local_ptr::Borrowed<Task>> for Task {
#[inline]
fn put(value: Box<Task>) { unsafe { local_ptr::put(value) } }
#[inline]
fn take() -> Box<Task> { unsafe { local_ptr::take() } }
#[inline]
fn try_take() -> Option<Box<Task>> { unsafe { local_ptr::try_take() } }
fn exists(_: Option<Task>) -> bool { local_ptr::exists() }
#[inline]
fn borrow(_: Option<Task>) -> local_ptr::Borrowed<Task> {
unsafe {
local_ptr::borrow::<Task>()
}
}
#[inline]
unsafe fn unsafe_take() -> Box<Task> { local_ptr::unsafe_take() }
#[inline]
unsafe fn unsafe_borrow() -> *mut Task { local_ptr::unsafe_borrow() }
#[inline]
unsafe fn try_unsafe_borrow() -> Option<*mut Task> {
local_ptr::try_unsafe_borrow()
}
}
#[cfg(test)]
mod test {
use std::prelude::*;
use std::rt::thread::Thread;
use super::*;
use task::Task;
#[test]
fn thread_local_task_smoke_test() {
Thread::start(proc() {
let task = box Task::new();
Local::put(task);
let task: Box<Task> = Local::take();
cleanup_task(task);
}).join();
}
#[test]
fn thread_local_task_two_instances() {
Thread::start(proc() {
let task = box Task::new();
Local::put(task);
let task: Box<Task> = Local::take();
cleanup_task(task);
let task = box Task::new();
Local::put(task);
let task: Box<Task> = Local::take();
cleanup_task(task);
}).join();
}
#[test]
fn borrow_smoke_test() {
Thread::start(proc() {
let task = box Task::new();
Local::put(task);
unsafe {
let _task: *mut Task = Local::unsafe_borrow();
}
let task: Box<Task> = Local::take();
cleanup_task(task);
}).join();
}
#[test]
fn borrow_with_return() {
Thread::start(proc() {
let task = box Task::new();
Local::put(task);
{
let _ = Local::borrow(None::<Task>);
}
let task: Box<Task> = Local::take();
cleanup_task(task);
}).join();
}
#[test]
fn try_take() {
Thread::start(proc() {
let task = box Task::new();
Local::put(task);<|fim▁hole|>
cleanup_task(t);
}).join();
}
fn cleanup_task(mut t: Box<Task>) {
t.destroyed = true;
}
}<|fim▁end|> |
let t: Box<Task> = Local::try_take().unwrap();
let u: Option<Box<Task>> = Local::try_take();
assert!(u.is_none()); |
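// Editor's note (illustrative): the unused `Option<Self>` parameters on
// exists() and borrow() are a pre-1.0 Rust idiom for picking the trait impl at
// the call site, e.g.
//
//     let have_task = Local::exists(None::<Task>); // None::<Task> selects impl
//
// Today this would be written with fully qualified syntax instead:
// `<Task as Local<_>>::exists()`.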
<|file_name|>GuessTypeParameters.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.codeInsight.daemon.impl.quickfix;
import com.intellij.codeInsight.ExpectedTypeInfo;
import com.intellij.codeInsight.ExpectedTypesProvider;
import com.intellij.codeInsight.intention.impl.TypeExpression;
import com.intellij.codeInsight.template.TemplateBuilder;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.project.Project;
import com.intellij.psi.*;
import com.intellij.psi.search.GlobalSearchScope;
import com.intellij.psi.util.PsiTreeUtil;
import com.intellij.util.IncorrectOperationException;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.Nullable;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* @author ven
*/
public class GuessTypeParameters {
private final JVMElementFactory myFactory;
private static final Logger LOG = Logger.getInstance("#com.intellij.codeInsight.daemon.impl.quickfix.GuessTypeParameters");
public GuessTypeParameters(JVMElementFactory factory) {
myFactory = factory;
}
private List<PsiType> matchingTypeParameters (PsiType[] paramVals, PsiTypeParameter[] params, ExpectedTypeInfo info) {
PsiType type = info.getType();
int kind = info.getKind();
List<PsiType> result = new ArrayList<PsiType>();
for (int i = 0; i < paramVals.length; i++) {
PsiType val = paramVals[i];
if (val != null) {
switch (kind) {
case ExpectedTypeInfo.TYPE_STRICTLY:
if (val.equals(type)) result.add(myFactory.createType(params[i]));
break;
case ExpectedTypeInfo.TYPE_OR_SUBTYPE:
if (type.isAssignableFrom(val)) result.add(myFactory.createType(params[i]));
break;
case ExpectedTypeInfo.TYPE_OR_SUPERTYPE:
if (val.isAssignableFrom(type)) result.add(myFactory.createType(params[i]));
break;
}
}
}
return result;
}
public void setupTypeElement (PsiTypeElement typeElement, ExpectedTypeInfo[] infos, PsiSubstitutor substitutor,
TemplateBuilder builder, @Nullable PsiElement context, PsiClass targetClass) {
LOG.assertTrue(typeElement.isValid());
ApplicationManager.getApplication().assertWriteAccessAllowed();
PsiManager manager = typeElement.getManager();
GlobalSearchScope scope = typeElement.getResolveScope();
Project project = manager.getProject();
if (infos.length == 1 && substitutor != null && substitutor != PsiSubstitutor.EMPTY) {
ExpectedTypeInfo info = infos[0];
Map<PsiTypeParameter, PsiType> map = substitutor.getSubstitutionMap();
PsiType[] vals = map.values().toArray(PsiType.createArray(map.size()));
PsiTypeParameter[] params = map.keySet().toArray(new PsiTypeParameter[map.size()]);
List<PsiType> types = matchingTypeParameters(vals, params, info);
if (!types.isEmpty()) {
ContainerUtil.addAll(types, ExpectedTypesProvider.processExpectedTypes(infos, new MyTypeVisitor(manager, scope), project));
builder.replaceElement(typeElement, new TypeExpression(project, types.toArray(PsiType.createArray(types.size()))));
return;
}
else {
PsiElementFactory factory = JavaPsiFacade.getInstance(manager.getProject()).getElementFactory();
PsiType type = info.getType();
PsiType defaultType = info.getDefaultType();
try {
PsiTypeElement inplaceTypeElement = ((PsiVariable)factory.createVariableDeclarationStatement("foo", type, null).getDeclaredElements()[0]).getTypeElement();
PsiSubstitutor rawingSubstitutor = getRawingSubstitutor (context, targetClass);
          int substitutionResult = substituteToTypeParameters(typeElement, inplaceTypeElement, vals, params, builder, rawingSubstitutor, true);
          if (substitutionResult != SUBSTITUTED_NONE) {
            if (substitutionResult == SUBSTITUTED_IN_PARAMETERS) {
PsiJavaCodeReferenceElement refElement = typeElement.getInnermostComponentReferenceElement();
LOG.assertTrue(refElement != null && refElement.getReferenceNameElement() != null);
type = getComponentType(type);
LOG.assertTrue(type != null);<|fim▁hole|> defaultType = getComponentType(defaultType);
LOG.assertTrue(defaultType != null);
ExpectedTypeInfo info1 = ExpectedTypesProvider.createInfo(((PsiClassType)defaultType).rawType(),
ExpectedTypeInfo.TYPE_STRICTLY,
((PsiClassType)defaultType).rawType(),
info.getTailType());
MyTypeVisitor visitor = new MyTypeVisitor(manager, scope);
builder.replaceElement(refElement.getReferenceNameElement(),
new TypeExpression(project, ExpectedTypesProvider.processExpectedTypes(new ExpectedTypeInfo[]{info1}, visitor, project)));
}
return;
}
}
catch (IncorrectOperationException e) {
LOG.error(e);
}
}
}
PsiType[] types = infos.length == 0 ? new PsiType[] {typeElement.getType()} : ExpectedTypesProvider.processExpectedTypes(infos, new MyTypeVisitor(manager, scope), project);
builder.replaceElement(typeElement,
new TypeExpression(project, types));
}
private static PsiSubstitutor getRawingSubstitutor(PsiElement context, PsiClass targetClass) {
if (context == null || targetClass == null) return PsiSubstitutor.EMPTY;
PsiTypeParameterListOwner currContext = PsiTreeUtil.getParentOfType(context, PsiTypeParameterListOwner.class);
PsiManager manager = context.getManager();
PsiSubstitutor substitutor = PsiSubstitutor.EMPTY;
while (currContext != null && !manager.areElementsEquivalent(currContext, targetClass)) {
PsiTypeParameter[] typeParameters = currContext.getTypeParameters();
substitutor = JavaPsiFacade.getInstance(context.getProject()).getElementFactory().createRawSubstitutor(substitutor, typeParameters);
currContext = currContext.getContainingClass();
}
return substitutor;
}
@Nullable
private static PsiClassType getComponentType (PsiType type) {
type = type.getDeepComponentType();
if (type instanceof PsiClassType) return (PsiClassType)type;
return null;
}
private static final int SUBSTITUTED_NONE = 0;
private static final int SUBSTITUTED_IN_REF = 1;
private static final int SUBSTITUTED_IN_PARAMETERS = 2;
private int substituteToTypeParameters (PsiTypeElement typeElement,
PsiTypeElement inplaceTypeElement,
PsiType[] paramVals,
PsiTypeParameter[] params,
TemplateBuilder builder,
PsiSubstitutor rawingSubstitutor,
boolean toplevel) {
PsiType type = inplaceTypeElement.getType();
List<PsiType> types = new ArrayList<PsiType>();
for (int i = 0; i < paramVals.length; i++) {
PsiType val = paramVals[i];
if (val == null) return SUBSTITUTED_NONE;
if (type.equals(val)) {
types.add(myFactory.createType(params[i]));
}
}
if (!types.isEmpty()) {
Project project = typeElement.getProject();
PsiType substituted = rawingSubstitutor.substitute(type);
if (!CommonClassNames.JAVA_LANG_OBJECT.equals(substituted.getCanonicalText()) && (toplevel || substituted.equals(type))) {
types.add(substituted);
}
builder.replaceElement(typeElement, new TypeExpression(project, types.toArray(PsiType.createArray(types.size()))));
return toplevel ? SUBSTITUTED_IN_REF : SUBSTITUTED_IN_PARAMETERS;
}
boolean substituted = false;
PsiJavaCodeReferenceElement ref = typeElement.getInnermostComponentReferenceElement();
PsiJavaCodeReferenceElement inplaceRef = inplaceTypeElement.getInnermostComponentReferenceElement();
if (ref != null) {
LOG.assertTrue(inplaceRef != null);
PsiTypeElement[] innerTypeElements = ref.getParameterList().getTypeParameterElements();
PsiTypeElement[] inplaceInnerTypeElements = inplaceRef.getParameterList().getTypeParameterElements();
for (int i = 0; i < innerTypeElements.length; i++) {
substituted |= substituteToTypeParameters(innerTypeElements[i], inplaceInnerTypeElements[i], paramVals, params, builder,
rawingSubstitutor, false) != SUBSTITUTED_NONE;
}
}
return substituted ? SUBSTITUTED_IN_PARAMETERS : SUBSTITUTED_NONE;
}
public static class MyTypeVisitor extends PsiTypeVisitor<PsiType> {
private final GlobalSearchScope myResolveScope;
private final PsiManager myManager;
public MyTypeVisitor(PsiManager manager, GlobalSearchScope resolveScope) {
myManager = manager;
myResolveScope = resolveScope;
}
@Override
public PsiType visitType(PsiType type) {
if (type.equals(PsiType.NULL)) return PsiType.getJavaLangObject(myManager, myResolveScope);
return type;
}
@Override
public PsiType visitCapturedWildcardType(PsiCapturedWildcardType capturedWildcardType) {
return capturedWildcardType.getUpperBound().accept(this);
}
}
}<|fim▁end|> |
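// Editor's note (worked example, illustrative): suppose the target class is
// `class Box<T>` and the insertion context substitutes T -> String. For an
// expected type of String with kind TYPE_STRICTLY, matchingTypeParameters()
// compares each substituted value against the expected type, finds the match,
// and returns the type parameter itself — so the generated member is typed as
// T rather than the concrete String, keeping the new code generic.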