the-stack_0_16728 | # -*- coding: utf-8 -*-
"""
This script outputs PC1/PC2/PC3 data extracted from preprocd_dataset.npz
files of MD samples.
"""
import numpy as np
def makePC123(dtsetfile, outfile, grpname):
dtset= np.load(dtsetfile, allow_pickle=True)
#allow_pickle op is for adapting spec change of numpy 1.16.3 and later
dts= dtset['dataset']
dataset0=[]
for dt in dts:
dt0=dt['inputs/0']
dataset0.append(dt0)
dim0=len(dataset0)
dim1=len(dataset0[0])
dim2=len(dataset0[0][0])
with open(outfile, 'w') as f1:
for dt64 in dataset0:
for dt in dt64:
wdt=str(dt[0])+" "+str(dt[1])+" "+str(dt[2])+"\n"
f1.write(wdt)
print(f'Saved PC1/PC2/PC3 data of {grpname}: Shape= {dim0} x {dim1} x {dim2}')
if __name__ == '__main__':
mdfolder="/home/okugawa/HDNNP/Si-190808-md"
outfolder=mdfolder+"/result/PC123/"
grps=['1000K','1200K']
for grp in grps:
for j in range(1,11):
grpname=grp+"-"+str(j)
dtsetdir=mdfolder+"/"+grp+"/"+str(j)
dtsetfile=dtsetdir+"/data/CrystalSi64/preprocd_dataset.npz"
outfile=outfolder+grpname+"-PC123.txt"
makePC123(dtsetfile, outfile, grpname)
|
the-stack_0_16730 | from pytorch_lightning.callbacks import ModelCheckpoint
def compute_receptive_field(kernel_pattern, dilation_pattern):
""" Compute the receptive field in samples."""
rf = 1
for kernel_size, dilation in zip(kernel_pattern, dilation_pattern):
rf += (kernel_size-1) * dilation
return rf
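# Illustrative check (values are hypothetical, not from this project): a stack with
# kernel_pattern=[2, 2, 2, 2] and dilation_pattern=[1, 2, 4, 8] gives
# 1 + 1*1 + 1*2 + 1*4 + 1*8 = 16 samples.
# assert compute_receptive_field([2, 2, 2, 2], [1, 2, 4, 8]) == 16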
def to_np(x):
return x.detach().cpu().squeeze().numpy()
class CheckpointSaverCallback(ModelCheckpoint):
def on_keyboard_interrupt(self, trainer, pl_module):
print('CheckpointSaverCallback - Keyboard Interrupt. Best model path, best model score', self.best_model_path, self.best_model_score)
pl_module.logger.experiment.log_model(f'best_model', self.best_model_path)
pl_module.logger.experiment.log_parameter("best_model_path", self.best_model_path)
pl_module.logger.experiment.end()
def on_train_start(self, trainer, pl_module):
super(CheckpointSaverCallback, self).on_train_start(trainer, pl_module)
trainable_parameters = sum(p.numel() for p in pl_module.parameters() if p.requires_grad)
pl_module.logger.experiment.log_parameter("trainable_params", trainable_parameters)
# save before training
local_model_path = pl_module.logger.save_dir+f"/checkpoints/epoch0.ckpt"
trainer.save_checkpoint(local_model_path)
pl_module.logger.experiment.log_model(f'epoch0', local_model_path)
def on_train_end(self, trainer, pl_module):
print('CheckpointSaverCallback - Train End. Best model path, best model score', self.best_model_path, self.best_model_score)
super(CheckpointSaverCallback, self).on_train_end(trainer, pl_module)
pl_module.logger.experiment.log_model(f'best_model', self.best_model_path)
pl_module.logger.experiment.log_parameter("best_model_path", self.best_model_path)
pl_module.logger.experiment.end()
def on_validation_end(self, trainer, pl_module):
super(CheckpointSaverCallback, self).on_validation_end(trainer, pl_module)
epoch = pl_module.current_epoch
if epoch in [1,2,3,5,10,25,50,75,100,150,200,500,750,1000,1500,2000]:
print(f'Epoch {epoch}: Saving checkpoint, logging histogram.')
local_model_path = pl_module.logger.save_dir+f"/checkpoints/epoch{epoch}.ckpt"
trainer.save_checkpoint(local_model_path)
pl_module.logger.experiment.log_model(f'epoch{epoch}', local_model_path)
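# Illustrative wiring (a sketch only; `MyLightningModule` and the exact keyword
# arguments depend on the pytorch_lightning version and are assumptions, not taken
# from this file):
# from pytorch_lightning import Trainer
# callback = CheckpointSaverCallback(monitor='val_loss', save_top_k=1)
# trainer = Trainer(callbacks=[callback], max_epochs=100)
# trainer.fit(MyLightningModule())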
|
the-stack_0_16731 | #!/usr/bin/env python
'''
Python WebSocket library with support for "wss://" encryption.
Copyright 2011 Joel Martin
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
Supports the following protocol versions:
- http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
- http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import os, sys, time, errno, signal, socket, traceback, select
import array, struct
from base64 import b64encode, b64decode
# Imports that vary by python version
# python 3.0 differences
if sys.hexversion > 0x3000000:
b2s = lambda buf: buf.decode('latin_1')
s2b = lambda s: s.encode('latin_1')
s2a = lambda s: s
else:
b2s = lambda buf: buf # No-op
s2b = lambda s: s # No-op
s2a = lambda s: [ord(c) for c in s]
try: from io import StringIO
except: from cStringIO import StringIO
try: from http.server import SimpleHTTPRequestHandler
except: from SimpleHTTPServer import SimpleHTTPRequestHandler
# python 2.6 differences
try: from hashlib import md5, sha1
except: from md5 import md5; from sha import sha as sha1
# python 2.5 differences
try:
from struct import pack, unpack_from
except:
from struct import pack
def unpack_from(fmt, buf, offset=0):
slice = buffer(buf, offset, struct.calcsize(fmt))
return struct.unpack(fmt, slice)
# Degraded functionality if these imports are missing
for mod, sup in [('numpy', 'HyBi protocol'), ('ssl', 'TLS/SSL/wss'),
('multiprocessing', 'Multi-Processing'),
('resource', 'daemonizing')]:
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
print("WARNING: no '%s' module, %s is slower or disabled" % (
mod, sup))
if multiprocessing and sys.platform == 'win32':
# make sockets pickle-able/inheritable
import multiprocessing.reduction
class WebSocketServer(object):
"""
WebSockets server class.
Must be subclassed, with the new_client() method overridden.
"""
buffer_size = 65536
server_handshake_hixie = """HTTP/1.1 101 Web Socket Protocol Handshake\r
Upgrade: WebSocket\r
Connection: Upgrade\r
%sWebSocket-Origin: %s\r
%sWebSocket-Location: %s://%s%s\r
"""
server_handshake_hybi = """HTTP/1.1 101 Switching Protocols\r
Upgrade: websocket\r
Connection: Upgrade\r
Sec-WebSocket-Accept: %s\r
"""
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
policy_response = """<cross-domain-policy><allow-access-from domain="*" to-ports="*" /></cross-domain-policy>\n"""
# An exception before the WebSocket connection was established
class EClose(Exception):
pass
# An exception while the WebSocket client was connected
class CClose(Exception):
pass
def __init__(self, listen_host='', listen_port=None, source_is_ipv6=False,
verbose=False, cert='', key='', ssl_only=None,
daemon=False, record='', web='',
run_once=False, timeout=0, idle_timeout=0):
# settings
self.verbose = verbose
self.listen_host = listen_host
self.listen_port = listen_port
self.prefer_ipv6 = source_is_ipv6
self.ssl_only = ssl_only
self.daemon = daemon
self.run_once = run_once
self.timeout = timeout
self.idle_timeout = idle_timeout
self.launch_time = time.time()
self.ws_connection = False
self.handler_id = 1
# Make paths settings absolute
self.cert = os.path.abspath(cert)
self.key = self.web = self.record = ''
if key:
self.key = os.path.abspath(key)
if web:
self.web = os.path.abspath(web)
if record:
self.record = os.path.abspath(record)
if self.web:
os.chdir(self.web)
# Sanity checks
if not ssl and self.ssl_only:
raise Exception("No 'ssl' module and SSL-only specified")
if self.daemon and not resource:
raise Exception("Module 'resource' required to daemonize")
# Show configuration
print("WebSocket server settings:")
print(" - Listen on %s:%s" % (
self.listen_host, self.listen_port))
print(" - Flash security policy server")
if self.web:
print(" - Web server. Web root: %s" % self.web)
if ssl:
if os.path.exists(self.cert):
print(" - SSL/TLS support")
if self.ssl_only:
print(" - Deny non-SSL/TLS connections")
else:
print(" - No SSL/TLS support (no cert file)")
else:
print(" - No SSL/TLS support (no 'ssl' module)")
if self.daemon:
print(" - Backgrounding (daemon)")
if self.record:
print(" - Recording to '%s.*'" % self.record)
#
# WebSocketServer static methods
#
@staticmethod
def socket(host, port=None, connect=False, prefer_ipv6=False, unix_socket=None, use_ssl=False):
""" Resolve a host (and optional port) to an IPv4 or IPv6
address. Create a socket. Bind to it if listen is set,
otherwise connect to it. Return the socket.
"""
flags = 0
if host == '':
host = None
if connect and not (port or unix_socket):
raise Exception("Connect mode requires a port")
if use_ssl and not ssl:
raise Exception("SSL socket requested but Python SSL module not loaded.");
if not connect and use_ssl:
raise Exception("SSL only supported in connect mode (for now)")
if not connect:
flags = flags | socket.AI_PASSIVE
if not unix_socket:
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
socket.IPPROTO_TCP, flags)
if not addrs:
raise Exception("Could not resolve host '%s'" % host)
addrs.sort(key=lambda x: x[0])
if prefer_ipv6:
addrs.reverse()
sock = socket.socket(addrs[0][0], addrs[0][1])
if connect:
sock.connect(addrs[0][4])
if use_ssl:
sock = ssl.wrap_socket(sock)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addrs[0][4])
sock.listen(100)
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(unix_socket)
return sock
@staticmethod
def daemonize(keepfd=None, chdir='/'):
os.umask(0)
if chdir:
os.chdir(chdir)
else:
os.chdir('/')
os.setgid(os.getgid()) # relinquish elevations
os.setuid(os.getuid()) # relinquish elevations
# Double fork to daemonize
if os.fork() > 0: os._exit(0) # Parent exits
os.setsid() # Obtain new process group
if os.fork() > 0: os._exit(0) # Parent exits
# Signal handling
def terminate(a,b): os._exit(0)
signal.signal(signal.SIGTERM, terminate)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Close open files
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY: maxfd = 256
for fd in reversed(range(maxfd)):
try:
if fd != keepfd:
os.close(fd)
except OSError:
_, exc, _ = sys.exc_info()
if exc.errno != errno.EBADF: raise
# Redirect I/O to /dev/null
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdin.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdout.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stderr.fileno())
@staticmethod
def unmask(buf, hlen, plen):
pstart = hlen + 4
pend = pstart + plen
if numpy:
b = c = s2b('')
if plen >= 4:
mask = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=hlen, count=1)
data = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=pstart, count=int(plen / 4))
#b = numpy.bitwise_xor(data, mask).data
b = numpy.bitwise_xor(data, mask).tostring()
if plen % 4:
#print("Partial unmask")
mask = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=hlen, count=(plen % 4))
data = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=pend - (plen % 4),
count=(plen % 4))
c = numpy.bitwise_xor(data, mask).tostring()
return b + c
else:
# Slower fallback
mask = buf[hlen:hlen+4]
data = array.array('B')
mask = s2a(mask)
data.fromstring(buf[pstart:pend])
for i in range(len(data)):
data[i] ^= mask[i % 4]
return data.tostring()
@staticmethod
def encode_hybi(buf, opcode, base64=False):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame (base64 encode buf)
0x2 - binary frame (use raw buf)
0x8 - connection close
0x9 - ping
0xA - pong
"""
if base64:
buf = b64encode(buf)
b1 = 0x80 | (opcode & 0x0f) # FIN + opcode
payload_len = len(buf)
if payload_len <= 125:
header = pack('>BB', b1, payload_len)
elif payload_len > 125 and payload_len < 65536:
header = pack('>BBH', b1, 126, payload_len)
elif payload_len >= 65536:
header = pack('>BBQ', b1, 127, payload_len)
#print("Encoded: %s" % repr(header + buf))
return header + buf, len(header), 0
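# Worked example (illustrative): encode_hybi(s2b('hi'), opcode=0x1) sets
# b1 = 0x80 | 0x1 = 0x81 (FIN + text frame); since the 2-byte payload is <= 125,
# the header is pack('>BB', 0x81, 2), so the call returns (b'\x81\x02hi', 2, 0).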
@staticmethod
def decode_hybi(buf, base64=False):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : 0_or_1,
'opcode' : number,
'masked' : boolean,
'hlen' : header_bytes_number,
'length' : payload_bytes_number,
'payload' : decoded_buffer,
'left' : bytes_left_number,
'close_code' : number,
'close_reason' : string}
"""
f = {'fin' : 0,
'opcode' : 0,
'masked' : False,
'hlen' : 2,
'length' : 0,
'payload' : None,
'left' : 0,
'close_code' : 1000,
'close_reason' : ''}
blen = len(buf)
f['left'] = blen
if blen < f['hlen']:
return f # Incomplete frame header
b1, b2 = unpack_from(">BB", buf)
f['opcode'] = b1 & 0x0f
f['fin'] = (b1 & 0x80) >> 7
f['masked'] = (b2 & 0x80) >> 7
f['length'] = b2 & 0x7f
if f['length'] == 126:
f['hlen'] = 4
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxH', buf)
elif f['length'] == 127:
f['hlen'] = 10
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxQ', buf)
full_len = f['hlen'] + f['masked'] * 4 + f['length']
if blen < full_len: # Incomplete frame
return f # Incomplete frame header
# Number of bytes that are part of the next frame(s)
f['left'] = blen - full_len
# Process 1 frame
if f['masked']:
# unmask payload
f['payload'] = WebSocketServer.unmask(buf, f['hlen'],
f['length'])
else:
print("Unmasked frame: %s" % repr(buf))
f['payload'] = buf[(f['hlen'] + f['masked'] * 4):full_len]
if base64 and f['opcode'] in [1, 2]:
try:
f['payload'] = b64decode(f['payload'])
except:
print("Exception while b64decoding buffer: %s" %
repr(buf))
raise
if f['opcode'] == 0x08:
if f['length'] >= 2:
f['close_code'] = unpack_from(">H", f['payload'])[0]
if f['length'] > 3:
f['close_reason'] = f['payload'][2:]
return f
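# Worked example (illustrative): decode_hybi(b'\x81\x02hi') returns a dict with
# fin=1, opcode=1, masked=0, hlen=2, length=2, payload=b'hi' and left=0
# (it also prints the "Unmasked frame" warning, since client frames are normally masked).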
@staticmethod
def encode_hixie(buf):
return s2b("\x00" + b2s(b64encode(buf)) + "\xff"), 1, 1
@staticmethod
def decode_hixie(buf):
end = buf.find(s2b('\xff'))
return {'payload': b64decode(buf[1:end]),
'hlen': 1,
'masked': False,
'length': end - 1,
'left': len(buf) - (end + 1)}
@staticmethod
def gen_md5(keys):
""" Generate hash value for WebSockets hixie-76. """
key1 = keys['Sec-WebSocket-Key1']
key2 = keys['Sec-WebSocket-Key2']
key3 = keys['key3']
spaces1 = key1.count(" ")
spaces2 = key2.count(" ")
num1 = int("".join([c for c in key1 if c.isdigit()])) / spaces1
num2 = int("".join([c for c in key2 if c.isdigit()])) / spaces2
return b2s(md5(pack('>II8s',
int(num1), int(num2), key3)).digest())
#
# WebSocketServer logging/output functions
#
def traffic(self, token="."):
""" Show traffic flow in verbose mode. """
if self.verbose and not self.daemon:
sys.stdout.write(token)
sys.stdout.flush()
def msg(self, msg):
""" Output message with handler_id prefix. """
if not self.daemon:
print("% 3d: %s" % (self.handler_id, msg))
def vmsg(self, msg):
""" Same as msg() but only if verbose. """
if self.verbose:
self.msg(msg)
#
# Main WebSocketServer methods
#
def send_frames(self, bufs=None):
""" Encode and send WebSocket frames. Any frames already
queued will be sent first. If bufs is not set then only queued
frames will be sent. Returns the number of pending frames that
could not be fully sent. If returned pending frames is greater
than 0, then the caller should call again when the socket is
ready. """
tdelta = int(time.time()*1000) - self.start_time
if bufs:
for buf in bufs:
if self.version.startswith("hybi"):
if self.base64:
encbuf, lenhead, lentail = self.encode_hybi(
buf, opcode=1, base64=True)
else:
encbuf, lenhead, lentail = self.encode_hybi(
buf, opcode=2, base64=False)
else:
encbuf, lenhead, lentail = self.encode_hixie(buf)
if self.rec:
self.rec.write("%s,\n" %
repr("{%s{" % tdelta
+ encbuf[lenhead:len(encbuf)-lentail]))
self.send_parts.append(encbuf)
while self.send_parts:
# Send pending frames
buf = self.send_parts.pop(0)
sent = self.client.send(buf)
if sent == len(buf):
self.traffic("<")
else:
self.traffic("<.")
self.send_parts.insert(0, buf[sent:])
break
return len(self.send_parts)
def recv_frames(self):
""" Receive and decode WebSocket frames.
Returns:
(bufs_list, closed_string)
"""
closed = False
bufs = []
tdelta = int(time.time()*1000) - self.start_time
buf = self.client.recv(self.buffer_size)
if len(buf) == 0:
closed = {'code': 1000, 'reason': "Client closed abruptly"}
return bufs, closed
if self.recv_part:
# Add partially received frames to current read buffer
buf = self.recv_part + buf
self.recv_part = None
while buf:
if self.version.startswith("hybi"):
frame = self.decode_hybi(buf, base64=self.base64)
#print("Received buf: %s, frame: %s" % (repr(buf), frame))
if frame['payload'] is None:
# Incomplete/partial frame
self.traffic("}.")
if frame['left'] > 0:
self.recv_part = buf[-frame['left']:]
break
else:
if frame['opcode'] == 0x8: # connection close
closed = {'code': frame['close_code'],
'reason': frame['close_reason']}
break
else:
if buf[0:2] == s2b('\xff\x00'):
closed = {'code': 1000,
'reason': "Client sent orderly close frame"}
break
elif buf[0:2] == s2b('\x00\xff'):
buf = buf[2:]
continue # No-op
elif buf.count(s2b('\xff')) == 0:
# Partial frame
self.traffic("}.")
self.recv_part = buf
break
frame = self.decode_hixie(buf)
self.traffic("}")
if self.rec:
start = frame['hlen']
end = frame['hlen'] + frame['length']
if frame['masked']:
recbuf = WebSocketServer.unmask(buf, frame['hlen'],
frame['length'])
else:
recbuf = buf[frame['hlen']:frame['hlen'] +
frame['length']]
self.rec.write("%s,\n" %
repr("}%s}" % tdelta + recbuf))
bufs.append(frame['payload'])
if frame['left']:
buf = buf[-frame['left']:]
else:
buf = ''
return bufs, closed
def send_close(self, code=1000, reason=''):
""" Send a WebSocket orderly close frame. """
if self.version.startswith("hybi"):
msg = pack(">H%ds" % len(reason), code, reason)
buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False)
self.client.send(buf)
elif self.version == "hixie-76":
buf = s2b('\xff\x00')
self.client.send(buf)
# No orderly close for 75
def do_websocket_handshake(self, headers, path):
h = self.headers = headers
self.path = path
prot = 'WebSocket-Protocol'
protocols = h.get('Sec-'+prot, h.get(prot, '')).split(',')
ver = h.get('Sec-WebSocket-Version')
if ver:
# HyBi/IETF version of the protocol
# HyBi-07 reports version 7
# HyBi-08 - HyBi-12 report version 8
# HyBi-13 reports version 13
if ver in ['7', '8', '13']:
self.version = "hybi-%02d" % int(ver)
else:
raise self.EClose('Unsupported protocol version %s' % ver)
key = h['Sec-WebSocket-Key']
# Choose binary if client supports it
if 'binary' in protocols:
self.base64 = False
elif 'base64' in protocols:
self.base64 = True
else:
raise self.EClose("Client must support 'binary' or 'base64' protocol")
# Generate the hash value for the accept header
accept = b64encode(sha1(s2b(key + self.GUID)).digest())
response = self.server_handshake_hybi % b2s(accept)
if self.base64:
response += "Sec-WebSocket-Protocol: base64\r\n"
else:
response += "Sec-WebSocket-Protocol: binary\r\n"
response += "\r\n"
else:
# Hixie version of the protocol (75 or 76)
if h.get('key3'):
trailer = self.gen_md5(h)
pre = "Sec-"
self.version = "hixie-76"
else:
trailer = ""
pre = ""
self.version = "hixie-75"
# We only support base64 in Hixie era
self.base64 = True
response = self.server_handshake_hixie % (pre,
h['Origin'], pre, self.scheme, h['Host'], path)
if 'base64' in protocols:
response += "%sWebSocket-Protocol: base64\r\n" % pre
else:
self.msg("Warning: client does not report 'base64' protocol support")
response += "\r\n" + trailer
return response
def do_handshake(self, sock, address):
"""
do_handshake does the following:
- Peek at the first few bytes from the socket.
- If the connection is a Flash policy request, answer it,
close the socket and return.
- If the connection is an HTTPS/SSL/TLS connection then SSL
wrap the socket.
- Read from the (possibly wrapped) socket.
- If we have received an HTTP GET request and the webserver
functionality is enabled, answer it, close the socket and
return.
- Assume we have a WebSockets connection, parse the client
handshake data.
- Send a WebSockets handshake server response.
- Return the socket for this WebSocket client.
"""
stype = ""
ready = select.select([sock], [], [], 3)[0]
if not ready:
raise self.EClose("ignoring socket not ready")
# Peek, but do not read the data so that we have an opportunity
# to SSL wrap the socket first
handshake = sock.recv(1024, socket.MSG_PEEK)
#self.msg("Handshake [%s]" % handshake)
if not handshake:
raise self.EClose("ignoring empty handshake")
elif handshake.startswith(s2b("<policy-file-request/>")):
# Answer Flash policy request
handshake = sock.recv(1024)
sock.send(s2b(self.policy_response))
raise self.EClose("Sending flash policy response")
elif handshake[0] in ("\x16", "\x80", 22, 128):
# SSL wrap the connection
if not ssl:
raise self.EClose("SSL connection but no 'ssl' module")
if not os.path.exists(self.cert):
raise self.EClose("SSL connection but '%s' not found"
% self.cert)
retsock = None
try:
retsock = ssl.wrap_socket(
sock,
server_side=True,
certfile=self.cert,
keyfile=self.key)
except ssl.SSLError:
_, x, _ = sys.exc_info()
if x.args[0] == ssl.SSL_ERROR_EOF:
if len(x.args) > 1:
raise self.EClose(x.args[1])
else:
raise self.EClose("Got SSL_ERROR_EOF")
else:
raise
self.scheme = "wss"
stype = "SSL/TLS (wss://)"
elif self.ssl_only:
raise self.EClose("non-SSL connection received but disallowed")
else:
retsock = sock
self.scheme = "ws"
stype = "Plain non-SSL (ws://)"
wsh = WSRequestHandler(retsock, address, not self.web)
if wsh.last_code == 101:
# Continue on to handle WebSocket upgrade
pass
elif wsh.last_code == 405:
raise self.EClose("Normal web request received but disallowed")
elif wsh.last_code < 200 or wsh.last_code >= 300:
raise self.EClose(wsh.last_message)
elif self.verbose:
raise self.EClose(wsh.last_message)
else:
raise self.EClose("")
response = self.do_websocket_handshake(wsh.headers, wsh.path)
self.msg("%s: %s WebSocket connection" % (address[0], stype))
self.msg("%s: Version %s, base64: '%s'" % (address[0],
self.version, self.base64))
if self.path != '/':
self.msg("%s: Path: '%s'" % (address[0], self.path))
# Send server WebSockets handshake response
#self.msg("sending response [%s]" % response)
retsock.send(s2b(response))
# Return the WebSockets socket which may be SSL wrapped
return retsock
#
# Events that can/should be overridden in sub-classes
#
def started(self):
""" Called after WebSockets startup """
self.vmsg("WebSockets server started")
def poll(self):
""" Run periodically while waiting for connections. """
#self.vmsg("Running poll()")
pass
def fallback_SIGCHLD(self, sig, stack):
# Reap zombies when using os.fork() (python 2.4)
self.vmsg("Got SIGCHLD, reaping zombies")
try:
result = os.waitpid(-1, os.WNOHANG)
while result[0]:
self.vmsg("Reaped child process %s" % result[0])
result = os.waitpid(-1, os.WNOHANG)
except (OSError):
pass
def do_SIGINT(self, sig, stack):
self.msg("Got SIGINT, exiting")
sys.exit(0)
def top_new_client(self, startsock, address):
""" Do something with a WebSockets client connection. """
# Initialize per client settings
self.send_parts = []
self.recv_part = None
self.base64 = False
self.rec = None
self.start_time = int(time.time()*1000)
# handler process
try:
try:
self.client = self.do_handshake(startsock, address)
if self.record:
# Record raw frame data as JavaScript array
fname = "%s.%s" % (self.record,
self.handler_id)
self.msg("opening record file: %s" % fname)
self.rec = open(fname, 'w+')
encoding = "binary"
if self.base64: encoding = "base64"
self.rec.write("var VNC_frame_encoding = '%s';\n"
% encoding)
self.rec.write("var VNC_frame_data = [\n")
self.ws_connection = True
self.new_client()
except self.CClose:
# Close the client
_, exc, _ = sys.exc_info()
if self.client:
self.send_close(exc.args[0], exc.args[1])
except self.EClose:
_, exc, _ = sys.exc_info()
# Connection was not a WebSockets connection
if exc.args[0]:
self.msg("%s: %s" % (address[0], exc.args[0]))
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
if self.verbose:
self.msg(traceback.format_exc())
finally:
if self.rec:
self.rec.write("'EOF'];\n")
self.rec.close()
if self.client and self.client != startsock:
# Close the SSL wrapped socket
# Original socket closed by caller
self.client.close()
def new_client(self):
""" Do something with a WebSockets client connection. """
raise("WebSocketServer.new_client() must be overloaded")
def start_server(self):
"""
Daemonize if requested. Listen for connections. Run
do_handshake() method for each connection. If the connection
is a WebSockets client then call new_client() method (which must
be overridden) for each new client connection.
"""
lsock = self.socket(self.listen_host, self.listen_port, False, self.prefer_ipv6)
if self.daemon:
self.daemonize(keepfd=lsock.fileno(), chdir=self.web)
self.started() # Some things need to happen after daemonizing
# Allow override of SIGINT
signal.signal(signal.SIGINT, self.do_SIGINT)
if not multiprocessing:
# os.fork() (python 2.4) child reaper
signal.signal(signal.SIGCHLD, self.fallback_SIGCHLD)
last_active_time = self.launch_time
while True:
try:
try:
self.client = None
startsock = None
pid = err = 0
child_count = 0
if multiprocessing and self.idle_timeout:
child_count = len(multiprocessing.active_children())
time_elapsed = time.time() - self.launch_time
if self.timeout and time_elapsed > self.timeout:
self.msg('listener exit due to --timeout %s'
% self.timeout)
break
if self.idle_timeout:
idle_time = 0
if child_count == 0:
idle_time = time.time() - last_active_time
else:
idle_time = 0
last_active_time = time.time()
if idle_time > self.idle_timeout and child_count == 0:
self.msg('listener exit due to --idle-timeout %s'
% self.idle_timeout)
break
try:
self.poll()
ready = select.select([lsock], [], [], 1)[0]
if lsock in ready:
startsock, address = lsock.accept()
else:
continue
except Exception:
_, exc, _ = sys.exc_info()
if hasattr(exc, 'errno'):
err = exc.errno
elif hasattr(exc, 'args'):
err = exc.args[0]
else:
err = exc[0]
if err == errno.EINTR:
self.vmsg("Ignoring interrupted syscall")
continue
else:
raise
if self.run_once:
# Run in same process if run_once
self.top_new_client(startsock, address)
if self.ws_connection :
self.msg('%s: exiting due to --run-once'
% address[0])
break
elif multiprocessing:
self.vmsg('%s: new handler Process' % address[0])
p = multiprocessing.Process(
target=self.top_new_client,
args=(startsock, address))
p.start()
# child will not return
else:
# python 2.4
self.vmsg('%s: forking handler' % address[0])
pid = os.fork()
if pid == 0:
# child handler process
self.top_new_client(startsock, address)
break # child process exits
# parent process
self.handler_id += 1
except KeyboardInterrupt:
_, exc, _ = sys.exc_info()
print("In KeyboardInterrupt")
pass
except SystemExit:
_, exc, _ = sys.exc_info()
print("In SystemExit")
break
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
if self.verbose:
self.msg(traceback.format_exc())
finally:
if startsock:
startsock.close()
# HTTP handler with WebSocket upgrade support
class WSRequestHandler(SimpleHTTPRequestHandler):
def __init__(self, req, addr, only_upgrade=False):
self.only_upgrade = only_upgrade # only allow upgrades
SimpleHTTPRequestHandler.__init__(self, req, addr, object())
def do_GET(self):
if (self.headers.get('upgrade') and
self.headers.get('upgrade').lower() == 'websocket'):
if (self.headers.get('sec-websocket-key1') or
self.headers.get('websocket-key1')):
# For Hixie-76 read out the key hash
self.headers.__setitem__('key3', self.rfile.read(8))
# Just indicate that a WebSocket upgrade is needed
self.last_code = 101
self.last_message = "101 Switching Protocols"
elif self.only_upgrade:
# Normal web request responses are disabled
self.last_code = 405
self.last_message = "405 Method Not Allowed"
else:
SimpleHTTPRequestHandler.do_GET(self)
def send_response(self, code, message=None):
# Save the status code
self.last_code = code
SimpleHTTPRequestHandler.send_response(self, code, message)
def log_message(self, f, *args):
# Save instead of printing
self.last_message = f % args
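# Illustrative sketch (not part of the original library): a minimal subclass that
# simply echoes received frames back to the client. The EchoServer name and the
# port below are hypothetical; the only contract assumed is the one stated in the
# WebSocketServer docstring (override new_client()).
class EchoServer(WebSocketServer):
    def new_client(self):
        # Echo every received buffer until the client closes the connection
        while True:
            bufs, closed = self.recv_frames()
            if bufs:
                self.send_frames(bufs)
            if closed:
                break

if __name__ == '__main__':
    # e.g. listen on all interfaces, port 6080 (hypothetical choice)
    EchoServer(listen_host='', listen_port=6080).start_server()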
|
the-stack_0_16732 | __author__ = 'Omry_Nachman'
from time import sleep
class Tap(object):
def __init__(self, pi_face, output_pin, open_value=True):
self.pi_face = pi_face
self.output_pin = output_pin
self.open_value = open_value
self.state = None
self.close()
def switch(self, open_tap=True):
self.state = open_tap
if open_tap:
self.pi_face.output_pins[self.output_pin].value = self.open_value
else:
self.pi_face.output_pins[self.output_pin].value = not self.open_value
def open(self):
self.switch(True)
def close(self):
self.switch(False)
def toggle(self):
self.switch(not self.state)
def flick(self, duration, return_lambda=False, off_on_off=True):
def execute():
self.switch(not off_on_off)
sleep(duration)
self.switch(off_on_off)
if return_lambda:
return execute
else:
execute()
class DCTap(Tap):
def __init__(self, pi_face, charge_discharge_pin=6, discharge_value=False, open_close_pin=7, open_value=False):
self.pi_face = pi_face
self.charge_discharge_pin = charge_discharge_pin
self.discharge_value = discharge_value
self.open_close_pin = open_close_pin
self.open_value = open_value
self.state = None
self.close()
def switch(self, open_tap=True):
self.state = open_tap
if open_tap:
direction = self.open_value
else:
direction = not self.open_value
self.pi_face.output_pins[self.charge_discharge_pin].value = not self.discharge_value
self.pi_face.output_pins[self.open_close_pin].value = direction  # set valve direction on the open/close pin
sleep(0.1)
self.pi_face.output_pins[self.charge_discharge_pin].value = self.discharge_value
sleep(0.1)
self.pi_face.output_pins[self.charge_discharge_pin].value = not self.discharge_value
|
the-stack_0_16734 | import turtle as t
t.screensize(400,400,"black")
t.pensize(1)
t.speed(30)
for index in range(400):
if index % 4 in [1]:
t.pencolor("red")
elif index % 4 in [2]:
t.pencolor("orange")
elif index % 4 in [3]:
t.pencolor("purple")
else:
t.pencolor("blue")
t.fd(5+index*2)
t.left(91)
|
the-stack_0_16735 | from django.urls import path, include # noqa
from rest_framework.routers import DefaultRouter # noqa
from recipe import views # noqa
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipes', views.RecipeViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
|
the-stack_0_16736 | import datetime
import typing
import uuid
from dataclasses_avroschema import AvroModel, fields
now = datetime.datetime.now()
PRIMITIVE_TYPES = (
(str, fields.STRING),
(int, fields.INT),
(bool, fields.BOOLEAN),
(float, fields.FLOAT),
# (bytes, "bytes"),
)
PRIMITIVE_TYPES_AND_DEFAULTS = (
(str, "test"),
(int, 1),
(bool, True),
(float, 10.4),
# (bytes, "test".encode()),
)
PRIMITIVE_TYPES_AND_INVALID_DEFAULTS = (
(str, 1),
(int, "test"),
(bool, 10),
(float, False),
# (bytes, "test".encode()),
)
LIST_TYPE_AND_ITEMS_TYPE = (
(str, "string"),
(int, "int"),
(bool, "boolean"),
(float, "float"),
(bytes, "bytes"),
)
LOGICAL_TYPES = (
(datetime.date, fields.LOGICAL_DATE, now.date()),
(datetime.time, fields.LOGICAL_TIME, now.time()),
(datetime.datetime, fields.LOGICAL_DATETIME, now),
(uuid.uuid4, fields.LOGICAL_UUID, uuid.uuid4()),
)
UNION_PRIMITIVE_ELEMENTS = (
((str, int), (fields.STRING, fields.INT)),
((str, None), (fields.STRING, fields.NULL)),
(
(datetime.date, datetime.datetime),
(fields.PYTHON_TYPE_TO_AVRO[datetime.date], fields.PYTHON_TYPE_TO_AVRO[datetime.datetime],),
),
((float, str, int), (fields.FLOAT, fields.STRING, fields.INT)),
((str, float, int, bool), (fields.STRING, fields.FLOAT, fields.INT, fields.BOOLEAN),),
)
SEQUENCE_TYPES = (typing.List, typing.Tuple, typing.Sequence, typing.MutableSequence)
MAPPING_TYPES = (typing.Dict, typing.Mapping, typing.MutableMapping)
SEQUENCES_AND_TYPES = (
(sequence, python_type, items_type) for sequence in SEQUENCE_TYPES for python_type, items_type in PRIMITIVE_TYPES
)
SEQUENCES_LOGICAL_TYPES = (
(sequence, python_type, items_type, value)
for sequence in SEQUENCE_TYPES
for python_type, items_type, value in LOGICAL_TYPES
)
MAPPING_AND_TYPES = (
(mapping, python_type, items_type) for mapping in MAPPING_TYPES for python_type, items_type in PRIMITIVE_TYPES
)
MAPPING_LOGICAL_TYPES = (
(mapping, python_type, items_type, value)
for mapping in MAPPING_TYPES
for python_type, items_type, value in LOGICAL_TYPES
)
# Represent the logical types
# (python_type, avro_internal_type, logical_type)
LOGICAL_TYPES_AND_DEFAULTS = (
(datetime.date, fields.INT, fields.DATE),
(datetime.time, fields.INT, fields.TIME_MILLIS),
(datetime.datetime, fields.LONG, fields.TIMESTAMP_MILLIS),
(uuid.uuid4, fields.STRING, fields.UUID),
)
LOGICAL_TYPES_AND_INVALID_DEFAULTS = (
(datetime.date, 1, None),
(datetime.time, "test", None),
(datetime.datetime, 10, None),
(uuid.uuid4, 10, f"Invalid default type. Default should be {str} or {uuid.UUID}"),
)
class User(AvroModel):
"User"
first_name: str
avro_user = {
"name": "User",
"type": "record",
"doc": "User",
"fields": [{"name": "first_name", "type": "string"}],
}
ARRAY_WITH_UNION_TYPES = (
(typing.Union[int, str], [fields.INT, fields.STRING], [10, 20, "test"]),
(typing.Union[int, str, User], [fields.INT, fields.STRING, avro_user], [10, 20, "test"],),
)
|
the-stack_0_16737 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import extension_info
from nova import exception
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
class fake_extension(object):
def __init__(self, name, alias, description, namespace, version):
self.name = name
self.alias = alias
self.__doc__ = description
self.namespace = namespace
self.version = version
fake_extensions = {
'ext1-alias': fake_extension('ext1', 'ext1-alias', 'ext1 description',
'ext1 namespace', 1),
'ext2-alias': fake_extension('ext2', 'ext2-alias', 'ext2 description',
'ext2 namespace', 2),
'ext3-alias': fake_extension('ext3', 'ext3-alias', 'ext3 description',
'ext3 namespace', 1)
}
def fake_policy_enforce(context, action, target, do_raise=True):
return True
def fake_policy_enforce_selective(context, action, target, do_raise=True):
if action == 'compute_extension:v3:ext1-alias:discoverable':
raise exception.NotAuthorized
else:
return True
class ExtensionInfoTest(test.TestCase):
def setUp(self):
super(ExtensionInfoTest, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
ext_info.extensions = fake_extensions
self.controller = extension_info.ExtensionInfoController(ext_info)
def test_extension_info_list(self):
self.stubs.Set(policy, 'enforce', fake_policy_enforce)
req = fakes.HTTPRequestV3.blank('/extensions')
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['extensions']))
for e in res_dict['extensions']:
self.assertIn(e['alias'], fake_extensions)
self.assertEqual(e['name'], fake_extensions[e['alias']].name)
self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
self.assertEqual(e['description'],
fake_extensions[e['alias']].__doc__)
self.assertEqual(e['namespace'],
fake_extensions[e['alias']].namespace)
self.assertEqual(e['version'],
fake_extensions[e['alias']].version)
def test_extension_info_show(self):
self.stubs.Set(policy, 'enforce', fake_policy_enforce)
req = fakes.HTTPRequestV3.blank('/extensions/ext1-alias')
res_dict = self.controller.show(req, 'ext1-alias')
self.assertEqual(1, len(res_dict))
self.assertEqual(res_dict['extension']['name'],
fake_extensions['ext1-alias'].name)
self.assertEqual(res_dict['extension']['alias'],
fake_extensions['ext1-alias'].alias)
self.assertEqual(res_dict['extension']['description'],
fake_extensions['ext1-alias'].__doc__)
self.assertEqual(res_dict['extension']['namespace'],
fake_extensions['ext1-alias'].namespace)
self.assertEqual(res_dict['extension']['version'],
fake_extensions['ext1-alias'].version)
def test_extension_info_list_not_all_discoverable(self):
self.stubs.Set(policy, 'enforce', fake_policy_enforce_selective)
req = fakes.HTTPRequestV3.blank('/extensions')
res_dict = self.controller.index(req)
self.assertEqual(2, len(res_dict['extensions']))
for e in res_dict['extensions']:
self.assertNotEqual('ext1-alias', e['alias'])
self.assertIn(e['alias'], fake_extensions)
self.assertEqual(e['name'], fake_extensions[e['alias']].name)
self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
self.assertEqual(e['description'],
fake_extensions[e['alias']].__doc__)
self.assertEqual(e['namespace'],
fake_extensions[e['alias']].namespace)
self.assertEqual(e['version'],
fake_extensions[e['alias']].version)
|
the-stack_0_16739 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
from copy import copy
from typing import List
from jinja2 import Environment
from .import_serializer import FileImportSerializer
from ..models import LROOperation, PagingOperation, CodeModel, OperationGroup
from .builder_serializer import get_operation_serializer, get_request_builder_serializer
class OperationGroupSerializer:
def __init__(
self,
code_model: CodeModel,
env: Environment,
operation_groups: List[OperationGroup],
async_mode: bool,
is_python_3_file: bool,
) -> None:
self.code_model = code_model
self.env = env
self.operation_groups = operation_groups
self.async_mode = async_mode
self.is_python_3_file = is_python_3_file
def serialize(self) -> str:
def _is_lro(operation):
return isinstance(operation, LROOperation)
def _is_paging(operation):
return isinstance(operation, PagingOperation)
operation_group_template = self.env.get_template("operations_container.py.jinja2")
if not self.code_model.options["combine_operation_files"] and self.operation_groups[0].is_empty_operation_group:
operation_group_template = self.env.get_template("operations_container_mixin.py.jinja2")
has_schemas = self.code_model.schemas or self.code_model.enums
# extract all operations from operation_groups
operations_all = [operation for groups in self.operation_groups for operation in groups.operations]
operation_group_temp = copy(self.operation_groups[0])
operation_group_temp.operations = operations_all
return operation_group_template.render(
code_model=self.code_model,
operation_groups=self.operation_groups,
imports=FileImportSerializer(
operation_group_temp.imports(
async_mode=self.async_mode,
has_schemas=bool(has_schemas)
), is_python_3_file=self.is_python_3_file
),
async_mode=self.async_mode,
is_python_3_file=self.is_python_3_file,
is_lro=_is_lro,
is_paging=_is_paging,
get_operation_serializer=functools.partial(
get_operation_serializer,
code_model=self.code_model,
async_mode=self.async_mode,
is_python_3_file=self.is_python_3_file,
),
request_builder_serializer=get_request_builder_serializer(
self.code_model, self.is_python_3_file,
),
)
|
the-stack_0_16740 | import bz2
import gzip
import re
import tarfile
import zipfile
from io import (
BytesIO,
StringIO
)
from galaxy import util
from galaxy.util.image_util import image_type
HTML_CHECK_LINES = 100
CHUNK_SIZE = 2 ** 15 # 32Kb
HTML_REGEXPS = (
re.compile(r"<A\s+[^>]*HREF[^>]+>", re.I),
re.compile(r"<IFRAME[^>]*>", re.I),
re.compile(r"<FRAMESET[^>]*>", re.I),
re.compile(r"<META[\W][^>]*>", re.I),
re.compile(r"<SCRIPT[^>]*>", re.I),
)
def check_html(name, file_path=True):
"""
Returns True if the file/string contains HTML code.
"""
# Handles files if file_path is True or text if file_path is False
if file_path:
temp = open(name, "r", encoding='utf-8')
else:
temp = StringIO(util.unicodify(name))
try:
for _ in range(HTML_CHECK_LINES):
line = temp.readline(CHUNK_SIZE)
if not line:
break
if any(regexp.search(line) for regexp in HTML_REGEXPS):
return True
except UnicodeDecodeError:
return False
finally:
temp.close()
return False
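# Illustrative call (hypothetical input string): with file_path=False the first
# argument is treated as text rather than a path.
# check_html("<a href='http://example.org'>link</a>", file_path=False)  # -> True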
def check_binary(name, file_path=True):
# Handles files if file_path is True or text if file_path is False
if file_path:
temp = open(name, "rb")
else:
temp = BytesIO(name)
try:
return util.is_binary(temp.read(1024))
finally:
temp.close()
def check_gzip(file_path, check_content=True):
# This method returns a tuple of booleans representing ( is_gzipped, is_valid )
# Make sure we have a gzipped file
try:
with open(file_path, "rb") as temp:
magic_check = temp.read(2)
if magic_check != util.gzip_magic:
return (False, False)
except Exception:
return (False, False)
# We support some binary data types, so check if the compressed binary file is valid
# If the file is Bam, it should already have been detected as such, so we'll just check
# for sff format.
try:
with gzip.open(file_path, 'rb') as fh:
header = fh.read(4)
if header == b'.sff':
return (True, True)
except Exception:
return(False, False)
if not check_content:
return (True, True)
with gzip.open(file_path, mode='rb') as gzipped_file:
chunk = gzipped_file.read(CHUNK_SIZE)
# See if we have a compressed HTML file
if check_html(chunk, file_path=False):
return (True, False)
return (True, True)
def check_bz2(file_path, check_content=True):
try:
with open(file_path, "rb") as temp:
magic_check = temp.read(3)
if magic_check != util.bz2_magic:
return (False, False)
except Exception:
return(False, False)
if not check_content:
return (True, True)
with bz2.BZ2File(file_path, mode='rb') as bzipped_file:
chunk = bzipped_file.read(CHUNK_SIZE)
# See if we have a compressed HTML file
if check_html(chunk, file_path=False):
return (True, False)
return (True, True)
def check_zip(file_path, check_content=True, files=1):
if not zipfile.is_zipfile(file_path):
return (False, False)
if not check_content:
return (True, True)
chunk = None
for filect, member in enumerate(iter_zip(file_path)):
handle, name = member
chunk = handle.read(CHUNK_SIZE)
if chunk and check_html(chunk, file_path=False):
return (True, False)
if filect >= files:
break
return (True, True)
def is_bz2(file_path):
is_bz2, is_valid = check_bz2(file_path, check_content=False)
return is_bz2
def is_gzip(file_path):
is_gzipped, is_valid = check_gzip(file_path, check_content=False)
return is_gzipped
def is_zip(file_path):
is_zipped, is_valid = check_zip(file_path, check_content=False)
return is_zipped
def is_single_file_zip(file_path):
for i, _ in enumerate(iter_zip(file_path)):
if i > 1:
return False
return True
def is_tar(file_path):
return tarfile.is_tarfile(file_path)
def iter_zip(file_path):
with zipfile.ZipFile(file_path) as z:
for f in filter(lambda x: not x.endswith('/'), z.namelist()):
yield (z.open(f), f)
def check_image(file_path):
""" Simple wrapper around image_type to yield a True/False verdict """
if image_type(file_path):
return True
return False
__all__ = (
'check_binary',
'check_bz2',
'check_gzip',
'check_html',
'check_image',
'check_zip',
'is_gzip',
'is_bz2',
'is_zip',
)
|
the-stack_0_16741 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from paddlers.models.ppseg.datasets import Dataset
from paddlers.models.ppseg.cvlibs import manager
from paddlers.models.ppseg.transforms import Compose
@manager.DATASETS.add_component
class Cityscapes(Dataset):
"""
Cityscapes dataset `https://www.cityscapes-dataset.com/`.
The folder structure is as follow:
cityscapes
|
|--leftImg8bit
| |--train
| |--val
| |--test
|
|--gtFine
| |--train
| |--val
| |--test
Make sure there are *_gtFine_labelTrainIds.png files in the gtFine directory. If not, please run convert_cityscapes.py in tools.
Args:
transforms (list): Transforms for image.
dataset_root (str): Cityscapes dataset directory.
mode (str, optional): Which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'.
edge (bool, optional): Whether to compute edge while training. Default: False
"""
NUM_CLASSES = 19
def __init__(self, transforms, dataset_root, mode='train', edge=False):
self.dataset_root = dataset_root
self.transforms = Compose(transforms)
self.file_list = list()
mode = mode.lower()
self.mode = mode
self.num_classes = self.NUM_CLASSES
self.ignore_index = 255
self.edge = edge
if mode not in ['train', 'val', 'test']:
raise ValueError(
"mode should be 'train', 'val' or 'test', but got {}.".format(
mode))
if self.transforms is None:
raise ValueError("`transforms` is necessary, but it is None.")
img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
label_dir = os.path.join(self.dataset_root, 'gtFine')
if self.dataset_root is None or not os.path.isdir(
self.dataset_root) or not os.path.isdir(
img_dir) or not os.path.isdir(label_dir):
raise ValueError(
"The dataset is not Found or the folder structure is nonconfoumance."
)
label_files = sorted(
glob.glob(
os.path.join(label_dir, mode, '*',
'*_gtFine_labelTrainIds.png')))
img_files = sorted(
glob.glob(os.path.join(img_dir, mode, '*', '*_leftImg8bit.png')))
self.file_list = [[
img_path, label_path
] for img_path, label_path in zip(img_files, label_files)]
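# Illustrative usage (a sketch; the dataset_root path and the transform names are
# assumptions based on the ppseg transforms module, not taken from this file):
# import paddlers.models.ppseg.transforms as T
# train_ds = Cityscapes(
#     transforms=[T.RandomHorizontalFlip(), T.Normalize()],
#     dataset_root='data/cityscapes',
#     mode='train')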
|
the-stack_0_16742 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import PIL
import numpy as np
import scipy.sparse
from lib.model.utils.config import cfg
import pdb
ROOT_DIR = osp.join(osp.dirname(__file__), '..', '..')
class imdb(object):
"""Image database."""
def __init__(self, name, classes=None):
self._name = name
self._num_classes = 0
if not classes:
self._classes = []
else:
self._classes = classes
self._image_index = []
self._obj_proposer = 'gt'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
def set_proposal_method(self, method):
method = eval('self.' + method + '_roidb')
self.roidb_handler = method
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
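# Illustrative shape of a single roidb entry (values are hypothetical):
# {'boxes': np.array([[x1, y1, x2, y2], ...], dtype=np.uint16),
#  'gt_classes': np.array([class_index, ...], dtype=np.int32),
#  'gt_overlaps': scipy.sparse.csr_matrix of shape (num_boxes, num_classes),
#  'flipped': False}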
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def image_id_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def _get_widths(self):
return [PIL.Image.open(self.image_path_at(i)).size[0]
for i in range(self.num_images)]
def append_flipped_images(self):
num_images = self.num_images
widths = self._get_widths()
for i in range(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in range(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
gt_overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({
'boxes': boxes,
'gt_classes': np.zeros((num_boxes,), dtype=np.int32),
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': np.zeros((num_boxes,), dtype=np.float32),
})
return roidb
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in range(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],
b[i]['seg_areas']))
return a
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
|
the-stack_0_16743 | from __future__ import absolute_import, division, print_function
import os
import time
import pandas as pd
import numpy as np
import seaborn as sns
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.decomposition import PCA
from src.meteoro_skills import CategoricalScores
from src.meteoro_skills import ContinuousScores
import tensorflow as tf
from tensorflow import keras
from keras import backend as K  # K is used by mean_squared_error below
from tensorflow.keras import layers
from keras.layers import GaussianNoise
from keras.layers import GaussianDropout
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
#from keras.models import model_from_yaml
from keras.models import load_model
print('TF version '+tf.__version__)
# ------------------------------------------------------------------------------
def tic():
global _start_time
_start_time = time.time()
def tac():
t_sec = round(time.time() - _start_time)
(t_min, t_sec) = divmod(t_sec, 60)
(t_hour, t_min) = divmod(t_min, 60)
print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))
def mean_squared_error(y_test, y_pred):
return K.mean(K.square(y_pred - y_test), axis=-1)
# ------------------------------------------------------------------------------
class Training:
"""
This class automates the training of TensorFlow/Keras neural networks.
"""
PCA = PCA()
seed = 0
run_prefix = ''
version = ''
vernick = ''
file = ''
path = ''
fig_title = ''
path_fig = ''
mod_out_pth = ''
mod_out_name = ''
def __init__(self, random_seed=0,
run_prefix='',
version='',
version_nickname='',
csv_entry='',
csv_path='',
figure_path='',
model_out_path='',
model_out_name=''):
self.run_prefix = run_prefix
self.seed = random_seed
self.version = version
self.vernick = version_nickname
self.file = csv_entry
self.path = csv_path
self.path_fig = figure_path
self.fig_title = run_prefix + version + version_nickname
self.mod_out_pth = model_out_path
self.mod_out_name = model_out_name
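# Illustrative instantiation (a sketch; every path and name below is a placeholder,
# not taken from the original project):
# trainer = Training(random_seed=42,
#                    run_prefix='run_', version='v1', version_nickname='base',
#                    csv_entry='samples.csv', csv_path='/data/',
#                    figure_path='/figures/', model_out_path='/models/',
#                    model_out_name='screening_model')
# trainer.autoExecClass()  # or trainer.autoExecReg()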
# -------------------------------------------------------------------------
# DROP DATA OUTSIDE INTERVAL
# -------------------------------------------------------------------------
@staticmethod
def keep_interval(keepfrom: 0.0, keepto: 1.0, dataframe, target_col: str):
keepinterval = np.where((dataframe[target_col] >= keepfrom) &
(dataframe[target_col] <= keepto))
result = dataframe.iloc[keepinterval]
return result
# -------------------------------------------------------------------------
# BUILD MODELS DEFINITIONS : CLAS = CLASSIFICATION and REG = REGRESSION
# -------------------------------------------------------------------------
@staticmethod
def build_class_model():
'''
Function to create and configure the Keras
classification model (Sequential and Dense layers).
'''
# Create the Keras model:
model = Sequential()
model.add(Dense(8, input_dim=4, kernel_initializer='uniform', activation='relu'))
model.add(Dense(2, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'],)
return model
@staticmethod
def build_reg_model(input_size):
'''
Function to create and configure the Keras
regression model (Sequential, GaussianNoise and Dense layers).
'''
model = Sequential()
model.add(GaussianNoise(0.01, input_shape=(input_size,)))
model.add(Dense(24, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['mean_absolute_error', 'mean_squared_error'])
return model
# -------------------------------------------------------------------------
# EXECUTION OF READING INPUT ATTRIBUTES, SCALING, PCA, SPLIT AND RUN MODEL!
# -------------------------------------------------------------------------
def autoExecClass(self):
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# Load dataset:
df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
x, y= df.loc[:,['36V', '89V', '166V', '190V']], df.loc[:,['TagRain']]
x_arr = np.asanyarray(x)
y_arr = np.asanyarray(y)
y_arr = np.ravel(y_arr)
# Scaling the input paramaters:
# scaler_min_max = MinMaxScaler()
norm_sc = Normalizer()
x_normalized= norm_sc.fit_transform(x_arr)
# Split the dataset in test and train samples:
x_train, x_test, y_train, y_test = train_test_split(x_normalized,
y_arr, test_size=0.10,
random_state=101)
# Create the instance for KerasRegressor:
model=self.build_class_model()
tic()
#------------------------------------------------------------------------------
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(x_train, y_train,
epochs=EPOCHS, validation_split=0.2, batch_size=10,
verbose=0, callbacks=[PrintDot()])
print(history.history.keys())
# ------------------------------------------------------------------------------
# Visualize the model's training progress using the stats
# stored in the history object.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
# ------------------------------------------------------------------------------
# Saving model to YAML:
# model_yaml = model.to_yaml()
# with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:
# yaml_file.write(model_yaml)
#
# # serialize weights to HDF5
# model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')
# print("Saved model to disk")
# tac()
# Saving the complete model in HDF5:
model.save(self.mod_out_pth + self.mod_out_name + '.h5')
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def autoExecReg(self):
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# ------------------------------------------------------------------------------
df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']
scaler = StandardScaler()
normed_input = scaler.fit_transform(df_input)
df_normed_input = pd.DataFrame(normed_input[:],
columns=colunas)
ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
# regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]
# ------------------------------------------------------------------------------
# Choosing the number of components:
TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]
TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]
# ------------------------------------------------------------------------------
# Verifying the number of components that most contribute:
pca = self.PCA
pca1 = pca.fit(TB1)
plt.plot(np.cumsum(pca1.explained_variance_ratio_))
plt.xlabel('Number of components for TB1')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.version + 'PCA_TB1.png')
# ---
pca_trans1 = PCA(n_components=2)
pca1 = pca_trans1.fit(TB1)
TB1_transformed = pca_trans1.transform(TB1)
print("original shape: ", TB1.shape)
print("transformed shape:", TB1_transformed.shape)
# ------------------------------------------------------------------------------
pca = PCA()
pca2 = pca.fit(TB2)
plt.plot(np.cumsum(pca2.explained_variance_ratio_))
plt.xlabel('Number of components for TB2')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.version + 'PCA_TB2.png')
# ---
pca_trans2 = PCA(n_components=2)
pca2 = pca_trans2.fit(TB2)
TB2_transformed = pca_trans2.transform(TB2)
print("original shape: ", TB2.shape)
print("transformed shape:", TB2_transformed.shape)
# ------------------------------------------------------------------------------
# JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:
PCA1 = pd.DataFrame(TB1_transformed[:],
columns=['pca1_1', 'pca_2'])
PCA2 = pd.DataFrame(TB2_transformed[:],
columns=['pca2_1', 'pca2_2'])
dataset = PCA1.join(PCA2, how='right')
dataset = dataset.join(ancillary, how='right')
dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')
# ------------------------------------------------------------------------------
dataset = self.keep_interval(0.2, 75, dataset, 'sfcprcp')
# ----------------------------------------
# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)
# n = 0.98
# to_remove = np.random.choice(
# dataset.index,
# size=int(dataset.shape[0] * n),
# replace=False)
# dataset = dataset.drop(to_remove)
# ------------------------------------------------------------------------------
# Split the data into train and test
# Now split the dataset into a training set and a test set.
# We will use the test set in the final evaluation of our model.
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# ------------------------------------------------------------------------------
# Inspect the data:
# Have a quick look at the joint distribution of a few pairs of columns from the training set.
colunas = list(dataset.columns.values)
# ------------------------------------------------------------------------------
# Also look at the overall statistics:
train_stats = train_dataset.describe()
train_stats.pop("sfcprcp")
train_stats = train_stats.transpose()
# ------------------------------------------------------------------------------
# Split features from labels:
# Separate the target value, or "label", from the features.
# This label is the value that you will train the model to predict.
y_train = train_dataset.pop('sfcprcp')
y_test = test_dataset.pop('sfcprcp')
# ------------------------------------------------------------------------------
# Normalize the data:
scaler = StandardScaler()
normed_train_data = scaler.fit_transform(train_dataset)
normed_test_data = scaler.fit_transform(test_dataset)
# ------------------------------------------------------------------------------
# Build the model:
model = self.build_reg_model(len(train_dataset.keys()))
# ------------------------------------------------------------------------------
# Inspect the model:
# Use the .summary method to print a simple description of the model
model.summary()
# ------------------------------------------------------------------------------
# It seems to be working, and it produces a result
# of the expected shape and type.
# Train the model:
# Train the model for 1000 epochs, and record the training
# and validation accuracy in the history object.
# ------------------------------------------------------------------------------
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, y_train,
epochs=EPOCHS, validation_split=0.2, verbose=0,
callbacks=[PrintDot()])
print(history.history.keys())
# ------------------------------------------------------------------------------
# Visualize the model's training progress using the stats
# stored in the history object.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
self.plot_history(history)
# ------------------------------------------------------------------------------
model = self.build_reg_model(len(train_dataset.keys()))
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, y_train, epochs=EPOCHS,
validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])
# ------------------------------------------------------------------------------
# Ploting again, but with the EarlyStopping apllied:
self.plot_history_EarlyStopping(history)
# The graph shows that on the validation set, the average error
        # is usually around +/- 2 sfcprcp. Is this good?
# We'll leave that decision up to you.
# ------------------------------------------------------------------------------
# Let's see how well the model generalizes by using
# the test set, which we did not use when training the model.
# This tells us how well we can expect the model to predict
# when we use it in the real world.
loss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} sfcprcp".format(mae))
#------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Make predictions
# Finally, predict SFCPRCP values using data in the testing set:
test_predictions = model.predict(normed_test_data).flatten()
        # Applying meteorological skills to verify the performance of the TRAIN/TEST model, in this case, continuous scores:
skills = ContinuousScores()
val_y_pred_mean, val_y_test_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_test, test_predictions)
#converting to text file
print("converting arrays to text files")
my_scores = {'val_y_pred_mean': val_y_pred_mean,
'val_y_test_mean': val_y_test_mean,
'val_mae': val_mae,
'val_rmse': val_rmse,
'val_std': val_std,
'val_fseperc': val_fseperc,
'val_fse': val_fse,
'val_corr': val_corr,
'val_num_pixels': val_num_pixels}
with open(self.path_fig+'continuous_scores_TEST_TRAIN_'+self.version+'.txt', 'w') as myfile:
myfile.write(str(my_scores))
print("Text file saved!")
plt.figure()
plt.scatter(y_test, test_predictions)
plt.xlabel('True Values [sfcprcp]')
plt.ylabel('Predictions [sfcprcp]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0, plt.xlim()[1]])
plt.ylim([0, plt.ylim()[1]])
plt.plot([-100, 100], [-100, 100])
fig_name = self.fig_title + "_plot_scatter_y_test_vs_y_pred.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
#------------------------------------------------------------------------------
ax = plt.gca()
ax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('True Values [sfcprcp]')
ax.set_ylabel('Predictions [sfcprcp]')
plt.plot([-100, 100], [-100, 100])
fig_name = self.fig_title + "_plot_scatter_LOG_y_test_vs_y_pred.png"
plt.savefig(self.path_fig+fig_name)
plt.clf()
#------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# It looks like our model predicts reasonably well.
# Let's take a look at the error distribution.
error = test_predictions - y_test
plt.hist(error, bins=25)
plt.xlabel("Prediction Error [sfcprcp]")
plt.ylabel("Count")
fig_name = self.fig_title + "_prediction_error.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
# ------------------------------------------------------------------------------
# HISTROGRAM 2D
plt.hist2d(y_test, test_predictions, cmin=1, bins=(50, 50), cmap=plt.cm.jet, range=np.array([(0.2, 110), (0.2, 110)]))
plt.axis('equal')
plt.axis('square')
plt.plot([0, 100], [0, 100], ls="--", c=".3")
plt.xlim([0, max(y_test)])
plt.ylim([0, max(y_test)])
plt.colorbar()
plt.xlabel("Observed rain rate (mm/h) - Training")
plt.ylabel("Predicted rain rate (mm/h) - Training")
fig_name = self.fig_title + "_hist2D.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
# ------------------------------------------------------------------------------
# Saving model to YAML:
model_yaml = model.to_yaml()
with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')
print("Saved model to disk")
# Saving the complete model in HDF5:
model.save(self.mod_out_pth + self.mod_out_name + '_tf.h5')
# -------------------------------------------------------------------------
# FUNCTIONS TO MAKE PLOTS ABOUT TRAINING:
# -------------------------------------------------------------------------
def plot_history(self, history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
        plt.figure()
        plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [sfcprcp]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label='Val Error')
ylim_max = hist.val_mean_absolute_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
        plt.ylabel('Mean Square Error [$sfcprcp^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label='Val Error')
ylim_max = hist.val_mean_squared_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
# plt.show()
fig_name = self.fig_title + "_error_per_epochs_history.png"
plt.savefig(self.path_fig + fig_name)
def plot_history_EarlyStopping(self, history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [sfcprcp]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label='Val Error')
ylim_max = hist.val_mean_absolute_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$sfcprcp^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label='Val Error')
ylim_max = hist.val_mean_squared_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
fig_name = self.fig_title + "_error_per_epochs_EarlyStopping.png"
plt.savefig(self.path_fig + fig_name)
|
the-stack_0_16746 | import logging
import random
import pytest
from ocs_ci.framework.pytest_customization.marks import aws_platform_required
from ocs_ci.framework.testlib import ManageTest, tier4, tier4b
from ocs_ci.ocs.exceptions import CommandFailed
from tests import sanity_helpers
logger = logging.getLogger(__name__)
@tier4
@tier4b
@pytest.mark.polarion_id("OCS-1287")
@aws_platform_required
@pytest.mark.skip(reason="az blocking method need to be fixed")
class TestAvailabilityZones(ManageTest):
"""
test availability zone failure:
test stages:
1. Select availability zone
2. In this availability zone, backup instances original security groups
3. block availability zone by attaching security group with no permissions
4. validate - cluster functionality and health
2a. health check - warning or error
2b. create cephfs, create rbd, create pvc (validate_cluster)
5. restore availability zone access
6. validate - cluster functionality and health
"""
@pytest.fixture(autouse=True)
def init_sanity(self):
"""
init Sanity() object
"""
self.sanity_helpers = sanity_helpers.Sanity()
@pytest.fixture()
def teardown(self, request, ec2_instances, aws_obj):
def finalizer():
current_sg = aws_obj.store_security_groups_for_instances(self.instances_in_az)
if self.original_sgs != current_sg:
aws_obj.restore_instances_access(self.security_group_id, self.original_sgs)
logger.info(f"Access to EC2 instances {self.instances_in_az} has been restored")
if self.security_group_id in aws_obj.get_all_security_groups():
logger.info(f"Deleting: {self.security_group_id}")
aws_obj.delete_security_group(self.security_group_id)
request.addfinalizer(finalizer)
def test_availability_zone_failure(
self, aws_obj, ec2_instances, pvc_factory, pod_factory, teardown
):
"""
Simulate AWS availability zone failure
"""
# Select instances in randomly chosen availability zone:
self.instances_in_az = self.random_availability_zone_selector(aws_obj, ec2_instances)
logger.info(f"AZ selected, Instances: {self.instances_in_az} to be blocked")
# Storing current security groups for selected instances:
self.original_sgs = aws_obj.store_security_groups_for_instances(self.instances_in_az)
logger.info(f"Original security groups of selected instances: {self.original_sgs}")
# Blocking instances:
self.security_group_id = self.block_aws_availability_zone(aws_obj, self.instances_in_az)
logger.info(f"Access to EC2 instances {self.instances_in_az} has been blocked")
# Check cluster's health, need to be unhealthy at that point
        assert not self.check_cluster_health(), (
            "Cluster is wrongly reported as healthy. "
            f"EC2 Instances {self.instances_in_az} are blocked"
        )
# Create resources
logger.info("Trying to create resources on un-healthy cluster")
self.sanity_helpers.create_resources(pvc_factory, pod_factory)
logger.info("Resources Created")
# Delete resources
logger.info("Trying to delete resources on un-healthy cluster")
self.sanity_helpers.delete_resources()
logger.info("Resources Deleted")
# Restore access for blocked instances
aws_obj.restore_instances_access(self.security_group_id, self.original_sgs)
        logger.info("Access restored")
# Check cluster's health, need to be healthy at that point
assert self.check_cluster_health(), "Cluster is unhealthy"
def random_availability_zone_selector(self, aws_obj, ec2_instances):
"""
Get all instances within random availability zone
Args:
aws_obj (obj): aws.AWS() object
ec2_instances (dict): cluster ec2 instances objects
Returns:
list: instances_in_az
"""
random_az_selector = random.choice(list(ec2_instances.keys()))
random_az_selected = aws_obj.get_availability_zone_id_by_instance_id(random_az_selector)
instances_in_az = list()
for instance in ec2_instances.keys():
az = aws_obj.get_availability_zone_id_by_instance_id(instance)
if random_az_selected == az:
instances_in_az.append(instance)
return instances_in_az
def block_aws_availability_zone(self, aws_obj, instances_in_az):
"""
1. get vpc_id
2. create security group in this vpc
3. block availability zone by using "append_security_group"
Args:
aws_obj (obj): aws.AWS() object
instances_in_az (list): ec2_instances within selected availability zone
Returns:
security_group_id (str): Newly created security id without access permissions
"""
group_name = "TEST_SEC_GROUP"
dict_permissions = {'IpProtocol': 'tcp',
'FromPort': 80,
'ToPort': 80,
'IpRanges': [{'CidrIp': '1.1.1.1/32'}]}
vpc_id = aws_obj.get_vpc_id_by_instance_id(instances_in_az[0])
security_group_id = aws_obj.create_security_group(group_name, dict_permissions, vpc_id)
aws_obj.block_instances_access(security_group_id, instances_in_az)
return security_group_id
def check_cluster_health(self):
try:
self.sanity_helpers.health_check()
return True
except CommandFailed as e:
if "Unable to connect to the server" in str(e):
logger.warning(f"{e}, Cluster is not healthy")
return False
|
the-stack_0_16747 | # qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.x(input_qubit[1]) # number=8
prog.cx(input_qubit[0],input_qubit[1]) # number=10
prog.x(input_qubit[1]) # number=11
prog.cx(input_qubit[0],input_qubit[1]) # number=12
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_noisy354.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_16748 | from dataclasses import dataclass, field
from typing import List
from .diagram_element import DiagramElement
from .point import Point
__NAMESPACE__ = "http://www.omg.org/spec/DD/20100524/DI"
@dataclass
class Edge(DiagramElement):
class Meta:
namespace = "http://www.omg.org/spec/DD/20100524/DI"
waypoint: List[Point] = field(
default_factory=list,
metadata={
"type": "Element",
"min_occurs": 2,
}
)
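

# Hedged usage sketch (not part of the generated schema module): it assumes the
# companion Point dataclass exposes float `x` and `y` fields, as in the OMG DI
# 20100524 schema, and that inherited DiagramElement fields all have defaults.
# An Edge is expected to carry at least two waypoints (min_occurs=2).
if __name__ == "__main__":
    edge = Edge(waypoint=[Point(x=0.0, y=0.0), Point(x=120.0, y=45.0)])
    print(edge)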
|
the-stack_0_16749 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
# pylint: disable=line-too-long
""" Python Module that exposes the ExtPackage class """
import logging
import os
from setuptools import sandbox as use_setuptools
from resilient_circuits.util.ext.ExtCreate import ExtCreate
# Get the same logger object that is used in resilient_circuits_cmd.py
LOG = logging.getLogger("resilient_circuits_cmd_logger")
# Constants
BASE_NAME_SETUP_PY = "setup.py"
BASE_NAME_DIST_DIR = "dist"
PATH_CUSTOMIZE_PY = os.path.join("util", "customize.py")
PATH_CONFIG_PY = os.path.join("util", "config.py")
PATH_ICON_EXTENSION_LOGO = os.path.join("icons", "extension_logo.png")
PATH_ICON_COMPANY_LOGO = os.path.join("icons", "company_logo.png")
class ExtPackage(ExtCreate):
""" ExtPackage is a subclass of ExtCreate. It exposes one
method: package_extension() """
@classmethod
    def package_extension(cls, path_to_src, custom_display_name=None, keep_build_dir=False):
        """ Function that creates the Extension.zip file from the given source path and returns
the path to the new Extension.zip
- path_to_src [String]: must include a setup.py, customize.py and config.py file.
- custom_display_name [String]: will give the Extension that display name. Default: name from setup.py file
        - keep_build_dir [Boolean]: if True, dist/build/ will not be removed. Default: False
- The code will be packaged into a Built Distribution (.tar.gz) in the /dist directory
- The Extension.zip will also be produced in the /dist directory"""
# Ensure the src directory exists and we have WRITE access
cls.__validate_directory__(os.W_OK, path_to_src)
# Generate paths to files required to create extension
path_setup_py_file = os.path.join(path_to_src, BASE_NAME_SETUP_PY)
path_customize_py_file = os.path.join(path_to_src, os.path.basename(path_to_src), PATH_CUSTOMIZE_PY)
path_config_py_file = os.path.join(path_to_src, os.path.basename(path_to_src), PATH_CONFIG_PY)
path_output_dir = os.path.join(path_to_src, BASE_NAME_DIST_DIR)
path_extension_logo = os.path.join(path_to_src, PATH_ICON_EXTENSION_LOGO)
path_company_logo = os.path.join(path_to_src, PATH_ICON_COMPANY_LOGO)
LOG.info("Creating Built Distribution in /dist directory")
# Create the built distribution
use_setuptools.run_setup(setup_script=path_setup_py_file, args=["sdist", "--formats=gztar"])
LOG.info("Built Distribution (.tar.gz) created at: %s", path_output_dir)
# Create the extension
path_the_extension_zip = cls.create_extension(
path_setup_py_file=path_setup_py_file,
path_customize_py_file=path_customize_py_file,
path_config_py_file=path_config_py_file,
output_dir=path_output_dir,
custom_display_name=custom_display_name,
keep_build_dir=keep_build_dir,
path_extension_logo=path_extension_logo,
path_company_logo=path_company_logo
)
LOG.info("Extension created at: %s", path_the_extension_zip)
return path_the_extension_zip
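

# Hedged usage sketch (not part of the resilient-circuits package): the source
# path and display name below are hypothetical. As the docstring above states,
# path_to_src must contain setup.py plus util/customize.py and util/config.py;
# the resulting .tar.gz and Extension.zip are written to its dist/ directory.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    path_zip = ExtPackage.package_extension(
        path_to_src="/tmp/my_resilient_package",  # hypothetical package source
        custom_display_name="My Extension",       # overrides the setup.py name
        keep_build_dir=False,
    )
    LOG.info("Extension created at %s", path_zip)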
|
the-stack_0_16751 | from __future__ import print_function
import sys
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget,QTableWidgetItem,QVBoxLayout
import matplotlib as mpl
import matplotlib.pyplot as plt
import cv2 as cv
import numpy as np
import os
import pickle
import copy
import time
class ContourGraph(pg.GraphItem):
def __init__(self):
pg.GraphItem.__init__(self)
def setData(self, **kwds):
self.data = copy.deepcopy(kwds)
if 'pos' in self.data:
npts = self.data['pos'].shape[0]
self.data['data'] = np.empty(npts, dtype=[('index', int)])
self.data['data']['index'] = np.arange(npts)
pg.GraphItem.setData(self, **self.data)
class ContourViewWidget(pg.GraphicsWindow):
def __init__(self, parent=None):
pg.GraphicsWindow.__init__(self)
self.setParent(parent)
self.w_sub = self.addLayout(row=0,col=0)
self.mov_nr = 0
self.v_list = []
self.img_list = []
self.contour_list_dest_body = []
self.contour_list_dest_wing_L = []
self.contour_list_dest_wing_R = []
self.contour_list_init_body = []
self.contour_list_init_wing_L = []
self.contour_list_init_wing_R = []
self.contour_list_outer = []
self.contour_list_dest = []
self.contour_list_init = []
self.init_check = False
self.dest_check = False
self.src_check = False
def loadFLT(self,flt):
self.flt = flt
def setMovNR(self,mov_nr):
self.mov_nr = mov_nr-1
def set_init_view(self,check):
self.init_check = check
def set_dest_view(self,check):
self.dest_check = check
def set_src_view(self,check):
self.src_check = check
def add_frame(self,frame_nr):
self.flt.load_frame(self.mov_nr,frame_nr)
self.image_size = []
frame_list = self.flt.get_frame()
for i, frame in enumerate(frame_list):
self.image_size.append(np.array([frame.shape[0],frame.shape[1]]))
self.v_list.append(self.w_sub.addViewBox(row=1,col=i,lockAspect=True))
self.img_list.append(pg.ImageItem(np.transpose(np.flipud(frame))))
self.v_list[i].addItem(self.img_list[i])
self.v_list[i].disableAutoRange('xy')
self.v_list[i].autoRange()
def update_frame(self,frame_nr):
self.flt.load_frame(self.mov_nr,frame_nr)
frame_list = self.flt.get_frame()
for i, frame in enumerate(frame_list):
self.img_list[i].setImage(np.transpose(np.flipud(frame)))
self.update_contour()
def add_contour(self, frame_nr):
self.flt.load_frame(self.mov_nr,frame_nr)
self.flt.segment_frame()
self.flt.project_frame_2_pcl()
self.flt.find_initial_state()
dest_contour_list = self.flt.return_dest_contour()
init_contour_list = self.flt.return_init_contour()
# Create 10 empty contours in each window
for i, contour_list in enumerate(dest_contour_list):
self.contour_list_dest.append([])
for j in range(10):
self.contour_list_dest[i].append(pg.PlotCurveItem())
self.contour_list_dest[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
self.v_list[i].addItem(self.contour_list_dest[i][j])
for i, contour_list in enumerate(init_contour_list):
self.contour_list_init.append([])
for j in range(10):
self.contour_list_init[i].append(pg.PlotCurveItem())
self.contour_list_init[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
self.v_list[i].addItem(self.contour_list_init[i][j])
def update_contour(self):
self.flt.segment_frame()
self.flt.project_frame_2_pcl()
self.flt.find_initial_state()
color_list = [(0,0,255), (255,0,0), (0,255,0)]
dest_contour_list = self.flt.return_dest_contour()
init_contour_list = self.flt.return_init_contour()
for i, contour_list in enumerate(dest_contour_list):
N_items = len(contour_list)
for j in range(10):
if (j<N_items):
if (np.amax(contour_list[j][2,:])>0):
color_now = color_list[int(np.amax(contour_list[j][2,:])-1)]
self.contour_list_dest[i][j].setData(x=contour_list[j][0,:],y=self.image_size[i][1]-contour_list[j][1,:],pen=color_now)
else:
self.contour_list_dest[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
else:
self.contour_list_dest[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
for i, contour_list in enumerate(init_contour_list):
N_items = len(contour_list)
for j in range(10):
if (j<N_items):
if (np.amax(contour_list[j][2,:])>0):
color_now = color_list[int(np.amax(contour_list[j][2,:])-1)]
self.contour_list_init[i][j].setData(x=contour_list[j][0,:],y=self.image_size[i][1]-contour_list[j][1,:],pen=color_now)
else:
self.contour_list_init[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
else:
self.contour_list_init[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255)) |
the-stack_0_16752 | from .base import BaseRaveAPI # pylint: disable=E0401
import requests
import random
import base64
import hashlib
#from cryptography.fernet import Fernet # pylint: disable=E0401
from Cryptodome.Cipher import DES3
from .errors import RaveAPIError, InvalidDataError
try:
import json
except:
import simplejson as json
class Transaction(BaseRaveAPI):
def _handle_request(self, method, url, encrypted_payload=None):
"""Handles all requests: GET, POST, PUT, DELETE etc"""
if not encrypted_payload:
raise InvalidDataError("Error: You need to pass a valid payload")
try:
response = requests.request(method, url, headers=self._headers(), data=json.dumps(encrypted_payload))
return response
except Exception as e:
raise ValueError(e)
def _get_encrypt_key(self):
"""Implementing the getEncryptKey() from the base class"""
seckey = self._get_key()
hashedseckey = hashlib.md5(seckey.encode("utf-8")).hexdigest()
hashedseckeylast12 = hashedseckey[-12:]
seckeyadjusted = seckey.replace('FLWSECK-', '')
seckeyadjustedfirst12 = seckeyadjusted[:12]
return seckeyadjustedfirst12 + hashedseckeylast12
def encrypt_data(self, payloader):
"""Implementing the encrypt_data() from base class"""
blockSize = 8
key = self._get_encrypt_key()
plain_text = payloader.json_payload()
padDiff = blockSize - (len(plain_text) % blockSize) # Using this line as specified by the rave docs
cipher_suite = DES3.new(key, DES3.MODE_ECB)
plain_text = "{}{}".format(plain_text, "".join(chr(padDiff) * padDiff)).encode("utf8")
encrypted_data = base64.b64encode(cipher_suite.encrypt(plain_text))
data = {
'PBFPubKey': self._get_key(),
'client': encrypted_data.decode("utf8"),
'alg': '3DES-24'
}
return data
def initialize(self, payloader):
"""Implement the base class to initialize the payment
DESCRIPTION
METHOD: 'post'
ENDPOINT: 'charge'
RETURNS
response (dict): api response depending on card of the customer
"""
endpoint = 'charge'
method = 'POST'
url = self._url(endpoint)
payload = self.encrypt_data(payloader)
# process the transaction
try:
response = self._handle_request(method, url, encrypted_payload=payload)
            if not response.json().get('status', False):
raise RaveAPIError("There is a problem with your API configuration.\
contact Pastor, Emmanuel on [email protected]")
except Exception as e:
raise ValueError(e)
return response
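

# Hedged usage sketch (not part of the Rave SDK): `Payloader` is a hypothetical
# helper that satisfies the only contract encrypt_data() relies on -- a
# json_payload() method returning the charge fields as a JSON string. How
# Transaction itself is constructed/configured (keys, base URL) is defined in
# BaseRaveAPI and is not shown here.
class Payloader(object):
    def __init__(self, **fields):
        self.fields = fields

    def json_payload(self):
        return json.dumps(self.fields)

# Example (assumes `txn` is an already configured Transaction instance):
#   payload = Payloader(cardno="5438898014560229", cvv="890", amount="100", currency="NGN")
#   encrypted = txn.encrypt_data(payload)  # {'PBFPubKey': ..., 'client': ..., 'alg': '3DES-24'}
#   response = txn.initialize(payload)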
|
the-stack_0_16753 | #----------------------------------------------------#
# 获取测试集的detection-result和images-optional
#   Generate detection-results and images-optional for the test set
#   A detailed video tutorial is available at
#----------------------------------------------------#
from frcnn import FRCNN
from PIL import Image
from torch.autograd import Variable
import torch
import numpy as np
import os
import torch.backends.cudnn as cudnn
from torch.nn import functional as F
from utils.utils import loc2bbox, nms, DecodeBox
from nets.frcnn import FasterRCNN
from nets.frcnn_training import get_new_img_size
from PIL import Image, ImageFont, ImageDraw
import copy
class mAP_FRCNN(FRCNN):
#---------------------------------------------------#
    #   Detect images
#---------------------------------------------------#
def detect_image(self,image_id,image):
self.confidence = 0.05
f = open("./input/detection-results/"+image_id+".txt","w")
image_shape = np.array(np.shape(image)[0:2])
old_width = image_shape[1]
old_height = image_shape[0]
width,height = get_new_img_size(old_width,old_height)
image = image.resize([width,height])
photo = np.array(image,dtype = np.float32)/255
photo = np.transpose(photo, (2, 0, 1))
with torch.no_grad():
images = []
images.append(photo)
images = np.asarray(images)
images = torch.from_numpy(images).cuda()
roi_cls_locs, roi_scores, rois, roi_indices = self.model(images)
decodebox = DecodeBox(self.std, self.mean, self.num_classes)
outputs = decodebox.forward(roi_cls_locs, roi_scores, rois, height=height, width=width, score_thresh = self.confidence)
if len(outputs)==0:
return
bbox = outputs[:,:4]
conf = outputs[:, 4]
label = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2])/width*old_width
bbox[:, 1::2] = (bbox[:, 1::2])/height*old_height
bbox = np.array(bbox,np.int32)
for i, c in enumerate(label):
predicted_class = self.class_names[int(c)]
score = str(conf[i])
left, top, right, bottom = bbox[i]
f.write("%s %s %s %s %s %s\n" % (predicted_class, score[:6], str(int(left)), str(int(top)), str(int(right)),str(int(bottom))))
f.close()
return
frcnn = mAP_FRCNN()
image_ids = open('VOCdevkit/VOC2007/ImageSets/Main/test.txt').read().strip().split()
if not os.path.exists("./input"):
os.makedirs("./input")
if not os.path.exists("./input/detection-results"):
os.makedirs("./input/detection-results")
if not os.path.exists("./input/images-optional"):
os.makedirs("./input/images-optional")
for image_id in image_ids:
image_path = "./VOCdevkit/VOC2007/JPEGImages/"+image_id+".jpg"
image = Image.open(image_path)
image.save("./input/images-optional/"+image_id+".jpg")
frcnn.detect_image(image_id,image)
print(image_id," done!")
print("Conversion completed!")
|
the-stack_0_16755 | # -*- coding: utf-8 -*-
from nbsite.shared_conf import *
project = u' '
authors = u'Panel contributors'
copyright = u'2018 ' + authors
description = 'High-level dashboarding for python visualization libraries'
import panel
version = release = str(panel.__version__)
html_static_path += ['_static']
html_theme = 'sphinx_ioam_theme'
html_theme_options = {
'logo': 'logo_horizontal.png',
'favicon': 'favicon.ico',
'css': 'site.css'
}
_NAV = (
('User Guide', 'user_guide/index'),
('About', 'about')
)
templates_path = ['_templates']
html_context.update({
'PROJECT': project,
'DESCRIPTION': description,
'AUTHOR': authors,
'WEBSITE_URL': 'https://panel.pyviz.org',
'WEBSITE_SERVER': 'https://panel.pyviz.org',
'VERSION': version,
'NAV': _NAV,
'LINKS': _NAV,
'SOCIAL': (
('Gitter', '//gitter.im/pyviz/pyviz'),
('Github', '//github.com/pyviz/panel'),
)
})
nbbuild_patterns_to_take_along = ["simple.html"]
|
the-stack_0_16757 | """Compilation of datasets for few-shot text classification.
Few-shot Text Classification with Distributional Signatures
Yujia Bao, Menghua Wu, Shiyu Chang and Regina Barzilay.
https://arxiv.org/pdf/1908.06039.pdf
@inproceedings{
bao2020fewshot,
title={Few-shot Text Classification with Distributional Signatures},
author={Yujia Bao and Menghua Wu and Shiyu Chang and Regina Barzilay},
booktitle={International Conference on Learning Representations},
year={2020}
}
"""
import os
import json
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data.dataset import Dataset
from collections import Counter, defaultdict
from transformers import RobertaTokenizer
class BaseFewShotTextDataset(Dataset):
def __init__(
self,
data_root,
n_ways=5,
n_shots=5,
n_queries=25,
split='train',
roberta_device='cpu',
fix_seed=42,
):
super().__init__()
self.data_root = data_root
self.cache_dir = os.path.realpath(os.path.join(self.data_root, '../cache'))
if not os.path.isdir(self.cache_dir):
os.makedirs(self.cache_dir)
self.split = split
self.n_ways = n_ways
self.n_shots = n_shots
self.n_queries = n_queries
self.roberta_device = roberta_device
self.rs = np.random.RandomState(fix_seed)
self.fix_seed = fix_seed
self.max_seq_len = 512
self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
self.vocab_size = self.tokenizer.vocab_size
self.pad_index = self.tokenizer.pad_token_id
self.mask_index = self.tokenizer.mask_token_id
self.mask_token = self.tokenizer.mask_token
print('loading data...')
data, self.classes = self.load_data()
# NOTE: no side information since we don't have anything special
        # NOTE: no smlmt for simplicity
self.tokens, self.masks, self.labels = self.process_data(data)
def make_classes(self):
raise NotImplementedError
def load_data(self):
train_classes, val_classes, test_classes = self.make_classes()
if self.split == 'train':
classes = train_classes
elif self.split == 'val':
classes = val_classes
elif self.split == 'test':
classes = test_classes
else:
raise Exception(f'split {self.split} not supported.')
all_data = _load_json(self.data_root)
# partition data with classes!
data = []
for example in all_data:
if example['label'] in classes:
data.append(example)
return data, classes
def process_data(self, data):
texts = [row['text'] for row in data]
labels = [row['label'] for row in data]
tokens, masks = [], []
for text in texts:
outputs = self.tokenizer(
' '.join(text),
truncation=True,
padding='max_length',
max_length=self.max_seq_len,
pad_to_max_length=True,
return_tensors='pt',
)
tokens.append(outputs['input_ids'])
masks.append(outputs['attention_mask'])
labels = np.array(labels)
return tokens, masks, labels
def prep_smlmt_task(self, data):
all_text = [row['text'] for row in data]
unique_text = []
for i in range(len(all_text)):
text_i = np.unique(all_text[i])
unique_text.append(text_i)
unique_text = np.concatenate(unique_text)
freqs = Counter(unique_text)
valid_words = []
for word, fr in freqs.items():
if fr >= (self.n_shots + self.n_queries):
valid_words.append(word)
# these are the tokens with enough
# labels to choose from!
smlmt_cats = np.array(valid_words)
# now we need to map each of these cats to
# the indices of sentences that contain them
smlmt_mapping = defaultdict(lambda: [])
pbar = tqdm(total=len(all_text))
for text in all_text:
tokens = set(text)
for word in smlmt_cats:
if word in tokens:
smlmt_mapping[word].append(text)
pbar.update()
pbar.close()
# maps valid category to all sequences containing it
return smlmt_mapping
def build_smlmt_task(self, smlmt_mapping, data):
smlmt_words = list(smlmt_mapping.keys())
words = self.rs.choice(smlmt_words, self.n_ways, replace=False)
data = []
for i, word in enumerate(words):
data_i = {}
toks_i = smlmt_mapping[word][:100] # at most 100
for text in toks_i:
# perform the masking of ALL instances
text = np.array(text)
text[text == word] = self.mask_token
text = text.tolist()
data_i['text'] = text
data_i['label'] = i
data.append(data_i)
return data
def __getitem__(self, index):
categories = self.rs.choice(self.classes, size=self.n_ways, replace=False)
task_tokens = []
task_masks = []
task_labels = []
for c in range(len(categories)):
category = categories[c]
indices = np.where(self.labels == category)[0]
should_replace = True if len(indices) < (self.n_shots+self.n_queries) else False
indices = self.rs.choice(indices, size=self.n_shots+self.n_queries, replace=should_replace)
# task_tokens_i : (n_shots+n_queries) x 512
task_tokens_i = torch.stack([self.tokens[ix] for ix in indices])
# task_masks_i : (n_shots+n_queries) x 512
task_masks_i = torch.stack([self.masks[ix] for ix in indices])
# task_labels_i : (n_shots+n_queries)
task_labels_i = torch.zeros(self.n_shots+self.n_queries).long() + c
task_tokens.append(task_tokens_i)
task_masks.append(task_masks_i)
task_labels.append(task_labels_i)
# task_tokens : n_ways x (n_shots+n_queries) x 512
task_tokens = torch.stack(task_tokens)
# task_masks : n_ways x (n_shots+n_queries) x 512
task_masks = torch.stack(task_masks)
# task_labels : n_ways x (n_shots+n_queries)
task_labels = torch.stack(task_labels)
# task_lengths : n_ways x (n_shots+n_queries)
task_lengths = torch.sum(task_masks, dim=2)
task_dict = dict(
support_toks=task_tokens[:, :self.n_shots].long(),
support_masks=task_masks[:, :self.n_shots].long(),
support_labs=task_labels[:, :self.n_shots].long(),
support_lens=task_lengths[:, :self.n_shots].long(),
# --
query_toks=task_tokens[:, -self.n_queries:].long(),
query_masks=task_masks[:, -self.n_queries:].long(),
query_labs=task_labels[:, -self.n_queries:].long(),
query_lens=task_lengths[:, -self.n_queries:].long(),
# --
task_type=0,
)
return task_dict
def num_episodes(self):
if self.split == 'train':
return 100
elif self.split == 'val':
return 100
elif self.split == 'test':
return 1000
else:
raise Exception(f'Split {self.split} not supported.')
def __len__(self): # number of episodes
return self.num_episodes()
class FewShot20News(BaseFewShotTextDataset):
LABEL_DICT = {
'talk.politics.mideast': 0,
'sci.space': 1,
'misc.forsale': 2,
'talk.politics.misc': 3,
'comp.graphics': 4,
'sci.crypt': 5,
'comp.windows.x': 6,
'comp.os.ms-windows.misc': 7,
'talk.politics.guns': 8,
'talk.religion.misc': 9,
'rec.autos': 10,
'sci.med': 11,
'comp.sys.mac.hardware': 12,
'sci.electronics': 13,
'rec.sport.hockey': 14,
'alt.atheism': 15,
'rec.motorcycles': 16,
'comp.sys.ibm.pc.hardware': 17,
'rec.sport.baseball': 18,
'soc.religion.christian': 19,
}
def make_classes(self):
train_classes = []
for key in self.LABEL_DICT.keys():
if key[:key.find('.')] in ['sci', 'rec']:
train_classes.append(self.LABEL_DICT[key])
val_classes = []
for key in self.LABEL_DICT.keys():
if key[:key.find('.')] in ['comp']:
val_classes.append(self.LABEL_DICT[key])
test_classes = []
for key in self.LABEL_DICT.keys():
if key[:key.find('.')] not in ['comp', 'sci', 'rec']:
test_classes.append(self.LABEL_DICT[key])
return train_classes, val_classes, test_classes
class FewShotAmazon(BaseFewShotTextDataset):
LABEL_DICT = {
'Amazon_Instant_Video': 0,
'Apps_for_Android': 1,
'Automotive': 2,
'Baby': 3,
'Beauty': 4,
'Books': 5,
'CDs_and_Vinyl': 6,
'Cell_Phones_and_Accessories': 7,
'Clothing_Shoes_and_Jewelry': 8,
'Digital_Music': 9,
'Electronics': 10,
'Grocery_and_Gourmet_Food': 11,
'Health_and_Personal_Care': 12,
'Home_and_Kitchen': 13,
'Kindle_Store': 14,
'Movies_and_TV': 15,
'Musical_Instruments': 16,
'Office_Products': 17,
'Patio_Lawn_and_Garden': 18,
'Pet_Supplies': 19,
'Sports_and_Outdoors': 20,
'Tools_and_Home_Improvement': 21,
'Toys_and_Games': 22,
'Video_Games': 23
}
def make_classes(self):
train_classes = [2, 3, 4, 7, 11, 12, 13, 18, 19, 20]
val_classes = [1, 22, 23, 6, 9]
test_classes = [0, 5, 14, 15, 8, 10, 16, 17, 21]
return train_classes, val_classes, test_classes
class FewShotHuffPost(BaseFewShotTextDataset):
def make_classes(self):
train_classes = list(range(20))
val_classes = list(range(20,25))
test_classes = list(range(25,41))
return train_classes, val_classes, test_classes
class FewShotRCV1(BaseFewShotTextDataset):
def make_classes(self):
train_classes = [1, 2, 12, 15, 18, 20, 22, 25, 27, 32, 33, 34, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61, 66]
val_classes = [5, 24, 26, 28, 29, 31, 35, 23, 67, 36]
test_classes = [0, 3, 4, 6, 7, 8, 9, 10, 11, 13, 14, 16, 17, 19, 21, 30, 37,
62, 63, 64, 65, 68, 69, 70]
return train_classes, val_classes, test_classes
class FewShotReuters(BaseFewShotTextDataset):
def make_classes(self):
train_classes = list(range(15))
val_classes = list(range(15,20))
test_classes = list(range(20,31))
return train_classes, val_classes, test_classes
class FewShotFewRel(BaseFewShotTextDataset):
def make_classes(self):
# head=WORK_OF_ART validation/test split
train_classes = [0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16, 19, 21,
22, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 43, 44, 45, 46, 48, 49, 50, 52, 53, 56, 57, 58,
59, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78]
val_classes = [7, 9, 17, 18, 20]
test_classes = [23, 29, 42, 47, 51, 54, 55, 60, 65, 79]
return train_classes, val_classes, test_classes
def _load_json(path, max_seq_len=512):
'''
load data file
@param path: str, path to the data file
@return data: list of examples
'''
label = {}
text_len = []
with open(path, 'r', errors='ignore') as f:
data = []
for line in f:
row = json.loads(line)
# count the number of examples per label
if int(row['label']) not in label:
label[int(row['label'])] = 1
else:
label[int(row['label'])] += 1
item = {'label': int(row['label']),
'text': row['text'][:max_seq_len]}
text_len.append(len(row['text']))
keys = ['head', 'tail', 'ebd_id']
for k in keys:
if k in row:
item[k] = row[k]
data.append(item)
return data
|
the-stack_0_16760 | import numpy as np
import random
from keras.preprocessing.sequence import pad_sequences
def readfile(filename):
'''
read file
return format :
[ ['EU', 'B-ORG'], ['rejects', 'O'], ['German', 'B-MISC'], ['call', 'O'], ['to', 'O'], ['boycott', 'O'], ['British', 'B-MISC'], ['lamb', 'O'], ['.', 'O'] ]
'''
f = open(filename)
sentences = []
sentence = []
for line in f:
if len(line)==0 or line.startswith('-DOCSTART') or line[0]=="\n":
if len(sentence) > 0:
sentences.append(sentence)
sentence = []
continue
splits = line.split(' ')
sentence.append([splits[0],splits[-1]])
if len(sentence) >0:
sentences.append(sentence)
sentence = []
return sentences
def getCasing(word, caseLookup):
casing = 'other'
numDigits = 0
for char in word:
if char.isdigit():
numDigits += 1
digitFraction = numDigits / float(len(word))
if word.isdigit(): #Is a digit
casing = 'numeric'
elif digitFraction > 0.5:
casing = 'mainly_numeric'
elif word.islower(): #All lower case
casing = 'allLower'
elif word.isupper(): #All upper case
casing = 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
casing = 'initialUpper'
elif numDigits > 0:
casing = 'contains_digit'
return caseLookup[casing]
def createBatches(data):
l = []
for i in data:
l.append(len(i[0]))
l = set(l)
batches = []
batch_len = []
z = 0
for i in l:
for batch in data:
if len(batch[0]) == i:
batches.append(batch)
z += 1
batch_len.append(z)
return batches,batch_len
def createMatrices(sentences, word2Idx, label2Idx, case2Idx,char2Idx):
unknownIdx = word2Idx['UNKNOWN_TOKEN']
paddingIdx = word2Idx['PADDING_TOKEN']
dataset = []
wordCount = 0
unknownWordCount = 0
for sentence in sentences:
wordIndices = []
caseIndices = []
charIndices = []
labelIndices = []
for word,char,label in sentence:
wordCount += 1
if word in word2Idx:
wordIdx = word2Idx[word]
elif word.lower() in word2Idx:
wordIdx = word2Idx[word.lower()]
else:
wordIdx = unknownIdx
unknownWordCount += 1
charIdx = []
for x in char:
charIdx.append(char2Idx[x])
#Get the label and map to int
wordIndices.append(wordIdx)
caseIndices.append(getCasing(word, case2Idx))
charIndices.append(charIdx)
labelIndices.append(label2Idx[label])
dataset.append([wordIndices, caseIndices, charIndices, labelIndices])
return dataset
def iterate_minibatches(dataset,batch_len):
start = 0
for i in batch_len:
tokens = []
caseing = []
char = []
labels = []
data = dataset[start:i]
start = i
for dt in data:
t,c,ch,l = dt
l = np.expand_dims(l,-1)
tokens.append(t)
caseing.append(c)
char.append(ch)
labels.append(l)
yield np.asarray(labels),np.asarray(tokens),np.asarray(caseing),np.asarray(char)
def addCharInformatioin(Sentences):
for i,sentence in enumerate(Sentences):
for j,data in enumerate(sentence):
chars = [c for c in data[0]]
Sentences[i][j] = [data[0],chars,data[1]]
return Sentences
def padding(Sentences):
maxlen = 52
for sentence in Sentences:
char = sentence[2]
for x in char:
maxlen = max(maxlen,len(x))
for i,sentence in enumerate(Sentences):
Sentences[i][2] = pad_sequences(Sentences[i][2],52,padding='post')
return Sentences
|
the-stack_0_16761 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: map
.. moduleauthor:: Hendrix Demers <[email protected]>
Map used in the phase analysis module.
"""
###############################################################################
# Copyright 2016 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import logging
import os.path
import csv
# Third party modules.
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
import matplotlib
import matplotlib.pyplot as plt
# Local modules.
# Project modules
# Globals and constants variables.
class PhaseMap(object):
def __init__(self, phase_map_name, phase_analysis, is_dilation_erosion=False):
self.phase_map_name = phase_map_name
self.phase_analysis = phase_analysis
self.is_dilation_erosion = is_dilation_erosion
self.phases = {}
def add_phase(self, phase, color_name, label=None):
if label is None:
label = phase.name
self.phases[label] = ([phase], color_name, True)
def add_phases(self, label, phases, color_name, union=True):
self.phases[label] = (phases, color_name, union)
def display_map(self, label=None, use_gaussian_filter=False, legend=None, display_now=True):
image = self.get_image(label)
plt.figure()
if label is not None:
plt.title(label)
plt.imshow(image, aspect='equal')
plt.axis('off')
if label is None:
if legend is None:
patches, labels = self.get_legend()
else:
patches, labels = legend
plt.figlegend(patches, labels, 'upper right')
if display_now:
self.show()
def display_no_phase_map(self, display_now=True):
image = self.get_no_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(color="black"),
matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["No phase", "Phases"]
plt.figlegend(patches, labels, 'upper right')
if display_now:
self.show()
def display_overlap_map(self, display_now=True):
image = self.get_overlap_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["Overlap phases"]
plt.figlegend(patches, labels, 'upper right')
if display_now:
self.show()
def show(self):
plt.show()
def save_map(self, figures_path, label=None, use_gaussian_filter=False, legend=None):
image = self.get_image(label)
plt.figure()
if label is not None:
plt.title(label)
plt.imshow(image, aspect='equal')
plt.axis('off')
if label is None:
if legend is None:
patches, labels = self.get_legend()
else:
patches, labels = legend
plt.figlegend(patches, labels, 'upper right')
if label is None:
label = "allphases"
file_path = os.path.join(figures_path, self.phase_map_name + label + ".png")
plt.savefig(file_path)
plt.close()
def save_no_phase_map(self, figures_path):
image = self.get_no_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(color="black"),
matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["No phase", "Phases"]
plt.figlegend(patches, labels, 'upper right')
file_path = os.path.join(figures_path, self.phase_map_name + "_nophase" + ".png")
plt.savefig(file_path)
plt.close()
def save_overlap_map(self, figures_path):
image = self.get_overlap_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["Overlap phases"]
plt.figlegend(patches, labels, 'upper right')
file_path = os.path.join(figures_path, self.phase_map_name + "_overlap" + ".png")
plt.savefig(file_path)
plt.close()
def save_phases_fraction(self, figures_path):
phase_fractions = self.get_phases_fraction()
file_path = os.path.join(figures_path, self.phase_map_name + "_phases_fraction" + ".csv")
with open(file_path, 'w', newline='\n') as output_file:
writer = csv.writer(output_file)
header_row = ["Phase", "Pixel fraction"]
writer.writerow(header_row)
for phase_name in phase_fractions:
row = []
row.append(phase_name)
row.append(phase_fractions[phase_name])
writer.writerow(row)
def get_image(self, label=None, use_gaussian_filter=False):
width, height = self.phase_analysis.get_width_height()
image_data = np.zeros((width, height, 3), dtype=np.float32)
if label is None:
for label in self.phases:
phases, color_name, union = self.phases[label]
color = self._get_rgb(color_name)
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
else:
phases, color_name, union = self.phases[label]
color = self._get_rgb(color_name)
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
image = Image.fromarray(np.uint8(image_data*255.0))
if use_gaussian_filter:
image_filtered = gaussian_filter(image, sigma=(1, 1, 0), mode='nearest', order=0)
image = Image.fromarray(image_filtered)
return image
def get_no_phase_image(self):
color = (1, 1, 1)
width, height = self.phase_analysis.get_width_height()
image_data = np.zeros((width, height, 3), dtype=np.float32)
for label in self.phases:
phases, _color_name, union = self.phases[label]
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
image = Image.fromarray(np.uint8(image_data*255.0))
return image
def get_overlap_phase_image(self):
color = (1, 1, 1)
width, height = self.phase_analysis.get_width_height()
image_data = np.zeros((width, height, 3), dtype=np.float32)
for label in self.phases:
phases, _color_name, union = self.phases[label]
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
logging.debug(image_data.shape)
logging.debug(np.min(image_data))
logging.debug(np.max(image_data))
mask = image_data > 1
logging.debug(np.min(mask))
logging.debug(np.max(mask))
image_data[~mask] = 0
logging.debug(np.min(image_data))
logging.debug(np.max(image_data))
image = Image.fromarray(np.uint8(image_data*255.0))
return image
def get_phases_fraction(self):
phase_fractions = {}
for label in self.phases:
phases, _color_name, union = self.phases[label]
phase_fraction = self.phase_analysis.get_phase_fraction(phases, self.is_dilation_erosion, union)
phase_fractions[label] = phase_fraction
return phase_fractions
def get_legend(self):
patches = []
labels = []
for label in self.phases:
labels.append(label)
_phase, color_name, _union = self.phases[label]
color = self._get_rgb(color_name)
if color == (1, 1, 1):
patches.append(matplotlib.patches.Patch(edgecolor='black', facecolor='white'))
else:
patches.append(matplotlib.patches.Patch(color=color))
return patches, labels
def _get_rgb(self, name):
rgb = matplotlib.colors.hex2color(matplotlib.colors.cnames[name])
return rgb
def save_image(self, file_path, use_gaussian_filter=False):
        image = self.get_image(use_gaussian_filter=use_gaussian_filter)
image.save(file_path)
def show_image(self, file_path, use_gaussian_filter=False, legend=None, save_only=False):
        image = self.get_image(use_gaussian_filter=use_gaussian_filter)
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
if legend is None:
patches, labels = self.get_legend()
else:
patches, labels = legend
plt.figlegend(patches, labels, 'upper right')
plt.savefig(file_path)
if save_only:
plt.close()
def create_no_phase_image(self, file_path):
image = self.get_no_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(color="black"), matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["No phase", "Phases"]
plt.figlegend(patches, labels, 'upper right')
plt.savefig(file_path)
def create_overlap_phase_image(self, file_path):
image = self.get_overlap_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["Overlap phases"]
plt.figlegend(patches, labels, 'upper right')
plt.savefig(file_path)
def save_phase_only(phase_map, phase, graphic_path, color):
"""
Save an png image of one phase.
.. todo:: Find why the parameter is phase_map, should we pass the width and height only?
:param phase_map: get the width and height of the image
:param phase: phase object to create a image
:param graphic_path: path to save the image
:param color: color to use for the image
"""
phase_image = PhaseMap(phase_map.width, phase_map.height)
phase_image.add_phase(phase, color)
filename = r'%s_%s_%s.png' % (phase_map.sampleName, phase_map.dataType, phase.name)
file_path = os.path.join(graphic_path, filename)
phase_image.save_image(file_path)
|
the-stack_0_16762 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: mul_ad"""
import akg
from akg.ops.math import mul
from akg.utils import custom_tiling as ct_util
mul_ad_set_dim_map = {
}
def mul_ad_set_dim_func(head, a, b):
key = []
key.append(tuple(a.shape))
key.append(tuple(b.shape))
key.append(a.dtype)
hash_key = str(tuple(key))
if hash_key in mul_ad_set_dim_map.keys():
return ct_util.set_dims(mul_ad_set_dim_map[hash_key]), hash_key
else:
return "", hash_key
@ct_util.reg_set_dim_func(mul_ad_set_dim_func)
def mul_ad(head, a, b):
output = mul.mul(a, b)
jacs_ = list(akg.differentiate(output, [a], head))
return jacs_[0]
|
the-stack_0_16764 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax
import jax
import jax.numpy as jnp
import numpy as np
class Optimizer(flax.optim.OptimizerDef):
"""Momentum optimizer that stores state using half-precision."""
@flax.struct.dataclass
class HyperParams:
learning_rate: np.ndarray
beta: np.ndarray
grad_norm_clip: np.ndarray
@flax.struct.dataclass
class State:
momentum: np.ndarray
def __init__(self,
learning_rate=None,
beta=0.9,
dtype='bfloat16',
grad_norm_clip=None):
hyper_params = Optimizer.HyperParams(learning_rate, beta, grad_norm_clip)
super().__init__(hyper_params)
self.dtype = dict(bfloat16=jnp.bfloat16, float32=jnp.float32)[dtype]
def init_param_state(self, param):
return Optimizer.State(jnp.zeros_like(param, dtype=self.dtype))
def apply_gradient(self, hyper_params, params, state, grads):
step = state.step
params_flat, treedef = jax.tree_flatten(params)
states_flat = treedef.flatten_up_to(state.param_states)
grads_flat = treedef.flatten_up_to(grads)
# Optionally resize the global gradient to a maximum norm.
if hyper_params.grad_norm_clip:
grads_l2 = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads_flat]))
grads_factor = jnp.minimum(1.0, hyper_params.grad_norm_clip / grads_l2)
grads_flat = jax.tree_map(lambda param: grads_factor * param, grads_flat)
out = [
self.apply_param_gradient(step, hyper_params, param, state, grad)
for param, state, grad in zip(params_flat, states_flat, grads_flat)
]
new_params_flat, new_states_flat = list(zip(*out)) if out else ((), ())
new_params = jax.tree_unflatten(treedef, new_params_flat)
new_param_states = jax.tree_unflatten(treedef, new_states_flat)
new_state = flax.optim.OptimizerState(step + 1, new_param_states)
return new_params, new_state
def apply_param_gradient(self, step, hyper_params, param, state, grad):
del step
assert hyper_params.learning_rate is not None, 'no learning rate provided.'
momentum = state.momentum
new_momentum = hyper_params.beta * momentum + grad
new_param = param - hyper_params.learning_rate * new_momentum
new_state = Optimizer.State(new_momentum.astype(self.dtype))
return new_param, new_state
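# Editor's note: a minimal stand-alone sketch of the update rule implemented by
# apply_param_gradient above (heavy-ball momentum); the helper name and default
# hyperparameters are made up and it is not part of the original module.
def _example_momentum_step(param, momentum, grad, learning_rate=0.1, beta=0.9):
    new_momentum = beta * momentum + grad  # accumulate the gradient into momentum
    new_param = param - learning_rate * new_momentum  # take the descent step
    return new_param, new_momentum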
|
the-stack_0_16765 | from sympy.physics.wigner import wigner_3j
import time
def format_time(duration):
    if duration > 1:
        return f"{duration:5.7}s"
    elif 1000 * duration > 1:
        return f"{1000 * duration:5.7} ms"
    else:
        # avoid silently returning None for sub-millisecond durations
        return f"{1e6 * duration:5.7} us"
if __name__ == "__main__":
for max_angular in [4, 8, 12]:
start = time.time()
for j1 in range(max_angular):
for j2 in range(max_angular):
for j3 in range(max_angular):
for m1 in range(-j1, j1 + 1):
for m2 in range(-j2, j2 + 1):
for m3 in range(-j3, j3 + 1):
c = wigner_3j(j1, j2, j3, m1, m2, m3)
print(f"max_angular = {max_angular} took {format_time(time.time() - start)}")
|
the-stack_0_16766 | import numpy as np
from gym.spaces import Box
from metaworld.envs.env_util import get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv, _assert_task_is_set
class SawyerPlateSlideBackSideEnv(SawyerXYZEnv):
def __init__(self):
goal_low = (-0.1, 0.6, 0.015)
goal_high = (0.1, 0.6, 0.015)
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.25, 0.6, 0.02)
obj_high = (-0.25, 0.6, 0.02)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': 0.3,
'obj_init_pos': np.array([-0.25, 0.6, 0.02], dtype=np.float32),
'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),
}
self.goal = np.array([0., 0.6, 0.015])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self.max_path_length = 150
self.obj_and_goal_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self.observation_space = Box(
np.hstack((self.hand_low, obj_low, obj_low, goal_low)),
np.hstack((self.hand_high, obj_high, obj_high, goal_high)),
)
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_plate_slide_sideway.xml')
@_assert_task_is_set
def step(self, action):
self.set_xyz_action(action[:3])
self.do_simulation([action[-1], -action[-1]])
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
ob = self._get_obs()
obs_dict = self._get_obs_dict()
reward, reachDist, pullDist = self.compute_reward(action, obs_dict)
self.curr_path_length += 1
info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew' : reward, 'pickRew':None, 'success': float(pullDist <= 0.07)}
info['goal'] = self.goal
return ob, reward, self.curr_path_length == self.max_path_length, info
def _get_pos_objects(self):
return self.data.get_geom_xpos('objGeom')
def _set_goal_marker(self, goal):
self.data.site_xpos[self.model.site_name2id('goal')] = (
goal[:3]
)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
self._state_goal = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
if self.random_init:
obj_pos = self._get_state_rand_vec()
self.obj_init_pos = obj_pos[:3]
goal_pos = obj_pos[3:]
self._state_goal = goal_pos
self._set_goal_marker(self._state_goal)
self.sim.model.body_pos[self.model.body_name2id('cabinet')] = self.obj_init_pos
self._set_obj_xyz(np.array([-0.2, 0.]))
self.maxDist = np.linalg.norm(self.data.get_geom_xpos('objGeom')[:-1] - self._state_goal[:-1])
self.target_reward = 1000*self.maxDist + 1000*2
return self._get_obs()
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_init_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1,1], self.frame_skip)
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
def compute_reward(self, actions, obs):
del actions
obs = obs['state_observation']
objPos = obs[3:6]
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
pullGoal = self._state_goal
reachDist = np.linalg.norm(objPos - fingerCOM)
pullDist = np.linalg.norm(objPos[:-1] - pullGoal[:-1])
c1 = 1000
c2 = 0.01
c3 = 0.001
if reachDist < 0.05:
pullRew = 1000*(self.maxDist - pullDist) + c1*(np.exp(-(pullDist**2)/c2) + np.exp(-(pullDist**2)/c3))
pullRew = max(pullRew, 0)
else:
pullRew = 0
reward = -reachDist + pullRew
return [reward, reachDist, pullDist]
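# Editor's note: a stand-alone sketch of the reward shaping used in compute_reward
# above, with the same constants; `maxDist` stands for the initial object-to-goal
# distance. This helper is illustrative only and not part of the original environment.
def _example_shaped_reward(reachDist, pullDist, maxDist):
    c1, c2, c3 = 1000, 0.01, 0.001
    if reachDist < 0.05:
        # dense pulling reward only once the gripper has reached the object
        pullRew = 1000 * (maxDist - pullDist) + c1 * (
            np.exp(-(pullDist ** 2) / c2) + np.exp(-(pullDist ** 2) / c3))
        pullRew = max(pullRew, 0)
    else:
        pullRew = 0
    return -reachDist + pullRew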
|
the-stack_0_16769 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import pandas as pd
from pkg_resources import resource_filename
from .utils import column_exists, fixup_columns
CENSUS2000 = resource_filename(__name__, "data/census/census_2000.csv")
CENSUS2010 = resource_filename(__name__, "data/census/census_2010.csv")
CENSUS_COLS = ['pctwhite', 'pctblack', 'pctapi', 'pctaian', 'pct2prace',
'pcthispanic']
class CensusLnData():
census_df = None
@classmethod
def census_ln(cls, df, namecol, year=2000):
"""Appends additional columns from Census data to the input DataFrame
based on the last name.
        Strips extra whitespace and uppercases the last name, then checks whether
        the name appears in the Census data. If it does, the percentages from that
        row are appended.
Args:
df (:obj:`DataFrame`): Pandas DataFrame containing the last name
column.
namecol (str or int): Column's name or location of the name in
DataFrame.
year (int): The year of Census data to be used. (2000 or 2010)
(default is 2000)
Returns:
DataFrame: Pandas DataFrame with additional columns 'pctwhite',
'pctblack', 'pctapi', 'pctaian', 'pct2prace', 'pcthispanic'
"""
if namecol not in df.columns:
print("No column `{0!s}` in the DataFrame".format(namecol))
return df
df['__last_name'] = df[namecol].str.strip().str.upper()
if cls.census_df is None or cls.census_year != year:
if year == 2000:
cls.census_df = pd.read_csv(CENSUS2000, usecols=['name'] +
CENSUS_COLS)
elif year == 2010:
cls.census_df = pd.read_csv(CENSUS2010, usecols=['name'] +
CENSUS_COLS)
cls.census_df.drop(cls.census_df[cls.census_df.name.isnull()]
.index, inplace=True)
cls.census_df.columns = ['__last_name'] + CENSUS_COLS
cls.census_year = year
rdf = pd.merge(df, cls.census_df, how='left', on='__last_name')
del rdf['__last_name']
return rdf
census_ln = CensusLnData.census_ln
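# Editor's note: an illustrative usage sketch (not part of the original module);
# the DataFrame contents and helper name below are made up.
def _example_census_ln():
    df = pd.DataFrame({"last": ["Smith", "Garcia"]})
    # Appends pctwhite, pctblack, pctapi, pctaian, pct2prace and pcthispanic
    # columns looked up by last name in the 2000 Census table.
    return census_ln(df, "last", year=2000)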
def main(argv=sys.argv[1:]):
title = 'Appends Census columns by last name'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('input', default=None,
help='Input file')
parser.add_argument('-y', '--year', type=int, default=2000,
choices=[2000, 2010],
help='Year of Census data (default=2000)')
parser.add_argument('-o', '--output', default='census-output.csv',
help='Output file with Census data columns')
parser.add_argument('-l', '--last', required=True,
help='Name or index location of column contains '
'the last name')
args = parser.parse_args(argv)
print(args)
if not args.last.isdigit():
df = pd.read_csv(args.input)
else:
df = pd.read_csv(args.input, header=None)
args.last = int(args.last)
if not column_exists(df, args.last):
return -1
rdf = census_ln(df, args.last, args.year)
print("Saving output to file: `{0:s}`".format(args.output))
rdf.columns = fixup_columns(rdf.columns)
rdf.to_csv(args.output, index=False)
return 0
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_16770 | from tqdm import tqdm
from PIL import Image
from models import face_extractor, get_resnet
import numpy as np
import time
import torch
def getEmbedings(aligned, names, resnet):
embbedX = list()
embbedY = list()
print ("Creating embeddings for all training images")
for im, name in tqdm(zip(aligned, names), total = len(names)):
std = im.std()
mean = im.mean()
im = (im - mean) / std
emb = resnet(im.unsqueeze(0)).detach().numpy()
embbedX.append(emb)
embbedY.append(name)
return np.array(embbedX), np.array(embbedY)
def collate_fn(x):
return x[0]
def get_face_crop(image_src, device):
mtcnn = face_extractor(device)
if isinstance(image_src, np.ndarray): # When we get it from cv2
img = Image.fromarray(image_src)
    elif isinstance(image_src, torch.Tensor):
        # PIL cannot consume a torch.Tensor directly; move it to CPU and convert to NumPy first
        img = Image.fromarray(image_src.cpu().numpy())
else:
img = Image.open(image_src)
img = mtcnn(img)
return img
def clear_buffer(cap, frame_rate = 30):
ret = True
while ret:
t1 = time.time()
ret, _ = cap.read()
if (time.time()-t1)> 1/frame_rate:
break
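# Editor's note: a hypothetical sketch of how the embeddings produced by
# getEmbedings could be matched against a new face embedding by nearest L2
# distance; the function name is invented and this is not part of the original module.
def _example_identify(embedding, known_embeddings, known_names):
    dists = [np.linalg.norm(embedding - e) for e in known_embeddings]
    best = int(np.argmin(dists))
    # return the closest known identity and its distance
    return known_names[best], dists[best]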
|
the-stack_0_16772 | pkgname = "linux-pam"
pkgver = "1.5.2"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
"--docdir=/usr/share/doc/pam", "--disable-nis", "--disable-audit",
"--disable-selinux", "--disable-regenerate-docu", "--disable-db",
"BUILD_CFLAGS=-Os", "BUILD_LDFLAGS=", "ac_cv_search_crypt=no"
]
hostmakedepends = ["pkgconf", "gettext-tiny"]
makedepends = ["gettext-tiny-devel", "libfl-devel", "linux-headers"]
checkdepends = ["linux-pam-base"]
depends = ["linux-pam-base"]
pkgdesc = "Pluggable Authentication Modules for Linux"
maintainer = "q66 <[email protected]>"
license = "BSD-3-Clause"
url = f"https://github.com/{pkgname}/{pkgname}"
source = f"{url}/releases/download/v{pkgver}/Linux-PAM-{pkgver}.tar.xz"
sha256 = "e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d"
suid_files = ["usr/bin/unix_chkpwd"]
def post_install(self):
self.install_license("COPYING")
self.chmod(self.destdir / "usr/bin/unix_chkpwd", 0o4755)
self.rm(self.destdir / "usr/lib/systemd", recursive = True)
for f in ["limits.d", "namespace.d"]:
self.install_dir(f"etc/security/{f}")
(self.destdir / "etc/security" / f / ".empty").touch(mode = 0o644)
@subpackage("linux-pam-devel")
def _devel(self):
return self.default_devel(man = True, extra = ["usr/share/doc"])
@subpackage("linux-pam-libs")
def _libs(self):
return self.default_libs()
|
the-stack_0_16777 | from typing import Dict
from typing import List
from typing import Tuple
from .incompatibility import Incompatibility
from .incompatibility_cause import ConflictCause
from .incompatibility_cause import PythonCause
class SolveFailure(Exception):
def __init__(self, incompatibility): # type: (Incompatibility) -> None
self._incompatibility = incompatibility
@property
def message(self):
return str(self)
def __str__(self):
return _Writer(self._incompatibility).write()
class _Writer:
def __init__(self, root): # type: (Incompatibility) -> None
self._root = root
self._derivations = {} # type: Dict[Incompatibility, int]
self._lines = [] # type: List[Tuple[str, int]]
self._line_numbers = {} # type: Dict[Incompatibility, int]
self._count_derivations(self._root)
def write(self):
buffer = []
required_python_version = None
for incompatibility in self._root.external_incompatibilities:
if isinstance(incompatibility.cause, PythonCause):
required_python_version = incompatibility.cause.root_python_version
break
if required_python_version is not None:
buffer.append(
"The current project must support the following Python versions: {}".format(
required_python_version
)
)
buffer.append("")
if isinstance(self._root.cause, ConflictCause):
self._visit(self._root, {})
else:
self._write(
self._root, "Because {}, version solving failed.".format(self._root)
)
padding = (
0
if not self._line_numbers
else len("({}) ".format(list(self._line_numbers.values())[-1]))
)
last_was_empty = False
for line in self._lines:
message = line[0]
if not message:
if not last_was_empty:
buffer.append("")
last_was_empty = True
continue
last_was_empty = False
number = line[-1]
if number is not None:
message = "({})".format(number).ljust(padding) + message
else:
message = " " * padding + message
buffer.append(message)
return "\n".join(buffer)
def _write(
self, incompatibility, message, numbered=False
): # type: (Incompatibility, str, bool) -> None
if numbered:
number = len(self._line_numbers) + 1
self._line_numbers[incompatibility] = number
self._lines.append((message, number))
else:
self._lines.append((message, None))
def _visit(
self, incompatibility, details_for_incompatibility, conclusion=False
): # type: (Incompatibility, Dict, bool) -> None
numbered = conclusion or self._derivations[incompatibility] > 1
conjunction = "So," if conclusion or incompatibility == self._root else "And"
incompatibility_string = str(incompatibility)
cause = incompatibility.cause # type: ConflictCause
details_for_cause = {}
if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
cause.other.cause, ConflictCause
):
conflict_line = self._line_numbers.get(cause.conflict)
other_line = self._line_numbers.get(cause.other)
if conflict_line is not None and other_line is not None:
self._write(
incompatibility,
"Because {}, {}.".format(
cause.conflict.and_to_string(
cause.other, details_for_cause, conflict_line, other_line
),
incompatibility_string,
),
numbered=numbered,
)
elif conflict_line is not None or other_line is not None:
if conflict_line is not None:
with_line = cause.conflict
without_line = cause.other
line = conflict_line
else:
with_line = cause.other
without_line = cause.conflict
line = other_line
self._visit(without_line, details_for_cause)
self._write(
incompatibility,
"{} because {} ({}), {}.".format(
conjunction, str(with_line), line, incompatibility_string
),
numbered=numbered,
)
else:
single_line_conflict = self._is_single_line(cause.conflict.cause)
single_line_other = self._is_single_line(cause.other.cause)
if single_line_other or single_line_conflict:
first = cause.conflict if single_line_other else cause.other
second = cause.other if single_line_other else cause.conflict
self._visit(first, details_for_cause)
self._visit(second, details_for_cause)
self._write(
incompatibility,
"Thus, {}.".format(incompatibility_string),
numbered=numbered,
)
else:
self._visit(cause.conflict, {}, conclusion=True)
self._lines.append(("", None))
self._visit(cause.other, details_for_cause)
self._write(
incompatibility,
"{} because {} ({}), {}".format(
conjunction,
str(cause.conflict),
self._line_numbers[cause.conflict],
incompatibility_string,
),
numbered=numbered,
)
elif isinstance(cause.conflict.cause, ConflictCause) or isinstance(
cause.other.cause, ConflictCause
):
derived = (
cause.conflict
if isinstance(cause.conflict.cause, ConflictCause)
else cause.other
)
ext = (
cause.other
if isinstance(cause.conflict.cause, ConflictCause)
else cause.conflict
)
derived_line = self._line_numbers.get(derived)
if derived_line is not None:
self._write(
incompatibility,
"Because {}, {}.".format(
ext.and_to_string(
derived, details_for_cause, None, derived_line
),
incompatibility_string,
),
numbered=numbered,
)
elif self._is_collapsible(derived):
derived_cause = derived.cause # type: ConflictCause
if isinstance(derived_cause.conflict.cause, ConflictCause):
collapsed_derived = derived_cause.conflict
else:
collapsed_derived = derived_cause.other
if isinstance(derived_cause.conflict.cause, ConflictCause):
collapsed_ext = derived_cause.other
else:
collapsed_ext = derived_cause.conflict
details_for_cause = {}
self._visit(collapsed_derived, details_for_cause)
self._write(
incompatibility,
"{} because {}, {}.".format(
conjunction,
collapsed_ext.and_to_string(ext, details_for_cause, None, None),
incompatibility_string,
),
numbered=numbered,
)
else:
self._visit(derived, details_for_cause)
self._write(
incompatibility,
"{} because {}, {}.".format(
conjunction, str(ext), incompatibility_string
),
numbered=numbered,
)
else:
self._write(
incompatibility,
"Because {}, {}.".format(
cause.conflict.and_to_string(
cause.other, details_for_cause, None, None
),
incompatibility_string,
),
numbered=numbered,
)
def _is_collapsible(self, incompatibility): # type: (Incompatibility) -> bool
if self._derivations[incompatibility] > 1:
return False
cause = incompatibility.cause # type: ConflictCause
if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
cause.other.cause, ConflictCause
):
return False
if not isinstance(cause.conflict.cause, ConflictCause) and not isinstance(
cause.other.cause, ConflictCause
):
return False
complex = (
cause.conflict
if isinstance(cause.conflict.cause, ConflictCause)
else cause.other
)
return complex not in self._line_numbers
def _is_single_line(self, cause): # type: (ConflictCause) -> bool
return not isinstance(cause.conflict.cause, ConflictCause) and not isinstance(
cause.other.cause, ConflictCause
)
def _count_derivations(self, incompatibility): # type: (Incompatibility) -> None
if incompatibility in self._derivations:
self._derivations[incompatibility] += 1
else:
self._derivations[incompatibility] = 1
cause = incompatibility.cause
if isinstance(cause, ConflictCause):
self._count_derivations(cause.conflict)
self._count_derivations(cause.other)
|
the-stack_0_16779 | import argparse
import glob
import hypothesis as h
import hypothesis.workflow as w
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import papermill as pm
import shutil
from hypothesis.workflow import shell
from tqdm import tqdm
from util import simulate, coverage_of_estimator, mutual_information_of_estimator
from rej_abc import build_posterior
# Argument parsing
parser = argparse.ArgumentParser()
parser.add_argument("--redo", action="store_true", help="Executes the workflow from scratch by removing all postconditions (default: false).")
parser.add_argument("--slurm", action="store_true", help="Executes the workflow on a Slurm-enabled HPC system (default: false).")
parser.add_argument("--test", action="store_true", help="Execute the workflow with fast hyper parameters for testing (default: false).")
arguments, _ = parser.parse_known_args()
### BEGIN Pre-workflow #########################################################
# Pipeline constants
root = os.path.dirname(os.path.abspath(__file__))
logdir = root + "/sbi-logs"
outputdir = root + "/output"
if arguments.test:
num_ensembles = 2
simulations = [2 ** n for n in range(10, 11)]
#simulations = [2 ** n for n in range(15, 16)]
credible_interval_levels = [0.9, 0.95]
else:
num_ensembles = 250
simulations = [2 ** n for n in range(10, 18)]
credible_interval_levels = [x/20 for x in range(1, 20)]
# Check if everything needs to be cleaned.
if arguments.redo:
shutil.rmtree(logdir, ignore_errors=True)
shutil.rmtree(outputdir, ignore_errors=True)
### END Pre-workflow ###########################################################
### BEGIN Workflow definition ##################################################
@w.root
def main():
# Prepare the output directory
if not os.path.exists(outputdir):
logging.info("Creating the output directory.")
os.makedirs(outputdir)
def evaluate_rej_abc(simulation_budget):
storagedir = outputdir + "/" + str(simulation_budget)
@w.dependency(main)
@w.postcondition(w.at_least_num_files(storagedir + "/run-*/posterior.pkl", num_ensembles))
@w.slurm.cpu_and_memory(4, "4g")
@w.slurm.timelimit("36:00:00")
@w.tasks(num_ensembles)
def train_rej_abc(task_index):
resultdir = storagedir + "/run-" + str(task_index).zfill(5)
os.makedirs(resultdir, exist_ok=True)
if not os.path.exists(os.path.join(resultdir, "posterior.pkl")):
logging.info("Training posterior estimator ({index} / {n}) for the Weinberg problem.".format(index=task_index + 1, n=num_ensembles))
logging.info("Using the following hyper parameters:")
logging.info(" - simulations : " + str(simulation_budget))
build_posterior(simulation_budget, resultdir, task_index, num_workers=4)
@w.dependency(train_rej_abc)
@w.postcondition(w.exists(storagedir + "/coverage.npy"))
@w.slurm.cpu_and_memory(1, "4g")
@w.slurm.timelimit("12:00:00")
def coverage():
if not os.path.exists(storagedir + "/coverage.npy"):
query = storagedir + "/run-*/"
coverage = coverage_of_estimator(query, num_ensembles, cl_list=credible_interval_levels)
np.save(storagedir + "/coverage.npy", coverage)
@w.dependency(train_rej_abc)
@w.postcondition(w.exists(storagedir + "/mutual_information.npy"))
@w.slurm.cpu_and_memory(1, "4g")
@w.slurm.timelimit("12:00:00")
def mutual_information():
if not os.path.exists(storagedir + "/mutual_information.npy"):
query = storagedir + "/run-*/"
mutual_information = mutual_information_of_estimator(query, num_ensembles)
np.save(storagedir + "/mutual_information.npy", mutual_information)
for simulation_budget in simulations:
evaluate_rej_abc(simulation_budget)
### END Workflow definition ####################################################
# Execute the workflow
if __name__ == "__main__":
if arguments.slurm:
w.slurm.execute(directory=root)
else:
w.local.execute()
|
the-stack_0_16780 | import logging
def validateHeartRate(input, patientDict):
"""
    Validates a patient heart-rate input, checking that the required fields exist and are well formed
:returns: -1 if not successful, 1 if successful
"""
if(not isinstance(input, type({}))):
logging.error("input not type dict")
return -1
if "patient_id" not in input.keys():
logging.error("missing patient id")
return -1
if "heart_rate" not in input.keys():
logging.error("missing heart rate")
return -1
if input["patient_id"] not in patientDict.keys():
logging.error("patient not initialized")
return -1
try:
if(float(input["heart_rate"]) < 0):
logging.error("invalid hr")
return -1
except:
logging.error("non numeric hr")
return -1
return 1
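# Editor's note: an illustrative usage sketch; the patient dictionary below is
# hypothetical and this helper is not part of the original module.
def _example_validate_heart_rate():
    patients = {"p1": {"age": 30}}
    good = {"patient_id": "p1", "heart_rate": 72}
    bad = {"patient_id": "p1", "heart_rate": "fast"}
    # good input passes (1); non-numeric heart rate fails (-1)
    return validateHeartRate(good, patients), validateHeartRate(bad, patients)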
|
the-stack_0_16785 | # Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements input and output processing from Gaussian.
"""
import re
import warnings
import numpy as np
import scipy.constants as cst
from monty.io import zopen
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Molecule
from pymatgen.core.units import Ha_to_eV
from pymatgen.electronic_structure.core import Spin
from pymatgen.util.coord import get_angle
__author__ = "Shyue Ping Ong, Germain Salvato-Vallverdu, Xin Chen"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "8/1/15"
float_patt = re.compile(r"\s*([+-]?\d+\.\d+)")
def read_route_line(route):
"""
read route line in gaussian input/output and return functional basis_set
and a dictionary of other route parameters
Args:
route (str) : the route line
return
functional (str) : the method (HF, PBE ...)
basis_set (str) : the basis set
route (dict) : dictionary of parameters
"""
scrf_patt = re.compile(r"^([sS][cC][rR][fF])\s*=\s*(.+)")
multi_params_patt = re.compile(r"^([A-z]+[0-9]*)[\s=]+\((.*)\)$")
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if "/" in route:
tok = route.split("/")
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, "/"]:
route = route.replace(tok, "")
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2)
elif tok.upper() in ["#", "#N", "#P", "#T"]:
# does not store # in route to avoid error in input
if tok == "#":
dieze_tag = "#N"
else:
dieze_tag = tok
continue
else:
m = re.match(multi_params_patt, tok.strip("#"))
if m:
pars = {}
for par in m.group(2).split(","):
p = par.split("=")
pars[p[0]] = None if len(p) == 1 else p[1]
route_params[m.group(1)] = pars
else:
d = tok.strip("#").split("=")
route_params[d[0]] = None if len(d) == 1 else d[1]
return functional, basis_set, route_params, dieze_tag
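# Editor's note: an illustrative usage sketch of read_route_line; the route
# string below is a made-up example and this helper is not part of the module.
def _example_read_route_line():
    functional, basis_set, route_params, dieze_tag = read_route_line(
        "#P B3LYP/6-31G(d) Opt SCF=Tight")
    # functional == "B3LYP", basis_set == "6-31G(d)", dieze_tag == "#P",
    # route_params == {"Opt": None, "SCF": "Tight"}
    return functional, basis_set, route_params, dieze_tag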
class GaussianInput:
"""
An object representing a Gaussian input file.
"""
# Commonly used regex patterns
_zmat_patt = re.compile(r"^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
_xyz_patt = re.compile(r"^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+" r"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(
self,
mol,
charge=None,
spin_multiplicity=None,
title=None,
functional="HF",
basis_set="6-31G(d)",
route_parameters=None,
input_parameters=None,
link0_parameters=None,
dieze_tag="#P",
gen_basis=None,
):
"""
Args:
mol: Input molecule. It can either be a Molecule object,
a string giving the geometry in a format supported by Gaussian,
or ``None``. If the molecule is ``None``, you will need to use
read it in from a checkpoint. Consider adding ``CHK`` to the
``link0_parameters``.
charge: Charge of the molecule. If None, charge on molecule is used.
Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
If ``mol`` is not a Molecule object, then you must specify a charge.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons. If ``mol`` is not a Molecule object, then you
must specify the multiplicity
title: Title for run. Defaults to formula of molecule if None.
functional: Functional for run.
basis_set: Basis set for run.
route_parameters: Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
input_parameters: Additional input parameters for run as a dict. Used
for example, in PCM calculations. E.g., {"EPS":12}
link0_parameters: Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
dieze_tag: # preceding the route line. E.g. "#p"
gen_basis: allows a user-specified basis set to be used in a Gaussian
calculation. If this is not None, the attribute ``basis_set`` will
be set to "Gen".
"""
self._mol = mol
# Determine multiplicity and charge settings
if isinstance(mol, Molecule):
self.charge = charge if charge is not None else mol.charge
nelectrons = mol.charge + mol.nelectrons - self.charge
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(self.charge, spin_multiplicity)
)
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
# Get a title from the molecule name
self.title = title if title else self._mol.composition.formula
else:
self.charge = charge
self.spin_multiplicity = spin_multiplicity
# Set a title
self.title = title if title else "Restart"
# Store the remaining settings
self.functional = functional
self.basis_set = basis_set
self.link0_parameters = link0_parameters if link0_parameters else {}
self.route_parameters = route_parameters if route_parameters else {}
self.input_parameters = input_parameters if input_parameters else {}
self.dieze_tag = dieze_tag if dieze_tag[0] == "#" else "#" + dieze_tag
self.gen_basis = gen_basis
if gen_basis is not None:
self.basis_set = "Gen"
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
@staticmethod
def _parse_coords(coord_lines):
"""
Helper method to parse coordinates.
"""
paras = {}
var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1).strip("=")] = float(m.group(2))
species = []
coords = []
# Stores whether a Zmatrix format is detected. Once a zmatrix format
# is detected, it is assumed for the remaining of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput._xyz_patt.match(l):
m = GaussianInput._xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r"[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput._zmat_patt.match(l):
zmode = True
toks = re.split(r"[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis, angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def _parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
Sometimes, the species string is also not properly capitalized,
e.g, ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
species = [_parse_species(sp) for sp in species]
return Molecule(species, coords)
@staticmethod
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
contents: String representing an Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1).strip("=")] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = ""
route_index = None
for i, l in enumerate(lines):
if route_patt.match(l):
route += " " + l
route_index = i
# This condition allows for route cards spanning multiple lines
elif (l == "" or l.isspace()) and route_index:
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = " ".join(title)
ind += 1
toks = re.split(r"[,\s]+", lines[route_index + ind])
charge = int(float(toks[0]))
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput._parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(
mol,
charge=charge,
spin_multiplicity=spin_mult,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_paras,
input_parameters=input_paras,
link0_parameters=link0_dict,
dieze_tag=dieze_tag,
)
@staticmethod
def from_file(filename):
"""
Creates GaussianInput from a file.
Args:
filename: Gaussian input filename
Returns:
GaussianInput object
"""
with zopen(filename, "r") as f:
return GaussianInput.from_string(f.read())
def _find_nn_pos_before_site(self, siteindex):
"""
Returns index of nearest neighbor atoms.
"""
alldist = [(self._mol.get_distance(siteindex, i), i) for i in range(siteindex)]
alldist = sorted(alldist, key=lambda x: x[0])
return [d[1] for d in alldist]
def get_zmatrix(self):
"""
Returns a z-matrix representation of the molecule.
"""
output = []
outputvar = []
for i, site in enumerate(self._mol):
if i == 0:
output.append(f"{site.specie}")
elif i == 1:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
elif i == 2:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i} {nn[1] + 1} A{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
outputvar.append(f"A{i}={angle:.6f}")
else:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i} {nn[1] + 1} A{i} {nn[2] + 1} D{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
outputvar.append(f"A{i}={angle:.6f}")
outputvar.append(f"D{i}={dih:.6f}")
return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def get_cart_coords(self):
"""
Return the cartesian coordinates of the molecule
"""
def to_s(x):
return f"{x:0.6f}"
outs = []
for i, site in enumerate(self._mol):
outs.append(" ".join([site.species_string, " ".join([to_s(j) for j in site.coords])]))
return "\n".join(outs)
def __str__(self):
return self.to_string()
def to_string(self, cart_coords=False):
"""
Return GaussianInput string
Option: when cart_coords is set to True return the cartesian coordinates
instead of the z-matrix
"""
def para_dict_to_string(para, joiner=" "):
para_str = []
# sorted is only done to make unittests work reliably
for par, val in sorted(para.items()):
if val is None or val == "":
para_str.append(par)
elif isinstance(val, dict):
val_str = para_dict_to_string(val, joiner=",")
para_str.append(f"{par}=({val_str})")
else:
para_str.append(f"{par}={val}")
return joiner.join(para_str)
output = []
if self.link0_parameters:
output.append(para_dict_to_string(self.link0_parameters, "\n"))
# Handle functional or basis set set to None, empty string or whitespace
func_str = "" if self.functional is None else self.functional.strip()
bset_str = "" if self.basis_set is None else self.basis_set.strip()
if func_str != "" and bset_str != "":
func_bset_str = f" {func_str}/{bset_str}"
else:
# don't use the slash if either or both are set as empty
func_bset_str = f" {func_str}{bset_str}".rstrip()
output.append(f"{self.dieze_tag}{func_bset_str} {para_dict_to_string(self.route_parameters)}")
output.append("")
output.append(self.title)
output.append("")
charge_str = "" if self.charge is None else f"{self.charge:.0f}"
multip_str = "" if self.spin_multiplicity is None else f" {self.spin_multiplicity:.0f}"
output.append(f"{charge_str}{multip_str}")
if isinstance(self._mol, Molecule):
if cart_coords is True:
output.append(self.get_cart_coords())
else:
output.append(self.get_zmatrix())
elif self._mol is not None:
output.append(str(self._mol))
output.append("")
if self.gen_basis is not None:
output.append(f"{self.gen_basis}\n")
output.append(para_dict_to_string(self.input_parameters, "\n"))
output.append("\n")
return "\n".join(output)
def write_file(self, filename, cart_coords=False):
"""
Write the input string into a file
Option: see __str__ method
"""
with zopen(filename, "w") as f:
f.write(self.to_string(cart_coords))
def as_dict(self):
"""
:return: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"functional": self.functional,
"basis_set": self.basis_set,
"route_parameters": self.route_parameters,
"title": self.title,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"input_parameters": self.input_parameters,
"link0_parameters": self.link0_parameters,
"dieze_tag": self.dieze_tag,
}
@classmethod
def from_dict(cls, d):
"""
:param d: dict
:return: GaussianInput
"""
return GaussianInput(
mol=Molecule.from_dict(d["molecule"]),
functional=d["functional"],
basis_set=d["basis_set"],
route_parameters=d["route_parameters"],
title=d["title"],
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
input_parameters=d["input_parameters"],
link0_parameters=d["link0_parameters"],
)
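# Editor's note: an illustrative usage sketch (the water geometry and route
# settings below are hypothetical); it simply builds an input deck string.
def _example_gaussian_input():
    mol = Molecule(["O", "H", "H"],
                   [[0.0, 0.0, 0.0], [0.0, 0.757, 0.587], [0.0, -0.757, 0.587]])
    gin = GaussianInput(mol, functional="B3LYP", basis_set="6-31G(d)",
                        route_parameters={"Opt": None})
    return gin.to_string(cart_coords=True)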
class GaussianOutput:
"""
Parser for Gaussian output files.
.. note::
Still in early beta.
Attributes:
.. attribute:: structures
All structures from the calculation in the standard orientation. If the
symmetry is not considered, the standard orientation is not printed out
and the input orientation is used instead. Check the `standard_orientation`
attribute.
.. attribute:: structures_input_orientation
All structures from the calculation in the input orientation or the
Z-matrix orientation (if an opt=z-matrix was requested).
.. attribute:: opt_structures
All optimized structures from the calculation in the standard orientation,
if the attribute 'standard_orientation' is True, otherwise in the input
or the Z-matrix orientation.
.. attribute:: energies
All energies from the calculation.
.. attribute:: eigenvalues
List of eigenvalues for the last geometry
.. attribute:: MO_coefficients
Matrix of MO coefficients for the last geometry
.. attribute:: cart_forces
All cartesian forces from the calculation.
.. attribute:: frequencies
A list for each freq calculation and for each mode of a dict with
{
"frequency": freq in cm-1,
"symmetry": symmetry tag
"r_mass": Reduce mass,
"f_constant": force constant,
"IR_intensity": IR Intensity,
"mode": normal mode
}
The normal mode is a 1D vector of dx, dy dz of each atom.
.. attribute:: hessian
Matrix of second derivatives of the energy with respect to cartesian
coordinates in the **input orientation** frame. Need #P in the
route section in order to be in the output.
.. attribute:: properly_terminated
True if run has properly terminated
.. attribute:: is_pcm
True if run is a PCM run.
.. attribute:: is_spin
True if it is an unrestricted run
.. attribute:: stationary_type
If it is a relaxation run, indicates whether it is a minimum (Minimum)
or a saddle point ("Saddle").
.. attribute:: corrections
Thermochemical corrections if this run is a Freq run as a dict. Keys
are "Zero-point", "Thermal", "Enthalpy" and "Gibbs Free Energy"
.. attribute:: functional
Functional used in the run.
.. attribute:: basis_set
Basis set used in the run
.. attribute:: route
Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
.. attribute:: dieze_tag
# preceding the route line, e.g. "#P"
.. attribute:: link0
Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
.. attribute:: charge
Charge for structure
.. attribute:: spin_multiplicity
Spin multiplicity for structure
.. attribute:: num_basis_func
Number of basis functions in the run.
.. attribute:: electrons
number of alpha and beta electrons as (N alpha, N beta)
.. attribute:: pcm
PCM parameters and output if available.
.. attribute:: errors
error if not properly terminated (list to be completed in error_defs)
.. attribute:: Mulliken_charges
Mulliken atomic charges
.. attribute:: eigenvectors
Matrix of shape (num_basis_func, num_basis_func). Each column is an
        eigenvector and contains the AO coefficients of an MO.
eigenvectors[Spin] = mat(num_basis_func, num_basis_func)
.. attribute:: molecular_orbital
MO development coefficients on AO in a more convenient array dict
for each atom and basis set label.
mo[Spin][OM j][atom i] = {AO_k: coeff, AO_k: coeff ... }
.. attribute:: atom_basis_labels
Labels of AO for each atoms. These labels are those used in the output
of molecular orbital coefficients (POP=Full) and in the
molecular_orbital array dict.
atom_basis_labels[iatom] = [AO_k, AO_k, ...]
.. attribute:: resumes
        List of Gaussian resume (archive) blocks given at the end of the output
        file before the quotation. Each resume is given as a string.
.. attribute:: title
Title of the gaussian run.
.. attribute:: standard_orientation
If True, the geometries stored in the structures are in the standard
orientation. Else, the geometries are in the input orientation.
.. attribute:: bond_orders
Dict of bond order values read in the output file such as:
{(0, 1): 0.8709, (1, 6): 1.234, ...}
The keys are the atom indexes and the values are the Wiberg bond indexes
that are printed using `pop=NBOREAD` and `$nbo bndidx $end`.
Methods:
.. method:: to_input()
Return a GaussianInput object using the last geometry and the same
calculation parameters.
.. method:: read_scan()
Read a potential energy surface from a gaussian scan calculation.
.. method:: get_scan_plot()
Get a matplotlib plot of the potential energy surface
.. method:: save_scan_plot()
Save a matplotlib plot of the potential energy surface to a file
"""
def __init__(self, filename):
"""
Args:
filename: Filename of Gaussian output file.
"""
self.filename = filename
self._parse(filename)
@property
def final_energy(self):
"""
:return: Final energy in Gaussian output.
"""
return self.energies[-1]
@property
def final_structure(self):
"""
:return: Final structure in Gaussian output.
"""
return self.structures[-1]
def _parse(self, filename):
start_patt = re.compile(r" \(Enter \S+l101\.exe\)")
route_patt = re.compile(r" #[pPnNtT]*.*")
link0_patt = re.compile(r"^\s(%.+)\s*=\s*(.+)")
charge_mul_patt = re.compile(r"Charge\s+=\s*([-\d]+)\s+" r"Multiplicity\s+=\s*(\d+)")
num_basis_func_patt = re.compile(r"([0-9]+)\s+basis functions")
num_elec_patt = re.compile(r"(\d+)\s+alpha electrons\s+(\d+)\s+beta electrons")
pcm_patt = re.compile(r"Polarizable Continuum Model")
stat_type_patt = re.compile(r"imaginary frequencies")
scf_patt = re.compile(r"E\(.*\)\s*=\s*([-\.\d]+)\s+")
mp2_patt = re.compile(r"EUMP2\s*=\s*(.*)")
oniom_patt = re.compile(r"ONIOM:\s+extrapolated energy\s*=\s*(.*)")
termination_patt = re.compile(r"(Normal|Error) termination")
error_patt = re.compile(r"(! Non-Optimized Parameters !|Convergence failure)")
mulliken_patt = re.compile(r"^\s*(Mulliken charges|Mulliken atomic charges)")
mulliken_charge_patt = re.compile(r"^\s+(\d+)\s+([A-Z][a-z]?)\s*(\S*)")
end_mulliken_patt = re.compile(r"(Sum of Mulliken )(.*)(charges)\s*=\s*(\D)")
std_orientation_patt = re.compile(r"Standard orientation")
input_orientation_patt = re.compile(r"Input orientation|Z-Matrix orientation")
orbital_patt = re.compile(r"(Alpha|Beta)\s*\S+\s*eigenvalues --(.*)")
thermo_patt = re.compile(r"(Zero-point|Thermal) correction(.*)=" r"\s+([\d\.-]+)")
forces_on_patt = re.compile(r"Center\s+Atomic\s+Forces\s+\(Hartrees/Bohr\)")
forces_off_patt = re.compile(r"Cartesian\s+Forces:\s+Max.*RMS.*")
forces_patt = re.compile(r"\s+(\d+)\s+(\d+)\s+([0-9\.-]+)\s+([0-9\.-]+)\s+([0-9\.-]+)")
freq_on_patt = re.compile(r"Harmonic\sfrequencies\s+\(cm\*\*-1\),\sIR\sintensities.*Raman.*")
normal_mode_patt = re.compile(r"\s+(\d+)\s+(\d+)\s+([0-9\.-]{4,5})\s+([0-9\.-]{4,5}).*")
mo_coeff_patt = re.compile(r"Molecular Orbital Coefficients:")
mo_coeff_name_patt = re.compile(r"\d+\s((\d+|\s+)\s+([a-zA-Z]{1,2}|\s+))\s+(\d+\S+)")
hessian_patt = re.compile(r"Force constants in Cartesian coordinates:")
resume_patt = re.compile(r"^\s1\\1\\GINC-\S*")
resume_end_patt = re.compile(r"^\s.*\\\\@")
bond_order_patt = re.compile(r"Wiberg bond index matrix in the NAO basis:")
self.properly_terminated = False
self.is_pcm = False
self.stationary_type = "Minimum"
self.corrections = {}
self.energies = []
self.pcm = None
self.errors = []
self.Mulliken_charges = {}
self.link0 = {}
self.cart_forces = []
self.frequencies = []
self.eigenvalues = []
self.is_spin = False
self.hessian = None
self.resumes = []
self.title = None
self.bond_orders = {}
read_coord = 0
read_mulliken = False
read_eigen = False
eigen_txt = []
parse_stage = 0
num_basis_found = False
terminated = False
parse_forces = False
forces = []
parse_freq = False
frequencies = []
read_mo = False
parse_hessian = False
routeline = ""
standard_orientation = False
parse_bond_order = False
input_structures = []
std_structures = []
geom_orientation = None
opt_structures = []
with zopen(filename) as f:
for line in f:
if parse_stage == 0:
if start_patt.search(line):
parse_stage = 1
elif link0_patt.match(line):
m = link0_patt.match(line)
self.link0[m.group(1)] = m.group(2)
elif route_patt.search(line) or routeline != "":
if set(line.strip()) == {"-"}:
params = read_route_line(routeline)
self.functional = params[0]
self.basis_set = params[1]
self.route_parameters = params[2]
route_lower = {k.lower(): v for k, v in self.route_parameters.items()}
self.dieze_tag = params[3]
parse_stage = 1
else:
routeline += line.strip()
elif parse_stage == 1:
if set(line.strip()) == {"-"} and self.title is None:
self.title = ""
elif self.title == "":
self.title = line.strip()
elif charge_mul_patt.search(line):
m = charge_mul_patt.search(line)
self.charge = int(m.group(1))
self.spin_multiplicity = int(m.group(2))
parse_stage = 2
elif parse_stage == 2:
if self.is_pcm:
self._check_pcm(line)
if "freq" in route_lower and thermo_patt.search(line):
m = thermo_patt.search(line)
if m.group(1) == "Zero-point":
self.corrections["Zero-point"] = float(m.group(3))
else:
key = m.group(2).strip(" to ")
self.corrections[key] = float(m.group(3))
if read_coord:
[f.readline() for i in range(3)]
line = f.readline()
sp = []
coords = []
while set(line.strip()) != {"-"}:
toks = line.split()
sp.append(Element.from_Z(int(toks[1])))
coords.append([float(x) for x in toks[3:6]])
line = f.readline()
read_coord = False
if geom_orientation == "input":
input_structures.append(Molecule(sp, coords))
elif geom_orientation == "standard":
std_structures.append(Molecule(sp, coords))
if parse_forces:
m = forces_patt.search(line)
if m:
forces.extend([float(_v) for _v in m.groups()[2:5]])
elif forces_off_patt.search(line):
self.cart_forces.append(forces)
forces = []
parse_forces = False
# read molecular orbital eigenvalues
if read_eigen:
m = orbital_patt.search(line)
if m:
eigen_txt.append(line)
else:
read_eigen = False
self.eigenvalues = {Spin.up: []}
for eigenline in eigen_txt:
if "Alpha" in eigenline:
self.eigenvalues[Spin.up] += [float(e) for e in float_patt.findall(eigenline)]
elif "Beta" in eigenline:
if Spin.down not in self.eigenvalues:
self.eigenvalues[Spin.down] = []
self.eigenvalues[Spin.down] += [float(e) for e in float_patt.findall(eigenline)]
eigen_txt = []
# read molecular orbital coefficients
if (not num_basis_found) and num_basis_func_patt.search(line):
m = num_basis_func_patt.search(line)
self.num_basis_func = int(m.group(1))
num_basis_found = True
elif read_mo:
# build a matrix with all coefficients
all_spin = [Spin.up]
if self.is_spin:
all_spin.append(Spin.down)
mat_mo = {}
for spin in all_spin:
mat_mo[spin] = np.zeros((self.num_basis_func, self.num_basis_func))
nMO = 0
end_mo = False
while nMO < self.num_basis_func and not end_mo:
f.readline()
f.readline()
self.atom_basis_labels = []
for i in range(self.num_basis_func):
line = f.readline()
# identify atom and OA labels
m = mo_coeff_name_patt.search(line)
if m.group(1).strip() != "":
iat = int(m.group(2)) - 1
# atname = m.group(3)
self.atom_basis_labels.append([m.group(4)])
else:
self.atom_basis_labels[iat].append(m.group(4))
# MO coefficients
coeffs = [float(c) for c in float_patt.findall(line)]
for j, c in enumerate(coeffs):
mat_mo[spin][i, nMO + j] = c
nMO += len(coeffs)
line = f.readline()
# manage pop=regular case (not all MO)
if nMO < self.num_basis_func and (
"Density Matrix:" in line or mo_coeff_patt.search(line)
):
end_mo = True
warnings.warn("POP=regular case, matrix coefficients not complete")
f.readline()
self.eigenvectors = mat_mo
read_mo = False
# build a more convenient array dict with MO
# coefficient of each atom in each MO.
# mo[Spin][OM j][atom i] =
# {AO_k: coeff, AO_k: coeff ... }
mo = {}
for spin in all_spin:
mo[spin] = [
[{} for iat in range(len(self.atom_basis_labels))] for j in range(self.num_basis_func)
]
for j in range(self.num_basis_func):
i = 0
for iat, labels in enumerate(self.atom_basis_labels):
for label in labels:
mo[spin][j][iat][label] = self.eigenvectors[spin][i, j]
i += 1
self.molecular_orbital = mo
elif parse_freq:
while line.strip() != "": # blank line
ifreqs = [int(val) - 1 for val in line.split()]
for ifreq in ifreqs:
frequencies.append(
{
"frequency": None,
"r_mass": None,
"f_constant": None,
"IR_intensity": None,
"symmetry": None,
"mode": [],
}
)
# read freq, intensity, masses, symmetry ...
while "Atom AN" not in line:
if "Frequencies --" in line:
freqs = map(float, float_patt.findall(line))
for ifreq, freq in zip(ifreqs, freqs):
frequencies[ifreq]["frequency"] = freq
elif "Red. masses --" in line:
r_masses = map(float, float_patt.findall(line))
for ifreq, r_mass in zip(ifreqs, r_masses):
frequencies[ifreq]["r_mass"] = r_mass
elif "Frc consts --" in line:
f_consts = map(float, float_patt.findall(line))
for ifreq, f_const in zip(ifreqs, f_consts):
frequencies[ifreq]["f_constant"] = f_const
elif "IR Inten --" in line:
IR_intens = map(float, float_patt.findall(line))
for ifreq, intens in zip(ifreqs, IR_intens):
frequencies[ifreq]["IR_intensity"] = intens
else:
syms = line.split()[:3]
for ifreq, sym in zip(ifreqs, syms):
frequencies[ifreq]["symmetry"] = sym
line = f.readline()
# read normal modes
line = f.readline()
while normal_mode_patt.search(line):
values = list(map(float, float_patt.findall(line)))
for i, ifreq in zip(range(0, len(values), 3), ifreqs):
frequencies[ifreq]["mode"].extend(values[i : i + 3])
line = f.readline()
parse_freq = False
self.frequencies.append(frequencies)
frequencies = []
elif parse_hessian:
# read Hessian matrix under "Force constants in Cartesian coordinates"
# Hessian matrix is in the input orientation framework
# WARNING : need #P in the route line
parse_hessian = False
ndf = 3 * len(input_structures[0])
self.hessian = np.zeros((ndf, ndf))
j_indices = range(5)
jndf = 0
while jndf < ndf:
for i in range(jndf, ndf):
line = f.readline()
vals = re.findall(r"\s*([+-]?\d+\.\d+[eEdD]?[+-]\d+)", line)
vals = [float(val.replace("D", "E")) for val in vals]
for jval, val in enumerate(vals):
j = j_indices[jval]
self.hessian[i, j] = val
self.hessian[j, i] = val
jndf += len(vals)
line = f.readline()
j_indices = [j + 5 for j in j_indices]
elif parse_bond_order:
# parse Wiberg bond order
line = f.readline()
line = f.readline()
nat = len(input_structures[0])
matrix = []
for iat in range(nat):
line = f.readline()
matrix.append([float(v) for v in line.split()[2:]])
self.bond_orders = {}
for iat in range(nat):
for jat in range(iat + 1, nat):
self.bond_orders[(iat, jat)] = matrix[iat][jat]
parse_bond_order = False
elif termination_patt.search(line):
m = termination_patt.search(line)
if m.group(1) == "Normal":
self.properly_terminated = True
terminated = True
elif error_patt.search(line):
error_defs = {
"! Non-Optimized Parameters !": "Optimization error",
"Convergence failure": "SCF convergence error",
}
m = error_patt.search(line)
self.errors.append(error_defs[m.group(1)])
elif num_elec_patt.search(line):
m = num_elec_patt.search(line)
self.electrons = (int(m.group(1)), int(m.group(2)))
elif (not self.is_pcm) and pcm_patt.search(line):
self.is_pcm = True
self.pcm = {}
elif "freq" in route_lower and "opt" in route_lower and stat_type_patt.search(line):
self.stationary_type = "Saddle"
elif mp2_patt.search(line):
m = mp2_patt.search(line)
self.energies.append(float(m.group(1).replace("D", "E")))
elif oniom_patt.search(line):
                        m = oniom_patt.search(line)
self.energies.append(float(m.group(1)))
elif scf_patt.search(line):
m = scf_patt.search(line)
self.energies.append(float(m.group(1)))
elif std_orientation_patt.search(line):
standard_orientation = True
geom_orientation = "standard"
read_coord = True
elif input_orientation_patt.search(line):
geom_orientation = "input"
read_coord = True
elif "Optimization completed." in line:
line = f.readline()
if " -- Stationary point found." not in line:
warnings.warn(
"\n" + self.filename + ": Optimization complete but this is not a stationary point"
)
if standard_orientation:
opt_structures.append(std_structures[-1])
else:
opt_structures.append(input_structures[-1])
elif not read_eigen and orbital_patt.search(line):
eigen_txt.append(line)
read_eigen = True
elif mulliken_patt.search(line):
mulliken_txt = []
read_mulliken = True
elif not parse_forces and forces_on_patt.search(line):
parse_forces = True
elif freq_on_patt.search(line):
parse_freq = True
[f.readline() for i in range(3)]
elif mo_coeff_patt.search(line):
if "Alpha" in line:
self.is_spin = True
read_mo = True
elif hessian_patt.search(line):
parse_hessian = True
elif resume_patt.search(line):
resume = []
while not resume_end_patt.search(line):
resume.append(line)
line = f.readline()
# security if \\@ not in one line !
if line == "\n":
break
resume.append(line)
resume = "".join([r.strip() for r in resume])
self.resumes.append(resume)
elif bond_order_patt.search(line):
parse_bond_order = True
if read_mulliken:
if not end_mulliken_patt.search(line):
mulliken_txt.append(line)
else:
m = end_mulliken_patt.search(line)
mulliken_charges = {}
for line in mulliken_txt:
if mulliken_charge_patt.search(line):
m = mulliken_charge_patt.search(line)
dic = {int(m.group(1)): [m.group(2), float(m.group(3))]}
mulliken_charges.update(dic)
read_mulliken = False
self.Mulliken_charges = mulliken_charges
# store the structures. If symmetry is considered, the standard orientation
# is used. Else the input orientation is used.
if standard_orientation:
self.structures = std_structures
self.structures_input_orientation = input_structures
else:
self.structures = input_structures
self.structures_input_orientation = input_structures
# store optimized structure in input orientation
self.opt_structures = opt_structures
if not terminated:
warnings.warn("\n" + self.filename + ": Termination error or bad Gaussian output file !")
def _check_pcm(self, line):
energy_patt = re.compile(r"(Dispersion|Cavitation|Repulsion) energy" r"\s+\S+\s+=\s+(\S*)")
total_patt = re.compile(r"with all non electrostatic terms\s+\S+\s+" r"=\s+(\S*)")
parameter_patt = re.compile(r"(Eps|Numeral density|RSolv|Eps" r"\(inf[inity]*\))\s+=\s*(\S*)")
if energy_patt.search(line):
m = energy_patt.search(line)
self.pcm[f"{m.group(1)} energy"] = float(m.group(2))
elif total_patt.search(line):
m = total_patt.search(line)
self.pcm["Total energy"] = float(m.group(1))
elif parameter_patt.search(line):
m = parameter_patt.search(line)
self.pcm[m.group(1)] = float(m.group(2))
def as_dict(self):
"""
Json-serializable dict representation.
"""
structure = self.final_structure
d = {
"has_gaussian_completed": self.properly_terminated,
"nsites": len(structure),
}
comp = structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_pcm"] = self.is_pcm
d["errors"] = self.errors
d["Mulliken_charges"] = self.Mulliken_charges
unique_symbols = sorted(list(d["unit_cell_formula"].keys()))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["charge"] = self.charge
d["spin_multiplicity"] = self.spin_multiplicity
vin = {
"route": self.route_parameters,
"functional": self.functional,
"basis_set": self.basis_set,
"nbasisfunctions": self.num_basis_func,
"pcm_parameters": self.pcm,
}
d["input"] = vin
nsites = len(self.final_structure)
vout = {
"energies": self.energies,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"molecule": structure.as_dict(),
"stationary_type": self.stationary_type,
"corrections": self.corrections,
}
d["output"] = vout
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
def read_scan(self):
"""
Read a potential energy surface from a gaussian scan calculation.
Returns:
A dict: {"energies": [ values ],
"coords": {"d1": [ values ], "A2", [ values ], ... }}
"energies" are the energies of all points of the potential energy
surface. "coords" are the internal coordinates used to compute the
potential energy surface and the internal coordinates optimized,
labelled by their name as defined in the calculation.
"""
def floatList(l):
"""return a list of float from a list of string"""
return [float(v) for v in l]
scan_patt = re.compile(r"^\sSummary of the potential surface scan:")
optscan_patt = re.compile(r"^\sSummary of Optimized Potential Surface Scan")
coord_patt = re.compile(r"^\s*(\w+)((\s*[+-]?\d+\.\d+)+)")
# data dict return
data = {"energies": [], "coords": {}}
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
while line != "":
if optscan_patt.match(line):
f.readline()
line = f.readline()
endScan = False
while not endScan:
data["energies"] += floatList(float_patt.findall(line))
line = f.readline()
while coord_patt.match(line):
icname = line.split()[0].strip()
if icname in data["coords"]:
data["coords"][icname] += floatList(float_patt.findall(line))
else:
data["coords"][icname] = floatList(float_patt.findall(line))
line = f.readline()
if not re.search(r"^\s+((\s*\d+)+)", line):
endScan = True
else:
line = f.readline()
elif scan_patt.match(line):
line = f.readline()
data["coords"] = {icname: [] for icname in line.split()[1:-1]}
f.readline()
line = f.readline()
while not re.search(r"^\s-+", line):
values = floatList(line.split())
data["energies"].append(values[-1])
for i, icname in enumerate(data["coords"]):
data["coords"][icname].append(values[i + 1])
line = f.readline()
else:
line = f.readline()
return data
def get_scan_plot(self, coords=None):
"""
Get a matplotlib plot of the potential energy surface.
Args:
            coords: internal coordinate name to use as abscissa.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
d = self.read_scan()
if coords and coords in d["coords"]:
x = d["coords"][coords]
plt.xlabel(coords)
else:
x = range(len(d["energies"]))
plt.xlabel("points")
plt.ylabel("Energy (eV)")
e_min = min(d["energies"])
y = [(e - e_min) * Ha_to_eV for e in d["energies"]]
plt.plot(x, y, "ro--")
return plt
def save_scan_plot(self, filename="scan.pdf", img_format="pdf", coords=None):
"""
Save matplotlib plot of the potential energy surface to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
            coords: internal coordinate name to use as abscissa.
"""
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format)
def read_excitation_energies(self):
"""
        Read excitation energies after a TD-DFT calculation.
Returns:
            A list: A list of tuples, one for each transition, such as
                [(energy (eV), lambda (nm), oscillator strength), ... ]
"""
transitions = []
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search(r"^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search(r"^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions
def get_spectre_plot(self, sigma=0.05, step=0.01):
"""
        Get a matplotlib plot of the UV-visible spectrum (xas). Transitions are
        plotted as vertical lines and as a sum of normal (Gaussian) functions of
        width sigma. The broadening is applied in energy and the spectrum is
        plotted as a function of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "xas": values}
                where values are lists of abscissa (energies, lambda) and
the sum of gaussian functions (xas).
A matplotlib plot.
"""
from scipy.stats import norm
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
transitions = self.read_excitation_energies()
minval = min(val[0] for val in transitions) - 5.0 * sigma
maxval = max(val[0] for val in transitions) + 5.0 * sigma
npts = int((maxval - minval) / step) + 1
eneval = np.linspace(minval, maxval, npts) # in eV
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.0e9 for val in eneval] # in nm
# sum of gaussian functions
spectre = np.zeros(npts)
for trans in transitions:
            # Gaussian broadening: norm.pdf(E, E_i, sigma) is the normal density
            # centered on the transition energy, weighted by its oscillator strength.
            spectre += trans[2] * norm.pdf(eneval, trans[0], sigma)
spectre /= spectre.max()
plt.plot(lambdaval, spectre, "r-", label="spectre")
data = {"energies": eneval, "lambda": lambdaval, "xas": spectre}
# plot transitions as vlines
plt.vlines(
[val[1] for val in transitions],
0.0,
[val[2] for val in transitions],
color="blue",
label="transitions",
linewidth=2,
)
plt.xlabel("$\\lambda$ (nm)")
plt.ylabel("Arbitrary unit")
plt.legend()
return data, plt
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf", sigma=0.05, step=0.01):
"""
Save matplotlib plot of the spectre to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
"""
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
def to_input(
self,
mol=None,
charge=None,
spin_multiplicity=None,
title=None,
functional=None,
basis_set=None,
route_parameters=None,
input_parameters=None,
link0_parameters=None,
dieze_tag=None,
cart_coords=False,
):
"""
Create a new input object using by default the last geometry read in
the output file and with the same calculation parameters. Arguments
are the same as GaussianInput class.
        Returns:
            gauinp (GaussianInput): the Gaussian input object
"""
if not mol:
mol = self.final_structure
if charge is None:
charge = self.charge
if spin_multiplicity is None:
spin_multiplicity = self.spin_multiplicity
if not title:
title = self.title
if not functional:
functional = self.functional
if not basis_set:
basis_set = self.basis_set
if not route_parameters:
route_parameters = self.route_parameters
if not link0_parameters:
link0_parameters = self.link0
if not dieze_tag:
dieze_tag = self.dieze_tag
return GaussianInput(
mol=mol,
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_parameters,
input_parameters=input_parameters,
link0_parameters=link0_parameters,
dieze_tag=dieze_tag,
)
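# Illustrative usage sketch (not part of the original module). It assumes this
# class is pymatgen's GaussianOutput and that "scan.log" / "tddft.log" are
# hypothetical Gaussian log files from a scan and a TD-DFT run respectively.
if __name__ == "__main__":  # pragma: no cover - example only
    scan_out = GaussianOutput("scan.log")
    scan = scan_out.read_scan()
    print(scan["energies"][:5], sorted(scan["coords"]))
    td_out = GaussianOutput("tddft.log")
    print(td_out.read_excitation_energies())
    data, plt = td_out.get_spectre_plot(sigma=0.05, step=0.01)
    plt.savefig("spectre.pdf", format="pdf")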
|
the-stack_0_16786 | from decimal import (
Decimal,
)
import pytest
from eth_abi import (
encode_abi,
)
from newchain_web3._utils.filters import (
match_fn,
)
@pytest.mark.parametrize(
"data,expected,match_data_and_abi",
(
(
(-12345, 000, 111, Decimal(2) + Decimal(1) / Decimal(10)),
False,
(
("int", (-12345,)),
("uint32", (444,)),
("int", (565,)),
("ufixed256x4", (Decimal(1.66660),))
)
),
(
(-12345, 000, 111, Decimal(2) + Decimal(1) / Decimal(10)),
True,
(
("int", (-12345,)),
("uint32", None),
("int", None),
("ufixed256x4", None)
)
),
(
("aye", "bee", "sea", b"\xde\xee"),
False,
(
("string", ("eee",)),
("string", ("aye",)),
("string", ("sea",)),
("bytes", (b"\x00",))
)
),
(
("aye", "bee", "sea", b"\xde\xee"),
True,
(
("string", ("aye",)),
("string", ("bee",)),
("string", ("sea",)),
("bytes", (b"\xde\xee",))
)
),
(
("aye", "bee", "sea", b"\xde\xee"),
True,
(
("string", None),
("string", None),
("string", None),
("bytes", None)
)
),
(
(("aye", "bee"), ("sea", "dee")),
True,
(
("string[]", (("aye", "bee"),)),
("string[]", (("sea", "dee"),)),
)
),
(
(["eee", "eff"], ["gee", "eich"]),
False,
(
("string[]", (("aye", "bee"),)),
("string[]", (("sea", "dee"),)),
)
),
)
)
def test_match_fn_with_various_data_types(data, expected, match_data_and_abi):
abi_types, match_data = zip(*match_data_and_abi)
encoded_data = encode_abi(abi_types, data)
assert match_fn(match_data_and_abi, encoded_data) == expected
def test_wrong_type_match_data():
data = ("hello", "goodbye")
match_data_and_abi = (
("string", (50505050,)),
("string", (50505050,)),
)
abi_types, match_data = zip(*match_data_and_abi)
encoded_data = encode_abi(abi_types, data)
with pytest.raises(ValueError):
match_fn(match_data_and_abi, encoded_data)
|
the-stack_0_16787 | import storageManager
from tkinter import *
from os import path
from tkinter import filedialog
from tkinter import Menu
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox
import os
#from sintactico import ejecutar_analisis
import reportes.RealizarReportes
import reportes.reportesimbolos as rs
import reportes.RealizarGramatica
from Instrucciones.TablaSimbolos.Tabla import Tabla
from Instrucciones.TablaSimbolos.Arbol import Arbol
from Instrucciones.Excepcion import Excepcion
from Instrucciones.Sql_create.CreateDatabase import CreateDatabase
from storageManager.jsonMode import *
from Codigo_3D import FuncionesPara3D
from Codigo_3D import Optimizacion
from Instrucciones.TablaSimbolos.Simbolo3D import Simbolo3d
import sintactico
import graficarArbol
import sintacticoGraph
global arbol
arbol = None
global tablaSym
tablaSym = None
'''
instruccion = CreateDatabase("bd1",None,"TRUE",None,None,None,None, 1,2)
instruccion.ejecutar(None,None)
# ---------------------------- TEST OF A SUM ----------------------------
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Expresiones import Primitivo, Logica
p1 = Primitivo.Primitivo(True,Tipo("",Tipo_Dato.BOOLEAN),1,1)
p2 = Primitivo.Primitivo(True,Tipo("",Tipo_Dato.BOOLEAN),1,1)
a = Arbol([])
op = Logica.Logica(p1,p2,'AND',1,2)
print('Resultado logica: ' + str(suma.ejecutar(None,a)))
# ---------------------------- TEST OF A SUM WITH A TYPE ERROR ----------------------------
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Expresiones import Primitivo, Aritmetica
p1 = Primitivo.Primitivo(1,Tipo("",Tipo_Dato.BOOLEAN),1,1)
p2 = Primitivo.Primitivo(2,Tipo("",Tipo_Dato.INTEGER),1,1)
a = Arbol([])
suma = Aritmetica.Aritmetica(p1,p2,'+',1,2)
suma.ejecutar(None,a)
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(a.excepciones)
'''
class interfaz():
def __init__(self):
        ##############################################MAIN WINDOW####################################
self.window=Tk()
#self.window.configure(background="#04DE5E")
img = PhotoImage(file='img/icons/postgesql2.png')
self.window.tk.call('wm', 'iconphoto', self.window._w, img)
#img = PhotoImage(file='img/icons/Postgresql.ico')
#self.window.tk.call('wm', 'iconphoto', self.window._w, img)
self.window.configure(background="#6a8d92")
self.window.title("Query Tool - Grupo 7")
#w, h = self.window.winfo_screenwidth()/2, self.window.winfo_screenheight()/2
w, h = 1370,670
self.window.geometry("%dx%d+0+0" % (w, h))
##############################################MENU####################################
menu = Menu(self.window)
new_item = Menu(menu,tearoff=0)
new_item.add_command(label='Abrir', command=self.abrir_click)
new_item.add_command(label='Guardar', command=self.guardar_click)
new_item.add_command(label='Guardar Como...', command=self.guardar_como_click)
#new_item.add_separator()
#new_item.add_command(label='Edit')
menu.add_cascade(label='Archivo', menu=new_item)
mnreportes = Menu(menu,tearoff=0)
mnreportes.add_command(label='Tabla de Errores', command=self.tblerrores_click)
mnreportes.add_command(label='Tabla de Simbolos', command=self.tblsimbolos_click)
mnreportes.add_command(label='AST', command=self.ast_click)
mnreportes.add_command(label='Reporte Gramatical', command=self.repDin_click)
menu.add_cascade(label='Reportes', menu=mnreportes)
menu3d = Menu(menu,tearoff=0)
menu3d.add_command(label='Traducir C3D', command=self.traducirc3d_click)
menu3d.add_command(label='Ejecutar C3D', command=self.ejecutarc3d_click)
menu3d.add_command(label='Optimizar C3D', command=self.optimizarc3d_click)
menu.add_cascade(label='3 Direcciones', menu=menu3d)
self.window.config(menu=menu)
        ##############################################BUTTONS####################################
img2 = PhotoImage(file='img/icons/AnalyzeMP.png')
btnanalizar = Button(self.window,image=img2 , bg="#6a8d92",height=35, width=40, command=self.btnanalizar_click)
btnanalizar.place(x=20,y=4)
img3 = PhotoImage(file='img/icons/play32.png')
btnejecutar = Button(self.window,image = img3 , bg="#6a8d92",height=35, width=40,command=self.btnejecutar_click)
btnejecutar.place(x=115,y=5)
        ##############################################TABS####################################
self.tab = ttk.Notebook(self.window)
self.tab.pack(fill='both',padx=20, pady=[50,20])
self.tab_frame =[]
self.txtentrada =[]
self.txtsalida =[]
self.crear_tab("","Nuevo.sql")
lblentrada= Label(self.window,text="Archivo de Entrada:",height=1, width=17,bg='#80b192')
lblentrada.place(x=20,y=80)
lblsalida= Label(self.window,text="Consola de Salida:",height=1, width=15,bg='#80b192')
lblsalida.place(x=20,y=350)
        # resize the window elements
        #self.window.bind('<Configure>',self.resizeEvent)
        # Object that stores the file
self.file=""
self.window.mainloop()
def ejecutar(self):
print("Hello World!")
print("Estoy ejecutando el main")
f = open("./entrada.txt", "r")
input = f.read()
#lista = "" : ""
#insert(database: "world", table: "countries", register: lista)
#print(input)
#parser.parse(input)
#Inserta "Archivo Analizado" en txtsalida
    ##############################################WINDOW RESIZE EVENT####################################
def resizeEvent(self, event):
print(event.width,event.height)
    ##############################################MENU BUTTON EVENT HANDLERS####################################
def traducirc3d_click(self):
global arbol
arbol = None
global tablaSym
dropAll()
os.system ("cls")
        # Clear the contents of txtsalida
self.txtsalida[self.tab.index("current")].delete(1.0,END)
input=self.txtentrada[self.tab.index("current")].get(1.0,END)
tablaGlobal = Tabla(None)
inst = sintactico.ejecutar_analisis(input)
arbol = Arbol(inst)
resultado = ""
for i in arbol.instrucciones:
res = i.traducir(tablaGlobal,arbol,"")
if isinstance(res, Simbolo3d):
resultado += res.codigo
else:
resultado += res
FuncionesPara3D.FuncionesPara3D.GenerarArchivo(resultado)
tablaSym = tablaGlobal
print("Archivo Traducido")
pass
def ejecutarc3d_click(self):
dropAll()
'''
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("CREATE DATABASE IF NOT EXISTS test\
OWNER = 'root'\
MODE = 1;")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("USE test;")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("CREATE TABLE persona (\
idpersona integer NOT NULL primary key,\
nombre varchar(15));")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("insert into persona values(1,\"Carlos\");")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("insert into persona values(2,\"Maria\");")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("insert into persona values(3,\"David\");")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("SELECT * FROM persona;")'''
from Codigo_3D import Codigo3D
#c3d.ejecutar()
mensaje = ""
for m in FuncionesPara3D.arbol.consola:
mensaje += m + '\n'
self.txtsalida[self.tab.index("current")].insert(INSERT,mensaje)
pass
def optimizarc3d_click(self):
op = Optimizacion.Optimizacion()
op.Optimizar()
op.GenerarReporte()
pass
def abrir_click(self):
try:
self.file = filedialog.askopenfilename(initialdir= os.path.dirname(__file__))
archivo=open(self.file,"r")
entrada=archivo.read()
archivo.close()
self.crear_tab(entrada,self.file.split("/").pop())
except FileNotFoundError:
messagebox.showwarning("Abrir","No selecciono ningún Archivo.")
except UnicodeDecodeError:
messagebox.showerror('Abrir','El Archivo seleccionado no es admitido.')
def guardar_click(self):
try:
archivo=open(self.file,"w")
archivo.write(self.txtentrada[self.tab.index("current")].get(1.0,END))
messagebox.showinfo('Aviso','Se Guardo el Archivo Correctamente!')
except FileNotFoundError:
messagebox.showerror('Guardar','No abrio ningun Archivo.')
except:
messagebox.showerror("Error","Contacte al Administrador del sistema.")
def guardar_como_click(self):
self.file = filedialog.askdirectory(initialdir= path.dirname(__file__))
archivo=open(self.file+"/"+self.tab.tab(self.tab.select(),"text"),"w")
archivo.write(self.txtentrada[self.tab.index("current")].get(1.0,END))
print(self.file+"/"+self.tab.tab(self.tab.select(),"text"))
print("guardar_como")
def tblerrores_click(self):
if len(sintactico.lista_lexicos)==0:
messagebox.showinfo('Tabla de Errores','La Entrada no Contiene Errores!')
else:
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(sintactico.lista_lexicos)
def tblsimbolos_click(self):
        # Function that builds the symbol table report; it receives a table as a parameter.
global arbol
global tablaSym
if arbol is not None:
rs.crear_tabla(arbol, tablaSym)
else:
rs.crear_tabla(FuncionesPara3D.arbol, FuncionesPara3D.tablaGlobal)
arbol = None
def ast_click(self):
input=self.txtentrada[self.tab.index("current")].get(1.0,END)
inst = sintacticoGraph.ejecutar_analisis(input)
if len(sintactico.lista_lexicos)>0:
messagebox.showerror('Tabla de Errores','La Entrada Contiene Errores!')
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(sintactico.lista_lexicos)
grafica = graficarArbol.GraphArbol(inst)
grafica.crearArbol()
print("ast")
def repDin_click(self):
global arbol
reportes.RealizarGramatica.RealizarGramatica.generar_reporte_gamatical(arbol.lRepDin)
arbol = None
    ##############################################FRAME BUTTON EVENT HANDLERS####################################
def btnanalizar_click(self):
global arbol
arbol = None
global tablaSym
dropAll()
os.system ("cls")
        # Clear the contents of txtsalida
        self.txtsalida[self.tab.index("current")].delete(1.0,END)
        # Insert "Archivo Analizado" into txtsalida
        #self.txtsalida[self.tab.index("current")].insert(INSERT,"Archivo Analizado")
        # Select the contents of txtentrada
        #print(self.txtentrada[self.tab.index("current")].get(1.0,END))
input=self.txtentrada[self.tab.index("current")].get(1.0,END)
tablaGlobal = Tabla(None)
inst = sintactico.ejecutar_analisis(input)
arbol = Arbol(inst)
if len(sintactico.lista_lexicos)>0:
messagebox.showerror('Tabla de Errores','La Entrada Contiene Errores!')
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(sintactico.lista_lexicos)
        # Loop that iterates over all the instructions produced by the grammar.
arbol.lRepDin.append("<init> ::= <instrucciones>")
arbol.lRepDin.append("<instrucciones> ::= <instrucciones> <instruccion>")
arbol.lRepDin.append("<instrucciones> ::= <instruccion>")
for i in arbol.instrucciones:
            # The variable resultado tells us whether a return, break or continue appears outside of its scope.
resultado = i.ejecutar(tablaGlobal,arbol)
        # After executing all the instructions, check that there are no semantic errors.
if len(arbol.excepciones) != 0:
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(arbol.excepciones)
        # Loop that prints all the messages stored in the consola variable.
tablaSym = tablaGlobal
mensaje = ''
for m in arbol.consola:
mensaje += m + '\n'
self.txtsalida[self.tab.index("current")].insert(INSERT,mensaje)
def btnejecutar_click(self):
print("se va ejecutar el archivo")
    ##############################################CREATE TABS IN THE NOTEBOOK####################################
def crear_tab(self,entrada,nombre):
self.tab_frame.append(Frame(self.tab,width=200, height=700,background="#80b192"))
self.tab_frame[-1].pack(fill='both', expand=1)
self.tab_frame[-1].config(bd=5)
self.tab.add(self.tab_frame[-1],text=nombre)
self.txtentrada.append(scrolledtext.ScrolledText(self.tab_frame[-1],width=162,height=15))
self.txtentrada[-1].place(x=0,y=25)
self.txtentrada[-1].insert(INSERT,entrada+"")
#self.txtentrada[-1].bind("<MouseWheel>", self.OnMouseWheel)
self.txtsalida.append(scrolledtext.ScrolledText(self.tab_frame[-1],width=162,height=15,background="#070707",foreground="#FEFDFD"))
self.txtsalida[-1].place(x=0,y=298)
        # file name
#print(self.tab.tab(self.tab.select(),"text"))
self.tab.select(int(len(self.tab_frame)-1))
#self.txtsalida[-1].insert(INSERT,entrada+"")
#def OnMouseWheel(self,event):
# print("scrool mouse")
def main():
mi_app = interfaz()
return(0)
if __name__ == '__main__':
main() |
the-stack_0_16788 | # Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import codecs
import collections
import csv
import datetime
import os
import re
import shutil
import stat
import jinja2
import six
import yaml
from guild import guildfile
from guild import index2 as indexlib
from guild import run_util
from guild import util
DEFAULT_DEST_HOME = "published-runs"
DEFAULT_TEMPLATE = "default"
COPY_DEFAULT_FILES = 1
COPY_ALL_FILES = 2
class PublishError(Exception):
pass
class TemplateError(PublishError):
def __init__(self, e):
super(TemplateError, self).__init__(e)
self._e = e
def __str__(self):
e = self._e
msg = e.filename
if hasattr(e, "lineno"):
msg += ":" + str(e.lineno)
if e.message:
msg += ": " + e.message
return msg
class GenerateError(PublishError):
def __init__(self, e, template):
super(GenerateError, self).__init__(e)
self._e = e
self._template = template
def __str__(self):
return "%s: %s" % (
_format_template_files(self._template),
self._e.message)
def _format_template_files(t):
if len(t.files) == 1:
basename = t.files[0]
else:
basename = "{%s}" % ",".join(sorted(t.files))
return os.path.join(t.path, basename)
class RunFilters(object):
IMG_PATTERN = re.compile(
r"\.(png|gif|jpe?g|tiff?|bmp|webp)",
re.IGNORECASE)
def __init__(self, run_dest):
self.run_dest = run_dest
def install(self, env):
env.filters.update({
"csv_dict_rows": self.csv_dict_rows,
"empty": self.empty,
"file_size": self.file_size,
"flag_val": self.flag_val,
"nbhyph": self.nbhyph,
"nbsp": self.nbsp,
"runfile_link": self.runfile_link,
"scalar_key": self.scalar_key,
"short_id": self.short_id,
"utc_date": self.utc_date,
})
@staticmethod
def empty(val):
if val in (None, "") or isinstance(val, jinja2.Undefined):
return ""
return val
@staticmethod
def flag_val(val):
if isinstance(val, jinja2.Undefined):
return ""
return run_util.format_attr(val)
def runfile_link(self, path):
if self.run_dest is None:
raise TemplateError(
"runfile_link cannot be used in this context "
"(not publishing a run")
if not isinstance(path, six.string_types):
return ""
maybe_runfile = os.path.join(self.run_dest, "runfiles", path)
if os.path.isfile(maybe_runfile):
return "runfiles/" + path
return None
@staticmethod
def utc_date(val, unit="s"):
if not isinstance(val, (int, float) + six.string_types):
return ""
try:
val = int(val)
except (ValueError, TypeError):
return ""
else:
if unit == "s":
ts = val * 1000000
elif unit == "ms":
ts = val * 1000
elif unit == "us":
ts = val
else:
raise ValueError(
"unsupported unit %r (expected s, ms, or us)"
% unit)
return util.utcformat_timestamp(ts)
@staticmethod
def file_size(val):
if not isinstance(val, (int, float) + six.string_types):
return ""
try:
bytes = int(val)
except (ValueError, TypeError):
return ""
else:
return util.format_bytes(bytes)
@staticmethod
def scalar_key(s):
return run_util.run_scalar_key(s)
@staticmethod
def csv_dict_rows(csv_rows):
keys = csv_rows[0]
return [dict(zip(keys, row)) for row in csv_rows[1:]]
@staticmethod
def nbsp(x):
if not x:
return " "
return x
@staticmethod
def short_id(id):
if not isinstance(id, six.string_types):
return ""
return id[:8]
@staticmethod
def nbhyph(s):
if not s:
return s
return s.replace("-", "‑")
class Template(object):
def __init__(self, path, run_dest=None, filters=None):
if not os.path.exists(path):
raise RuntimeError("invalid template source: %s" % path)
self.path = path
self._file_templates = sorted(
_init_file_templates(path, run_dest, filters))
@property
def files(self):
return [t[0] for t in self._file_templates]
def generate(self, dest, vars):
util.ensure_dir(dest)
for relpath, src, template in self._file_templates:
file_dest = os.path.join(dest, relpath)
util.ensure_dir(os.path.dirname(file_dest))
if template is None:
shutil.copyfile(src, file_dest)
else:
_render_template(template, vars, file_dest)
def _init_file_templates(path, run_dest=None, filters=None):
ts = []
for root, _dirs, files in os.walk(path):
for name in files:
if name[:1] == "_":
continue
abspath = os.path.join(root, name)
relpath = os.path.relpath(abspath, path)
template = _init_file_template(abspath, run_dest, filters)
ts.append((relpath, abspath, template))
return ts
def _init_file_template(path, run_dest=None, filters=None):
if not util.is_text_file(path):
return None
dirname, basename = os.path.split(path)
templates_home = _local_path("templates")
env = jinja2.Environment(
loader=jinja2.FileSystemLoader([dirname, templates_home]),
autoescape=jinja2.select_autoescape(['html', 'xml']))
RunFilters(run_dest).install(env)
if filters:
env.filters.update(filters)
try:
return env.get_template(basename)
except jinja2.TemplateError as e:
raise TemplateError(e)
def _render_template(template, vars, dest):
with open(dest, "w") as f:
for part in template.generate(vars):
f.write(part)
f.write(os.linesep)
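# Illustrative sketch (not from the original source) of how Template and
# RunFilters fit together: `template_dir` is assumed to hold jinja2 template
# files (e.g. README.md) and `run_dest` a published run directory so that the
# runfile_link filter can resolve paths under run_dest/runfiles.
def _example_generate(template_dir, run_dest, out_dir, render_vars):
    template = Template(template_dir, run_dest=run_dest)
    # Non-text files are copied verbatim; text files are rendered with the
    # RunFilters-enabled jinja2 environment.
    template.generate(out_dir, render_vars)
    return template.files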
PublishRunState = collections.namedtuple(
"PublishRunState", [
"run",
"opdef",
"copy_files",
"include_links",
"formatted_run",
"dest_home",
"template",
"run_dest",
"md5s",
])
def publish_run(run, dest=None, template=None, copy_files=None,
include_links=False, md5s=True, formatted_run=None):
state = _init_publish_run_state(
run,
dest,
template,
copy_files,
include_links,
md5s,
formatted_run)
_init_published_run(state)
_publish_run_guild_files(state)
_copy_sourcecode(state)
_copy_runfiles(state)
_generate_template(state)
def _init_publish_run_state(run, dest, template, copy_files, include_links,
md5s, formatted_run):
dest_home = dest or DEFAULT_DEST_HOME
opdef = _run_opdef(run)
run_dest = _published_run_dest(dest_home, run)
template = _init_template(template, opdef, run_dest)
if not formatted_run:
formatted_run = _format_run_for_publish(run)
return PublishRunState(
run,
opdef,
copy_files,
include_links,
formatted_run,
dest_home,
template,
run_dest,
md5s,
)
def _run_opdef(run):
try:
gf = guildfile.for_run(run)
except (guildfile.NoModels, TypeError):
return None
else:
assert run.opref, run.path
try:
m = gf.models[run.opref.model_name]
except KeyError:
return None
else:
return m.get_operation(run.opref.op_name)
def _init_template(template, opdef, run_dest):
template_spec = util.find_apply([
lambda: template,
lambda: _opdef_template(opdef)
])
template_path = _find_template(template_spec, opdef)
return Template(template_path, run_dest)
def _opdef_template(opdef):
return util.find_apply([
lambda: _opdef_publish_template(opdef),
lambda: DEFAULT_TEMPLATE
])
def _opdef_publish_template(opdef):
if not opdef or not opdef.publish:
return None
return opdef.publish.template
def _find_template(name, opdef):
return util.find_apply([
lambda: _abs_template(name),
lambda: _default_template(name),
lambda: _project_template(name, opdef),
lambda: _cannot_find_template_error(name)])
def _abs_template(name):
if name[:1] == "." and os.path.exists(name):
return name
return None
def _default_template(name):
if name == "default":
return _local_path("templates/publish-default")
return None
def _local_path(path):
return os.path.join(os.path.dirname(__file__), path)
def _project_template(name, opdef):
path = os.path.join(opdef.guildfile.dir, name)
if os.path.exists(path):
return path
return None
def _cannot_find_template_error(name):
raise PublishError("cannot find template %s" % name)
def _published_run_dest(dest_home, run):
return os.path.join(dest_home, run.id)
def _format_run_for_publish(run):
frun = run_util.format_run(run)
if not frun["stopped"]:
frun["duration"] = ""
return frun
def _init_published_run(state):
"""Ensure empty target directory for published run.
As a side effect, lazily creates `state.dest_home` and creates
`.guild-nocopy` to ensure that the published runs home is not
considered by Guild for source snapshots.
"""
util.ensure_dir(state.dest_home)
util.touch(os.path.join(state.dest_home, ".guild-nocopy"))
if os.path.exists(state.run_dest):
util.safe_rmtree(state.run_dest)
os.mkdir(state.run_dest)
def _publish_run_guild_files(state):
_publish_run_info(state)
_publish_flags(state)
_publish_scalars(state)
_publish_output(state)
_publish_sourcecode_list(state)
_publish_runfiles_list(state)
def _publish_run_info(state):
"""Write run.yml to run publish dest.
This function should be kept in sync with output generated by
`guild runs info` - minus system-specific values (e.g. run_dir and
pid) and flags (which are written to a separate file).
"""
run = state.run
frun = state.formatted_run
path = os.path.join(state.run_dest, "run.yml")
encode = lambda x: util.encode_yaml(x).rstrip()
fmt_ts = util.utcformat_timestamp
started = run.get("started")
stopped = run.get("stopped")
with codecs.open(path, "w", "utf-8") as f:
f.write("id: %s\n" % run.id)
f.write("operation: %s\n" % encode(frun["operation"]))
f.write("status: %s\n" % encode(frun["status"]))
f.write("started: %s\n" % fmt_ts(started))
f.write("stopped: %s\n" % fmt_ts(stopped))
f.write("time: %s\n" % _format_time(started, stopped))
f.write("marked: %s\n" % encode(frun["marked"]))
f.write("label: %s\n" % encode(run.get("label")))
f.write("command: %s\n" % encode(frun["command"]))
f.write("exit_status: %s\n" % encode(frun["exit_status"]))
def _format_time(started, stopped):
if started and stopped:
return util.format_duration(started, stopped)
return ""
def _publish_flags(state):
flags = state.run.get("flags") or {}
dest = os.path.join(state.run_dest, "flags.yml")
_save_yaml(flags, dest)
def _save_yaml(val, path):
with open(path, "w") as f:
yaml.safe_dump(
val, f,
default_flow_style=False,
indent=2,
encoding="utf-8",
allow_unicode=True)
def _publish_scalars(state):
cols = [
"prefix",
"tag",
"count",
"total",
"avg_val",
"first_val",
"first_step",
"last_val",
"last_step",
"min_val",
"min_step",
"max_val",
"max_step",
]
dest = os.path.join(state.run_dest, "scalars.csv")
scalars = _run_scalars(state)
with open(dest, "w") as f:
out = csv.writer(f, lineterminator="\n")
out.writerow(cols)
for s in scalars:
out.writerow([s[col] for col in cols])
def _run_scalars(state):
index = indexlib.RunIndex()
index.refresh([state.run], ["scalar"])
return list(index.run_scalars(state.run))
def _publish_output(state):
src = state.run.guild_path("output")
if os.path.isfile(src):
dest = os.path.join(state.run_dest, "output.txt")
shutil.copyfile(src, dest)
def _publish_sourcecode_list(state):
src = state.run.guild_path("sourcecode")
dest = os.path.join(state.run_dest, "sourcecode.csv")
paths = _dir_paths(src, skip_guildfiles=True)
with open(dest, "w") as f:
_write_paths_csv(paths, src, state.md5s, f)
def _dir_paths(dir, skip_guildfiles=False):
seen = set()
paths = []
for root, dirs, names in os.walk(dir, followlinks=True):
if skip_guildfiles:
_remove_guild_dir(dirs)
for name in dirs + names:
path = os.path.join(root, name)
abs_path = os.path.abspath(path)
if abs_path in seen:
continue
seen.add(abs_path)
paths.append(path)
paths.sort()
return paths
def _remove_guild_dir(dirs):
try:
dirs.remove(".guild")
except ValueError:
pass
def _write_paths_csv(paths, root, md5s, f):
out = csv.writer(f, lineterminator="\n")
out.writerow(["path", "type", "size", "mtime", "md5"])
for path in paths:
out.writerow(_path_row(path, root, md5s))
def _path_row(path, root, md5):
try:
st = os.stat(path)
except OSError:
st = None
try:
lst = os.lstat(path)
except OSError:
lst = None
return [
os.path.relpath(path, root),
_path_type(st, lst),
st.st_size if st else "",
_path_mtime(st),
_path_md5(path, st) if md5 else "",
]
def _path_type(st, lst):
parts = []
if st:
if stat.S_ISREG(st.st_mode):
parts.append("file")
elif stat.S_ISDIR(st.st_mode):
parts.append("dir")
else:
parts.append("other")
if lst:
if stat.S_ISLNK(lst.st_mode):
parts.append("link")
return " ".join(parts)
def _path_mtime(st):
if not st:
return ""
return int((st.st_mtime + _utc_offset()) * 1000000)
def _utc_offset():
try:
return globals()["__utc_offset"]
except KeyError:
globals()["__utc_offset"] = offset = int(round(
(datetime.datetime.now() -
datetime.datetime.utcnow()).total_seconds()))
return offset
def _path_md5(path, st):
if not st or not stat.S_ISREG(st.st_mode):
return ""
return util.file_md5(path)
def _publish_runfiles_list(state):
dest = os.path.join(state.run_dest, "runfiles.csv")
paths = _dir_paths(state.run.dir, skip_guildfiles=True)
with open(dest, "w") as f:
_write_paths_csv(paths, state.run.dir, state.md5s, f)
def _copy_sourcecode(state):
src = state.run.guild_path("sourcecode")
if not os.path.isdir(src):
return
dest = os.path.join(state.run_dest, "sourcecode")
shutil.copytree(src, dest)
class PublishRunVars(object):
def __init__(self, state):
self._state = state
self._cache = {}
self._keys = [
"flags",
"output",
"run",
"runfiles",
"scalars",
"sourcecode",
]
def keys(self):
return self._keys
def __getitem__(self, name):
try:
return self._cache[name]
except KeyError:
self._cache[name] = val = self._load(name)
return val
def _load(self, name):
return util.find_apply([
self._load_yaml,
self._load_csv,
self._load_txt], name)
def _load_yaml(self, name):
path = os.path.join(self._state.run_dest, name + ".yml")
if not os.path.exists(path):
return None
return yaml.safe_load(open(path, "r"))
def _load_csv(self, name):
path = os.path.join(self._state.run_dest, name + ".csv")
if not os.path.exists(path):
return None
with open(path, "r") as f:
return list(csv.reader(f))
def _load_txt(self, name):
path = os.path.join(self._state.run_dest, name + ".txt")
if not os.path.exists(path):
return None
return open(path, "r").read()
class CopyRunFilesFilter(object):
def __init__(self, state):
self._run_dir = state.run.dir
self._include_links = state.include_links
def delete_excluded_dirs(self, root, dirs):
self._delete_guild_dir(dirs)
self._maybe_delete_links(root, dirs)
@staticmethod
def _delete_guild_dir(dirs):
try:
dirs.remove(".guild")
except ValueError:
pass
def _maybe_delete_links(self, root, dirs):
if self._include_links:
return
for name in list(dirs):
if os.path.islink(os.path.join(root, name)):
dirs.remove(name)
def default_select_path(self, path):
if os.path.islink(path):
return self._include_links
return True
@staticmethod
def pre_copy(_to_copy):
pass
def _copy_runfiles(state):
if not state.copy_files:
return
util.select_copytree(
state.run.dir,
_runfiles_dest(state),
_copy_runfiles_config(state),
CopyRunFilesFilter(state))
def _runfiles_dest(state):
return os.path.join(state.run_dest, "runfiles")
def _copy_runfiles_config(state):
if state.copy_files == COPY_ALL_FILES or not state.opdef:
return []
return [state.opdef.publish.files]
def _generate_template(state):
template = state.template
render_vars = PublishRunVars(state)
try:
template.generate(state.run_dest, render_vars)
except jinja2.TemplateRuntimeError as e:
raise GenerateError(e, template)
except jinja2.exceptions.TemplateNotFound as e:
e.message = "template not found: %s" % e.message
raise GenerateError(e, template)
def _template_config(opdef):
if not opdef or not opdef.publish:
return {}
config = opdef.publish.get("config") or {}
return {
name.replace("-", "_"): val
for name, val in config.items()
}
def refresh_index(dest):
dest_home = dest or DEFAULT_DEST_HOME
index_template_path = _local_path("templates/runs-index/README.md")
index_template = _init_file_template(index_template_path)
assert index_template, index_template_path
index_path = os.path.join(dest_home, "README.md")
runs = _published_runs(dest_home)
_render_template(index_template, {"runs": runs}, index_path)
def _published_runs(dest_home):
runs = []
for name in os.listdir(dest_home):
run_yml = os.path.join(dest_home, name, "run.yml")
if not os.path.exists(run_yml):
continue
info = yaml.safe_load(open(run_yml, "r"))
runs.append(info)
return sorted(runs, key=lambda run: run.get("started"), reverse=True)
|
the-stack_0_16791 | import numpy
import pytest
import chainer
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('a_shape,b_shape', [
((), ()),
((), (2, 3)),
((0, 2), (2, 0)),
((2, 0), (0, 3)),
((0, 0), (0, 0)),
((2, 3), (3, 4)),
((1, 2, 3), (3, 4)),
((1, 2, 0), (0, 4)),
((1, 0, 3), (3, 0)),
((1, 0, 3), (3, 4)),
((1, 2, 3), (3, 0)),
((1, 2), (1, 2, 3)),
((1, 0), (1, 0, 3)),
((0, 2), (1, 2, 0)),
((0, 2), (1, 2, 3)),
((1, 2), (1, 2, 0)),
((4, 5, 2), (3, 2, 5)),
((2, 3, 4, 4), (3, 4, 2)),
((2, 2, 3, 1), (2, 1, 3, 1, 4)),
((2, 4, 3), (1, 2, 3, 2)),
((1, 2, 3, 0), (4, 0, 5)),
((1, 2, 0, 3), (4, 3, 0)),
((1, 2, 0, 3), (4, 3, 5))
])
@chainer.testing.parameterize_pytest(
'in_dtypes,chx_expected_dtype', dtype_utils.result_dtypes_two_arrays)
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestDot(op_utils.NumpyOpTest):
def setup(self):
device = chainerx.get_default_device()
a_dtype, b_dtype = self.in_dtypes
a_kind = numpy.dtype(a_dtype).kind
b_kind = numpy.dtype(b_dtype).kind
# TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
if device.name == 'cuda:0' and (a_kind != 'f' and b_kind != 'f'):
pytest.skip('non-float dot is not supported on CUDA')
# Skip backward/double-backward tests for int dtypes
if a_kind != 'f' or b_kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
# Skip backward/double-backward tests if the output will be
# disconnected.
# TODO(niboshi): Remove this skip condition after enabling backward()
# for such cases.
if self.a_shape and self.a_shape[-1] == 0:
self.skip_backward_test = True
self.skip_double_backward_test = True
if a_dtype == 'float16' or b_dtype == 'float16':
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
a_dtype, b_dtype = self.in_dtypes
a_shape = self.a_shape
b_shape = self.b_shape
a = numpy.random.uniform(-1, 1, a_shape).astype(a_dtype)
b = numpy.random.uniform(-1, 1, b_shape).astype(b_dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
if self.is_module:
y = xp.dot(a, b)
else:
y = a.dot(b)
y = dtype_utils.cast_if_numpy_array(xp, y, self.chx_expected_dtype)
return y,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('a_shape,b_shape', [
((3, 2), (1, 3)),
((4, 3, 2, 5), (6, 4, 1, 2))
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_dot_invalid(is_module, xp, device, a_shape, b_shape, dtype):
# TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
if device.name == 'cuda:0' and numpy.dtype(dtype).kind != 'f':
return chainerx.testing.ignore()
a = array_utils.create_dummy_ndarray(xp, a_shape, dtype)
b = array_utils.create_dummy_ndarray(xp, b_shape, dtype)
if is_module:
return xp.dot(a, b)
else:
return a.dot(b)
class NumpyLinalgOpTest(op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def setup(self):
device = chainerx.get_default_device()
if (device.backend.name == 'native'
and not chainerx.linalg._is_lapack_available()):
pytest.skip('LAPACK is not linked to ChainerX')
self.check_backward_options.update({'rtol': 5e-3})
self.check_double_backward_options.update({'rtol': 5e-3})
_numpy_does_not_support_0d_input113 = \
numpy.lib.NumpyVersion(numpy.__version__) < '1.13.0'
_numpy_does_not_support_0d_input116 = \
numpy.lib.NumpyVersion(numpy.__version__) < '1.16.0'
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3), (6, 6)],
'b_columns': [(), (1,), (3,), (4,)],
'dtypes': [
('float32', 'float32'),
('float64', 'float64'),
('float64', 'float32'),
('float32', 'float64')]
})
))
class TestSolve(NumpyLinalgOpTest):
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtypes[0])
b = numpy.random.random(
(self.shape[0], *self.b_columns)).astype(self.dtypes[1])
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
out = xp.linalg.solve(a, b)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(2, 3), (3, 2)],
'dtype': ['float32', 'float64']
})
))
class TestSolveFailing(NumpyLinalgOpTest):
forward_accept_errors = (numpy.linalg.LinAlgError,
chainerx.DimensionError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
b = numpy.random.random(self.shape).astype(self.dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
out = xp.linalg.solve(a, b)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(3, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestSolveDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
b = numpy.random.random(self.shape).astype(self.dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
out = xp.linalg.solve(a, b)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3), (6, 6)],
'dtype': ['float32', 'float64']
})
))
class TestInverse(NumpyLinalgOpTest):
# For zero sized input strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.inv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(2, 3), (3, 2)],
'dtype': ['float32', 'float64']
})
))
class TestInverseFailing(NumpyLinalgOpTest):
forward_accept_errors = (numpy.linalg.LinAlgError,
chainerx.DimensionError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.inv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(3, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestInverseDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.inv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'dtype': ['float32', 'float64'],
'full_matrices': [False],
'compute_uv': [True]
}) + chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'dtype': ['float32', 'float64'],
'full_matrices': [True],
'compute_uv': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSVD(NumpyLinalgOpTest):
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input116 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.svd(a,
full_matrices=self.full_matrices,
compute_uv=self.compute_uv)
# NOTE: cuSOLVER's (CuPy's) and NumPy's outputs of u and v might
# differ in signs, which is not a problem mathematically
if self.compute_uv:
u, s, v = out
return xp.abs(u), s, xp.abs(v)
else:
s = out
return s,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(2, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestSVDDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.svd(a)
u, s, v = out
return xp.abs(u), s, xp.abs(v)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'rcond': [1e-15, 0.3, 0.5, 0.6],
'dtype': ['float32', 'float64']
})
))
class TestPseudoInverse(NumpyLinalgOpTest):
# For zero sized input strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
a = a * 10 + numpy.ones(self.shape)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input113 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.pinv(a, rcond=self.rcond)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(), ],
'rcond': [1e-15, ],
'dtype': ['float32', 'float64']
})
))
class TestPseudoInverseFailing(NumpyLinalgOpTest):
forward_accept_errors = (numpy.linalg.LinAlgError,
chainerx.ChainerxError,
chainerx.DimensionError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.pinv(a, rcond=self.rcond)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(2, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestPseudoInverseDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.pinv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# backward for 'r', 'raw' modes is not implemented
chainer.testing.product({
'shape': [(0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'in_dtypes': ['float32', 'float64'],
'mode': ['r', 'raw'],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
}) +
# backward for non-square `R` is not implemented
chainer.testing.product({
'shape': [(0, 3), (3, 0), (2, 3), (3, 2)],
'in_dtypes': ['float32', 'float64'],
'mode': ['complete', 'reduced'],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
}) +
chainer.testing.product({
'shape': [(1, 1), (6, 6)],
'in_dtypes': ['float32', 'float64'],
'mode': ['reduced', 'complete']
}) + chainer.testing.product({
'shape': [(3, 2)],
'in_dtypes': ['float32', 'float64'],
'mode': ['reduced']
})
))
class TestQR(NumpyLinalgOpTest):
# For input with shape (N, 0) strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.in_dtypes)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (numpy.lib.NumpyVersion(numpy.__version__) < '1.16.0'
and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.qr(a, mode=self.mode)
if self.mode == 'r':
r = out
return r,
if self.mode == 'raw':
if a.dtype.char == 'f':
return out[0].astype(xp.float64), out[1].astype(xp.float64)
return out
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(1, 1), (2, 3), (3, 2), (6, 6)],
'in_dtypes': ['float16'],
'mode': ['r', 'raw', 'reduced', 'complete']
})
))
class TestQRFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.in_dtypes)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.qr(a, mode=self.mode)
return out
|
the-stack_0_16794 | import pandas as pd
import numpy as np
from scipy.spatial import cKDTree
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path
# from sklearn.preprocessing import OrdinalEncoder
from ..tools.Markov import DiscreteTimeMarkovChain
from ..prediction.fate import _fate
from ..vectorfield import vector_field_function
from ..tools.utils import fetch_states
from ..tools.clustering import neighbors
from .utils import (
remove_redundant_points_trajectory,
arclength_sampling,
integrate_streamline,
)
import anndata
from typing import List, Union
from ..dynamo_logger import LoggerManager, main_info, main_warning
def classify_clone_cell_type(adata, clone, clone_column, cell_type_column, cell_type_to_excluded):
"""find the dominant cell type of all the cells that are from the same clone"""
cell_ids = np.where(adata.obs[clone_column] == clone)[0]
to_check = adata[cell_ids].obs[cell_type_column].value_counts().index.isin(list(cell_type_to_excluded))
cell_type = np.where(to_check)[0]
return cell_type
def prune_transition(
adata: anndata.AnnData,
group: str,
basis: str = "umap",
n_neighbors: int = 30,
neighbor_key: Union[str, None] = None,
graph_mat: np.ndarray = None,
state_graph_method: str = "vf",
):
"""This function prune a cell group transiton graph based on cell similarity graph (kNN graph).
The pruning algorithm is as following: assuming the vf based cell-type transition graph is `m` (cell type x cell
type matrix); the `M` matrix as the cell to cell-type assignment matrix (row is the cell and column the cell type;
if i-th cell is j-th cell type, the `M_{ij}` is 1). the knn graph between cells based on the umap embedding (or
others) is `n` (number of cells x number of cells matrix). We compute `t(M) n M` to get a cell-type by cell type
connectivity graph M' (basically this propagates the cell type to cell matrix to the cell-cell knn graph and then
lump the transition down to cell-type). Lastly, `g * M'` will give pruned graph, where `g` is the vector field
based cell-type transition graph. As you can see the resultant graph considers both vector field based connection
and the similarity relationship of cells in expression space.
Parameters
----------
adata:
AnnData object.
group:
Cell graph that will be used to build transition graph and lineage tree.
basis:
The basis that will be used to build the k-nearest neighbor graph when neighbor_key is not set.
n_neighbors:
The number of neighbors that will be used to build the k-nn graph, passed to `dyn.tl.neighbors` function. Not
used when neighbor_key provided.
neighbor_key:
The nearest neighbor graph key in `adata.obsp`. This nearest neighbor graph will be used to build a
gene-expression space based cell-type level connectivity graph.
state_graph_method:
Method that will be used to build the initial state graph.
Returns
-------
M:
The pruned cell state transition graph.
"""
logger = LoggerManager.gen_logger("dynamo-prune_transition")
logger.log_time()
from patsy import dmatrix
if group not in adata.obs.columns:
raise Exception(f"group has to be in adata.obs.columns, but you have {group}. ")
data = adata.obs
groups = data[group]
uniq_grps, data[group] = groups.unique(), list(groups)
sorted_grps = np.sort(uniq_grps)
if graph_mat is not None:
if graph_mat.shape != (len(uniq_grps), len(uniq_grps)):
raise Exception(f"the input graph_mat has to have the same shape as ({len(uniq_grps), len(uniq_grps)})")
group_graph = graph_mat
else:
if group + "_graph" not in adata.uns_keys():
main_info(f"build state graph `g` via {state_graph_method}")
state_graph(adata, group=group, basis=basis, method=state_graph_method) # the markov method
group_graph = adata.uns[group + "_graph"]["group_graph"]
if neighbor_key is None:
main_info(f"build knn graph with {n_neighbors} neighbors in {basis} basis.")
neighbors(adata, basis=basis, result_prefix=basis + "_knn", n_neighbors=n_neighbors)
transition_matrix = adata.obsp[basis + "_knn_distances"]
else:
main_info(f"retrieve knn graph via {neighbor_key} ley.")
transition_matrix = adata.obsp[neighbor_key]
main_info("build cell to cell graph assignment matrix via `dmatrix` from `pasty`")
cell_membership = csr_matrix(dmatrix(f"~{group}+0", data=data))
main_info("build lumped cell group to cell group connectivity matrix via `t(M) n M`.")
membership_matrix = cell_membership.T.dot(transition_matrix).dot(cell_membership)
main_info("prune vf based cell graph transition graph via g' = `M' g")
# note that dmatrix will first sort the unique group names and then construct the design matrix, so this is needed.
membership_df = pd.DataFrame(membership_matrix.A > 0, index=sorted_grps, columns=sorted_grps)
M = (group_graph * (membership_df.loc[uniq_grps, uniq_grps].values > 0) > 0).astype(float)
logger.finish_progress(progress_name="prune_transition")
return M
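# Minimal numerical sketch (illustrative only, toy matrices rather than dynamo's
# API) of the lumping step described in the docstring above. Here the kNN graph
# has no edges between cells of different types, so the off-diagonal entries of
# the lumped connectivity are zero and the vf-based edges in `g` are pruned away.
def _prune_transition_sketch():
    g = np.array([[0.0, 1.0], [1.0, 0.0]])  # vf-based cell-type transition graph
    M = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])  # 4 cells assigned to 2 types
    knn = np.array(
        [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]
    )  # cell-cell kNN graph
    lumped = M.T @ knn @ M  # t(M) n M: cell-type level connectivity
    return (g * (lumped > 0) > 0).astype(float)  # g masked by connectivity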
def state_graph(
adata,
group,
method="vf",
transition_mat_key="pearson_transition_matrix",
approx=False,
eignum=5,
basis="umap",
layer=None,
arc_sample=False,
sample_num=100,
prune_graph=False,
**kwargs,
):
"""Estimate the transition probability between cell types using method of vector field integrations or Markov chain
lumping.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that will be used to calculate a cell type (group) transition graph.
group: `str`
The attribute to group cells (column names in the adata.obs).
method: `str` (default: 'vf')
The method that will be used to construct lumped cell state graph. Must be one of {`vf` or `markov`}
transition_mat_key: `str` (default: 'pearson_transition_matrix')
The key that corresponds to the transition graph used in the KernelMarkovChain class for lumping.
approx: `bool` (default: False)
Whether to use streamplot to get the integration lines from each cell.
eignum: `int` (default: 5)
The number of eigen-vectors when performing the eigen-decomposition to obtain the stationary
distribution. 5 should be sufficient as the stationary distribution will be the first eigenvector. This also
accelerates the calculation.
basis: `str` or None (default: `umap`)
The embedding data to use for predicting cell fate. If `basis` is either `umap` or `pca`, the reconstructed
trajectory will be projected back to high dimensional space via the `inverse_transform` function.
layer: `str` or None (default: `None`)
Which layer of the data will be used for predicting cell fate with the reconstructed vector field function.
The layer once provided, will override the `basis` argument and then predicting cell fate in high
dimensional space.
sample_num: `int` (default: 100)
            The number of cells to sample in each group that will be used for calculating the transition graph between
cell groups. This is required for facilitating the calculation.
prune_graph: `bool` (default: `False`)
Whether to prune the transition graph based on cell similarities in `basis` bases.
kwargs:
Additional parameters that will be passed to `prune_transition` function.
Returns
-------
An updated adata object that is added with the `group + '_graph'` key, including the transition graph
and the average transition time.
"""
logger = LoggerManager.get_main_logger()
timer_logger = LoggerManager.get_temp_timer_logger()
timer_logger.log_time()
logger.info("Estimating the transition probability between cell types...")
groups, uniq_grp = adata.obs[group], list(adata.obs[group].unique())
if method.lower() in ["naive", "markov"]:
logger.info("Applying kernel Markov chain")
T = adata.obsp[transition_mat_key]
if np.isclose(T.sum(1), 1).sum() > np.isclose(T.sum(0), 1).sum():
logger.info("KernelMarkovChain assuming column sum to be 1. Transposing transition matrix")
T = T.T
if sp.issparse(T):
T = T.A
dtmc = DiscreteTimeMarkovChain(P=T, eignum=eignum, check_norm=False)
# ord_enc = OrdinalEncoder()
# labels = ord_enc.fit_transform(adata.obs[[group]])
# labels = labels.flatten().astype(int)
labels = np.zeros(len(groups), dtype=int)
for i, grp in enumerate(uniq_grp):
labels[groups == grp] = i
grp_graph = dtmc.lump(labels).T if method == "markov" else dtmc.naive_lump(T.A, labels).T
label_len, grp_avg_time = len(np.unique(labels)), None
grp_graph = grp_graph[:label_len, :label_len]
elif method == "vf":
logger.info("Applying vector field")
grp_graph = np.zeros((len(uniq_grp), len(uniq_grp)))
grp_avg_time = np.zeros((len(uniq_grp), len(uniq_grp)))
all_X, VecFld, t_end, _ = fetch_states(
adata,
init_states=None,
init_cells=adata.obs_names,
basis=basis,
layer=layer,
average=False,
t_end=None,
)
logger.report_progress(percent=0, progress_name="KDTree parameter preparation computation")
logger.log_time()
kdt = cKDTree(all_X, leafsize=30)
logger.finish_progress(progress_name="KDTree computation")
vf_dict = adata.uns["VecFld_" + basis]
for i, cur_grp in enumerate(LoggerManager.progress_logger(uniq_grp, progress_name="iterate groups")):
init_cells = adata.obs_names[groups == cur_grp]
if sample_num is not None:
cell_num = np.min((sample_num, len(init_cells)))
ind = np.random.choice(len(init_cells), cell_num, replace=False)
init_cells = init_cells[ind]
init_states, _, _, _ = fetch_states(
adata,
init_states=None,
init_cells=init_cells,
basis=basis,
layer=layer,
average=False,
t_end=None,
)
if approx and basis != "pca" and layer is None:
X_grid, V_grid = (
vf_dict["grid"],
vf_dict["grid_V"],
)
N = int(np.sqrt(V_grid.shape[0]))
X_grid, V_grid = (
np.array([np.unique(X_grid[:, 0]), np.unique(X_grid[:, 1])]),
np.array(
[
V_grid[:, 0].reshape((N, N)),
V_grid[:, 1].reshape((N, N)),
]
),
)
t, X = integrate_streamline(
X_grid[0],
X_grid[1],
V_grid[0],
V_grid[1],
integration_direction="forward",
init_states=init_states,
interpolation_num=250,
average=False,
)
else:
t, X = _fate(
lambda x: vector_field_function(x=x, vf_dict=vf_dict),
init_states,
t_end=t_end,
step_size=None,
direction="forward",
interpolation_num=250,
average=False,
)
# t, X = np.hstack(t), np.hstack(X).T
len_per_cell = None if type(t) == list else len(t)
cell_num = len(t) if type(X) == list else int(X.shape[0] / len(t))
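# nearest-neighbor distances of the initial states set the tolerance for dropping redundant trajectory
# points (dist_min) and the threshold for counting a trajectory point as passing through an observed cell (dist_threshold)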
knn_dist_, knn_ind_ = kdt.query(init_states, k=2)
dist_min, dist_threshold = (
np.max([knn_dist_[:, 1].min(), 1e-3]),
np.mean(knn_dist_[:, 1]),
)
for j in np.arange(cell_num):
if len_per_cell is not None:
cur_ind = np.arange(j * len_per_cell, (j + 1) * len_per_cell)
Y, arclength, T_bool = remove_redundant_points_trajectory(
X[cur_ind], tol=dist_min, output_discard=True
)
if arc_sample:
Y, arclength, T = arclength_sampling(Y, arclength / 1000, t=t[~T_bool])
else:
T = t[~T_bool]
else:
Y, T = X[j].T, t[j] if type(t[j]) == np.ndarray else np.array(t[j])
knn_dist, knn_ind = kdt.query(Y, k=1)
# set up a dataframe with group and time
pass_t = np.where(knn_dist < dist_threshold)[0]
pass_df = pd.DataFrame({"group": adata[knn_ind[pass_t]].obs[group], "t": T[pass_t]})
# only consider trajectories that pass through at least 10 cells of a group as a confident pass
pass_group_counter = pass_df.group.value_counts()
pass_groups, confident_pass_check = (
pass_group_counter.index.tolist(),
np.where(pass_group_counter > 10)[0],
)
# assign the transition matrix and average transition time
if len(confident_pass_check) > 0:
ind_other_cell_type = [uniq_grp.index(k) for k in np.array(pass_groups)[confident_pass_check]]
grp_graph[i, ind_other_cell_type] += 1
grp_avg_time[i, ind_other_cell_type] += (
pass_df.groupby("group")["t"].mean()[confident_pass_check].values
)
# average across cells
grp_avg_time[i, :] /= grp_graph[i, :]
grp_graph[i, :] /= cell_num
else:
raise NotImplementedError("Only vector field (vf) or Markov chain (markov) based lumping are supported.")
if prune_graph:
grp_graph = prune_transition(
adata,
group,
basis,
graph_mat=grp_graph,
**kwargs,
)
adata.uns[group + "_graph"] = {"group_graph": grp_graph, "group_avg_time": grp_avg_time, "group_names": uniq_grp}
timer_logger.finish_progress(progress_name="State graph estimation")
return adata
def tree_model(
adata: anndata.AnnData,
group: str,
progenitor: str,
terminators: List[str],
basis: str = "umap",
n_neighbors: int = 30,
neighbor_key: Union[str, None] = None,
graph_mat: np.ndarray = None,
state_graph_method: str = "vf",
prune_graph: bool = True,
row_norm: bool = True,
) -> pd.DataFrame:
"""This function learns a tree model of cell states (types).
It is based on the shortest paths from the source to the target cell types on the pruned, vector-field-based
cell-type transition graph. The pruning restricts cell-state transitions to those between cell states that are
nearby in gene expression space (often a low-dimensional representation of it).
Parameters
----------
adata:
AnnData object.
group:
The cell-group key in `adata.obs` that will be used to build the transition graph and lineage tree.
progenitor:
The source cell type name of the lineage tree.
terminators:
The terminal cell type names of the lineage tree.
basis:
The basis that will be used to build the k-nearest neighbor graph when neighbor_key is not set.
n_neighbors:
The number of neighbors that will be used to build the k-nn graph, passed to `dyn.tl.neighbors` function. Not
used when neighbor_key provided.
neighbor_key:
The nearest neighbor graph key in `adata.obsp`. This nearest neighbor graph will be used to build a
gene-expression space based cell-type level connectivity graph.
state_graph_method:
Method that will be used to build the initial state graph.
prune_graph: `bool` (default: `True`)
Whether to prune the transition graph based on cell similarities in `basis` bases first before learning tree
model.
row_norm: `bool` (default: `True`)
Whether to normalize each row so that it sums to 1. Note that rows and columns of the transition matrix
correspond to sources and targets, respectively, in dynamo by default.
Returns
-------
res:
The final tree model of cell groups. See following example on how to visualize the tree via dynamo.
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.pancreatic_endocrinogenesis()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata)
>>> dyn.vf.VectorField(adata, basis='umap', pot_curl_div=False)
>>> dyn.pd.state_graph(adata, group='clusters', basis='umap')
>>> res = dyn.pd.tree_model(adata, group='clusters', basis='umap')
>>> # in the following we first copy the state_graph result to a new key and then replace the `group_graph` key of
>>> # the state_graph result and visualize tree model via dynamo.
>>> adata.obs['clusters2'] = adata.obs['clusters'].copy()
>>> adata.uns['clusters2_graph'] = adata.uns['clusters_graph'].copy()
>>> adata.uns['clusters2_graph']['group_graph'] = res
>>> dyn.pl.state_graph(adata, group='clusters2', keep_only_one_direction=False, transition_threshold=None,
>>> color='clusters2', basis='umap', show_legend='on data')
"""
logger = LoggerManager.gen_logger("dynamo-tree_model")
logger.log_time()
data = adata.obs
groups = data[group]
uniq_grps, data[group] = groups.unique(), list(groups)
progenitor = progenitor[0] if type(progenitor) is not str else progenitor
if progenitor not in uniq_grps:
raise Exception(f"progenitor has to be in adata.obs[{group}], but you have {progenitor}. ")
else:
progenitor = list(uniq_grps).index(progenitor)
if not set(terminators) <= set(uniq_grps):
raise Exception(f"all terminators have to be in adata.obs[{group}], but you have {terminators}.")
else:
terminators = [list(uniq_grps).index(i) for i in terminators]
if prune_graph:
M = prune_transition(
adata,
group,
basis,
n_neighbors,
neighbor_key,
graph_mat,
state_graph_method,
)
else:
M = graph_mat
if np.any(M < 0):
main_warning("the transition graph have negative values.")
M[M < 0] = 0
M += 1e-5 - 1e-5  # adding +0.0 normalizes any -0.0 entries to +0.0
if row_norm:
M /= M.sum(1)[:, None]  # divide each row by its row sum
M[M > 0] = 1 - M[M > 0] # because it is shortest path, so we need to use 1 - M[M > 0]
D, Pr = shortest_path(np.copy(M, order="c"), directed=False, method="FW", return_predecessors=True)
res = np.zeros(M.shape)
# this builds the tree based on each shortest path connecting the source to each target cell type
main_info("building the tree model based on each shortest path connecting the source to each target cell type in the pruned graph.")
for j in terminators:
p = j
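# walk back through scipy's predecessor matrix (-9999 means 'no predecessor') from each terminal
# cell type to the progenitor, marking every edge on the path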
while Pr[progenitor, p] != -9999:
res[Pr[progenitor, p], p] = 1
p = Pr[progenitor, p]
res = pd.DataFrame(res, index=uniq_grps, columns=uniq_grps)
logger.finish_progress(progress_name="tree_model building")
return res
|
the-stack_0_16795 | """
Create / compile projects for .NET version of rhino3dm
"""
import os
import sys
import fileinput
import shutil
def system(cmd):
# copied from setup.py
rv = os.system(cmd)
rc = rv if os.name == 'nt' else os.WEXITSTATUS(rv)
if (rc != 0):
raise RuntimeError('The command "{}" exited with {}'.format(cmd, rc))
def methodgen(dotnetcore):
# set up args to pass to methodgen application
dir_cpp = os.getcwd() + '/librhino3dm_native'
dir_cs = os.getcwd() + '/dotnet'
path_replace = '../lib/opennurbs'
args = ' "{0}" "{1}" "{2}"'.format(dir_cpp, dir_cs, path_replace)
if dotnetcore:
# staging and compilation occurs in the build directory
build_dir = "build/methodgen"
if not os.path.exists(build_dir):
if(not os.path.exists("build")):
os.mkdir("build")
os.mkdir(build_dir)
src_files = os.listdir('./methodgen')
for file_name in src_files:
if file_name.endswith('.cs'):
full_path = os.path.join('./methodgen', file_name)
if os.path.isfile(full_path):
shutil.copy(full_path, build_dir)
if file_name.endswith('.core'):
full_path = os.path.join('./methodgen', file_name)
if os.path.isfile(full_path):
shutil.copy(full_path, build_dir + '/methodgen.csproj')
# compile methodgen
system('dotnet build ' + './' + build_dir)
# execute methodgen
system('dotnet run --project ' + build_dir + '/methodgen.csproj ' + args)
else:
# compile methodgen
# system('msbuild ./methodgen')
# execute methodgen for Rhino3dm
app = os.getcwd() + '/methodgen/bin/Debug/methodgen.exe'
if os.name == 'nt': # windows build
system(app + args)
else:
system('mono ' + app + args)
def create_cpp_project(bitness, compile):
# staging and compilation occurs in the build directory
build_dir = "build/librhino3dm_native_{0}".format(bitness)
if not os.path.exists(build_dir):
if(not os.path.exists("build")):
os.mkdir("build")
os.mkdir(build_dir)
os.chdir(build_dir)
if os.name == 'nt': # windows build
arch = ""
if bitness == 64:
arch = "-A x64"
else:
arch = "-A Win32"
# args = '-G "Visual Studio 16 2019" -A -A Win64'.format(arch)
args = '-G "Visual Studio 16 2019" {0}'.format(arch)
system('cmake ' + args + ' ../../librhino3dm_native')
if bitness == 64:
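# patch the generated Visual Studio project so 64-bit builds define WIN64 instead of WIN32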
for line in fileinput.input("librhino3dm_native.vcxproj", inplace=1):
print(line.replace("WIN32;", "WIN64;"), end='')  # `line` already ends with a newline
if compile:
system("cmake --build . --config Release --target librhino3dm_native")
else:
system("cmake ../../librhino3dm_native")
if compile:
system("make")
os.chdir("../..")
def compilerhino3dm(dotnetcore):
if dotnetcore:
conf = '/p:Configuration=Release;OutDir="../build/dotnet"'
system('dotnet build ./dotnet/Rhino3dm.core.csproj {}'.format(conf))
else:
conf = '/p:Configuration=Release;OutDir="../build/dotnet"'
system('msbuild ./dotnet/Rhino3dm.csproj {}'.format(conf))
if __name__ == '__main__':
dotnetcore = False
if len(sys.argv) > 1 and sys.argv[1] == '--core':
dotnetcore = True
if sys.platform.startswith('linux'):
dotnetcore = True
# make the script always execute from it's directory
scriptpath = os.path.realpath(__file__)
os.chdir(os.path.dirname(scriptpath))
# always compile and run methodgen first to make sure the pinvoke
# definitions are in place
methodgen(dotnetcore)
# only create 32 bit compile on windows
# if os.name == 'nt':
# create_cpp_project(32, True)
create_cpp_project(64, True)
# compile Rhino3dm .NET project
compilerhino3dm(dotnetcore)
|
the-stack_0_16796 | """
Helpers for managing Docker network settings.
"""
from typing import Callable, Optional, Union
import click
import docker
from docker.models.networks import Network
from ._common import docker_client
def _validate_docker_network(
ctx: click.core.Context,
param: Union[click.core.Option, click.core.Parameter],
value: Optional[Union[int, bool, str]],
) -> Network:
"""
Validate that a given network name is an existing Docker network name.
"""
# We "use" variables to satisfy linting tools.
for _ in (ctx, param):
pass
client = docker_client()
try:
return client.networks.get(network_id=value)
except docker.errors.NotFound:
message = (
'No such Docker network with the name "{value}".\n'
'Docker networks are:\n{networks}'
).format(
value=value,
networks='\n'.join(
[network.name for network in client.networks.list()],
),
)
raise click.BadParameter(message=message)
def docker_network_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for choosing a Docker network.
"""
click_option_function = click.option(
'--network',
type=str,
default='bridge',
help=(
'The Docker network containers will be connected to. '
'It may not be possible to SSH to containers on a custom network '
'on macOS. '
),
callback=_validate_docker_network,
) # type: Callable[[Callable[..., None]], Callable[..., None]]
function = click_option_function(command) # type: Callable[..., None]
return function
|
the-stack_0_16797 | # coding: utf-8
import os
import pytest
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable, TuneError
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
from ray.tune.result import TRAINING_ITERATION
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial, Checkpoint
from ray.tune.resources import Resources
from ray.cluster_utils import Cluster
from ray.tune.utils.placement_groups import PlacementGroupFactory
class TrialExecutorInsufficientResourcesTest(unittest.TestCase):
def setUp(self):
os.environ["TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S"] = "1"
self.cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": 4,
"num_gpus": 2,
})
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
# no autoscaler case, resource is not sufficient. Raise error.
def testRaiseErrorNoAutoscaler(self):
def train(config):
pass
with pytest.raises(TuneError) as cm:
tune.run(
train,
resources_per_trial={
"cpu": 5, # more than what the cluster can offer.
"gpu": 3,
})
msg = ("You asked for 5.0 cpu and 3.0 gpu per trial, "
"but the cluster only has 4.0 cpu and 2.0 gpu. "
"Stop the tuning job and "
"adjust the resources requested per trial "
"(possibly via `resources_per_trial` "
"or via `num_workers` for rllib) "
"and/or add more resources to your Ray runtime.")
assert str(cm._excinfo[1]) == msg
class RayTrialExecutorTest(unittest.TestCase):
def setUp(self):
# Wait up to five seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
os.environ["TUNE_TRIAL_RESULT_WAIT_TIME_S"] = "99999"
self.trial_executor = RayTrialExecutor(queue_trials=False)
ray.init(num_cpus=2, ignore_reinit_error=True)
_register_all() # Needed for flaky tests
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def testStartStop(self):
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
running = self.trial_executor.get_running_trials()
self.assertEqual(1, len(running))
self.trial_executor.stop_trial(trial)
def testAsyncSave(self):
"""Tests that saved checkpoint value not immediately set."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
checkpoint = self.trial_executor.save(trial, Checkpoint.PERSISTENT)
self.assertEqual(checkpoint, trial.saving_to)
self.assertEqual(trial.checkpoint.value, None)
self.process_trial_save(trial)
self.assertEqual(checkpoint, trial.checkpoint)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testSaveRestore(self):
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
self.trial_executor.save(trial, Checkpoint.PERSISTENT)
self.process_trial_save(trial)
self.trial_executor.restore(trial)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testPauseResume(self):
"""Tests that pausing works for trials in flight."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testSavePauseResumeErrorRestore(self):
"""Tests that pause checkpoint does not replace restore checkpoint."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
# Save
checkpoint = self.trial_executor.save(trial, Checkpoint.PERSISTENT)
self.assertEqual(Trial.RUNNING, trial.status)
self.assertEqual(checkpoint.storage, Checkpoint.PERSISTENT)
# Process save result (simulates trial runner)
self.process_trial_save(trial)
# Train
self.trial_executor.continue_training(trial)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
# Pause
self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.assertEqual(trial.checkpoint.storage, Checkpoint.MEMORY)
# Resume
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
# Error
trial.set_status(Trial.ERROR)
# Restore
self.trial_executor.restore(trial)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testStartFailure(self):
_global_registry.register(TRAINABLE_CLASS, "asdf", None)
trial = Trial("asdf", resources=Resources(1, 0))
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.ERROR, trial.status)
def testPauseResume2(self):
"""Tests that pausing works for trials being processed."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.fetch_result(trial)
checkpoint = self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.trial_executor.start_trial(trial, checkpoint)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def _testPauseUnpause(self, result_buffer_length):
"""Tests that unpausing works for trials being processed."""
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = f"{result_buffer_length}"
os.environ["TUNE_RESULT_BUFFER_MIN_TIME_S"] = "1"
# Need a new trial executor so the ENV vars are parsed again
self.trial_executor = RayTrialExecutor(queue_trials=False)
base = max(result_buffer_length, 1)
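# with result buffering enabled, each fetched result advances training by `base` iterations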
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
self.assertEqual(trial.last_result.get(TRAINING_ITERATION), base)
self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.trial_executor.unpause_trial(trial)
self.assertEqual(Trial.PENDING, trial.status)
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
self.assertEqual(trial.last_result.get(TRAINING_ITERATION), base * 2)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testPauseUnpauseNoBuffer(self):
self._testPauseUnpause(0)
def testPauseUnpauseTrivialBuffer(self):
self._testPauseUnpause(1)
def testPauseUnpauseActualBuffer(self):
self._testPauseUnpause(8)
def testNoResetTrial(self):
"""Tests that reset handles NotImplemented properly."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
exists = self.trial_executor.reset_trial(trial, {}, "modified_mock")
self.assertEqual(exists, False)
self.assertEqual(Trial.RUNNING, trial.status)
def testResetTrial(self):
"""Tests that reset works as expected."""
class B(Trainable):
def step(self):
return dict(timesteps_this_iter=1, done=True)
def reset_config(self, config):
self.config = config
return True
trials = self.generate_trials({
"run": B,
"config": {
"foo": 0
},
}, "grid_search")
trial = trials[0]
self.trial_executor.start_trial(trial)
exists = self.trial_executor.reset_trial(trial, {"hi": 1},
"modified_mock")
self.assertEqual(exists, True)
self.assertEqual(trial.config.get("hi"), 1)
self.assertEqual(trial.experiment_tag, "modified_mock")
self.assertEqual(Trial.RUNNING, trial.status)
@staticmethod
def generate_trials(spec, name):
suggester = BasicVariantGenerator()
suggester.add_configurations({name: spec})
trials = []
while not suggester.is_finished():
trial = suggester.next_trial()
if trial:
trials.append(trial)
else:
break
return trials
def process_trial_save(self, trial):
"""Simulates trial runner save."""
checkpoint = trial.saving_to
checkpoint_value = self.trial_executor.fetch_result(trial)[-1]
checkpoint.value = checkpoint_value
trial.on_checkpoint(checkpoint)
class RayExecutorQueueTest(unittest.TestCase):
def setUp(self):
self.cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": 1,
"_system_config": {
"num_heartbeats_timeout": 10
}
})
self.trial_executor = RayTrialExecutor(
queue_trials=True, refresh_period=0)
# Pytest doesn't play nicely with imports
_register_all()
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
_register_all() # re-register the evicted objects
def testQueueTrial(self):
"""Tests that reset handles NotImplemented properly."""
def create_trial(cpu, gpu=0):
return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
cpu_only = create_trial(1, 0)
self.assertTrue(self.trial_executor.has_resources_for_trial(cpu_only))
self.trial_executor.start_trial(cpu_only)
gpu_only = create_trial(0, 1)
self.assertTrue(self.trial_executor.has_resources_for_trial(gpu_only))
def testHeadBlocking(self):
# Once resource requests are deprecated, remove this test
os.environ["TUNE_PLACEMENT_GROUP_AUTO_DISABLED"] = "1"
def create_trial(cpu, gpu=0):
return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
gpu_trial = create_trial(1, 1)
self.assertTrue(self.trial_executor.has_resources_for_trial(gpu_trial))
self.trial_executor.start_trial(gpu_trial)
# TODO(rliaw): This behavior is probably undesirable, but right now
# trials with different resource requirements is not often used.
cpu_only_trial = create_trial(1, 0)
self.assertFalse(
self.trial_executor.has_resources_for_trial(cpu_only_trial))
self.cluster.add_node(num_cpus=1, num_gpus=1)
self.cluster.wait_for_nodes()
self.assertTrue(
self.trial_executor.has_resources_for_trial(cpu_only_trial))
self.trial_executor.start_trial(cpu_only_trial)
cpu_only_trial2 = create_trial(1, 0)
self.assertTrue(
self.trial_executor.has_resources_for_trial(cpu_only_trial2))
self.trial_executor.start_trial(cpu_only_trial2)
cpu_only_trial3 = create_trial(1, 0)
self.assertFalse(
self.trial_executor.has_resources_for_trial(cpu_only_trial3))
class RayExecutorPlacementGroupTest(unittest.TestCase):
def setUp(self):
self.head_cpus = 8
self.head_gpus = 4
self.head_custom = 16
self.cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": self.head_cpus,
"num_gpus": self.head_gpus,
"resources": {
"custom": self.head_custom
},
"_system_config": {
"num_heartbeats_timeout": 10
}
})
# Pytest doesn't play nicely with imports
_register_all()
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
_register_all() # re-register the evicted objects
def testResourcesAvailableNoPlacementGroup(self):
def train(config):
tune.report(metric=0, resources=ray.available_resources())
out = tune.run(
train,
resources_per_trial={
"cpu": 1,
"gpu": 1,
"custom_resources": {
"custom": 3
},
"extra_cpu": 3,
"extra_gpu": 1,
"extra_custom_resources": {
"custom": 4
},
})
# Only `cpu`, `gpu`, and `custom_resources` will be "really" reserved,
# the extra_* will just be internally reserved by Tune.
self.assertDictEqual({
key: val
for key, val in out.trials[0].last_result["resources"].items()
if key in ["CPU", "GPU", "custom"]
}, {
"CPU": self.head_cpus - 1.0,
"GPU": self.head_gpus - 1.0,
"custom": self.head_custom - 3.0
})
def testResourcesAvailableWithPlacementGroup(self):
def train(config):
tune.report(metric=0, resources=ray.available_resources())
head_bundle = {"CPU": 1, "GPU": 0, "custom": 4}
child_bundle = {"CPU": 2, "GPU": 1, "custom": 3}
placement_group_factory = PlacementGroupFactory(
[head_bundle, child_bundle, child_bundle])
out = tune.run(train, resources_per_trial=placement_group_factory)
available = {
key: val
for key, val in out.trials[0].last_result["resources"].items()
if key in ["CPU", "GPU", "custom"]
}
if not available:
self.skipTest("Warning: Ray reported no available resources, "
"but this is an error on the Ray core side. "
"Skipping this test for now.")
self.assertDictEqual(
available, {
"CPU": self.head_cpus - 5.0,
"GPU": self.head_gpus - 2.0,
"custom": self.head_custom - 10.0
})
def testPlacementGroupFactoryEquality(self):
"""
Test that two different placement group factory objects are considered
equal and evaluate to the same hash.
"""
from collections import Counter
pgf_1 = PlacementGroupFactory([{
"CPU": 2,
"GPU": 4,
"custom": 7
}, {
"GPU": 2,
"custom": 1,
"CPU": 3
}], "PACK", "no_name", None)
pgf_2 = PlacementGroupFactory(
[{
"custom": 7,
"GPU": 4,
"CPU": 2,
}, {
"custom": 1,
"GPU": 2,
"CPU": 3
}],
strategy="PACK",
name="no_name",
lifetime=None)
self.assertEqual(pgf_1, pgf_2)
# Hash testing
counter = Counter()
counter[pgf_1] += 1
counter[pgf_2] += 1
self.assertEqual(counter[pgf_1], 2)
self.assertEqual(counter[pgf_2], 2)
class LocalModeExecutorTest(RayTrialExecutorTest):
def setUp(self):
ray.init(local_mode=True)
self.trial_executor = RayTrialExecutor(queue_trials=False)
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_0_16798 | from collections import namedtuple
from contextlib import contextmanager
import datetime
import re
from py._code.code import TerminalRepr, ReprFileLocation
import pytest
from pytestqt.qt_compat import qt_api
from pytestqt.utils import get_marker
class QtLoggingPlugin:
"""
Plugin responsible for installing a QtMessageHandler before each
test and augment reporting if the test failed with the messages captured.
"""
LOG_FAIL_OPTIONS = ["NO", "CRITICAL", "WARNING", "DEBUG", "INFO"]
def __init__(self, config):
self.config = config
def pytest_runtest_setup(self, item):
if get_marker(item, "no_qt_log"):
return
m = get_marker(item, "qt_log_ignore")
if m:
if not set(m.kwargs).issubset({"extend"}):
raise ValueError(
"Invalid keyword arguments in {!r} for "
"qt_log_ignore mark.".format(m.kwargs)
)
if m.kwargs.get("extend", True):
config_regexes = self.config.getini("qt_log_ignore")
ignore_regexes = config_regexes + list(m.args)
else:
ignore_regexes = m.args
else:
ignore_regexes = self.config.getini("qt_log_ignore")
item.qt_log_capture = _QtMessageCapture(ignore_regexes)
item.qt_log_capture._start()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
"""Add captured Qt messages to test item report if the call failed."""
outcome = yield
if not hasattr(item, "qt_log_capture"):
return
if call.when == "call":
report = outcome.get_result()
m = get_marker(item, "qt_log_level_fail")
if m:
log_fail_level = m.args[0]
else:
log_fail_level = self.config.getini("qt_log_level_fail")
assert log_fail_level in QtLoggingPlugin.LOG_FAIL_OPTIONS
# make test fail if any records were captured which match
# log_fail_level
if report.outcome != "failed":
for rec in item.qt_log_capture.records:
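# qt.modeltest warnings come from the model tester and should fail the test
# regardless of the configured fail level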
is_modeltest_error = (
rec.context is not None
and rec.context.category == "qt.modeltest"
and rec.matches_level("WARNING")
)
if (
rec.matches_level(log_fail_level) and not rec.ignored
) or is_modeltest_error:
report.outcome = "failed"
if report.longrepr is None:
report.longrepr = _QtLogLevelErrorRepr(
item, log_fail_level, is_modeltest_error
)
break
# if test has failed, add recorded messages to its terminal
# representation
if not report.passed:
long_repr = getattr(report, "longrepr", None)
if hasattr(long_repr, "addsection"): # pragma: no cover
log_format = self.config.getoption("qt_log_format")
context_format = None
if log_format is None:
context_format = "{rec.context.file}:{rec.context.function}:{rec.context.line}:\n"
log_format = " {rec.type_name}: {rec.message}"
lines = []
for rec in item.qt_log_capture.records:
suffix = " (IGNORED)" if rec.ignored else ""
if (
rec.context is not None
and (
rec.context.file is not None
or rec.context.function is not None
or rec.context.line != 0
)
and context_format is not None
):
context_line = context_format.format(rec=rec)
lines.append(context_line)
else:
log_format = log_format.lstrip()
line = log_format.format(rec=rec) + suffix
lines.append(line)
if lines:
long_repr.addsection("Captured Qt messages", "\n".join(lines))
item.qt_log_capture._stop()
del item.qt_log_capture
class _QtMessageCapture:
"""
Captures Qt messages when its `handle` method is installed using
qInstallMessageHandler, and stores them into `records` attribute.
:attr _records: list of Record instances.
:attr _ignore_regexes: list of regexes (as strings) that define if a record
should be ignored.
"""
def __init__(self, ignore_regexes):
self._records = []
self._ignore_regexes = ignore_regexes or []
self._previous_handler = None
def _start(self):
"""
Start receiving messages from Qt.
"""
previous_handler = qt_api.QtCore.qInstallMessageHandler(
self._handle_with_context
)
self._previous_handler = previous_handler
def _stop(self):
"""
Stop receiving messages from Qt, restoring the previously installed
handler.
"""
qt_api.QtCore.qInstallMessageHandler(self._previous_handler)
@contextmanager
def disabled(self):
"""
Context manager that temporarily disables logging capture while
inside it.
"""
self._stop()
try:
yield
finally:
self._start()
_Context = namedtuple("_Context", "file function line category")
def _append_new_record(self, msg_type, message, context):
"""
Creates a new Record instance and stores it.
:param msg_type: Qt message typ
:param message: message string, if bytes it will be converted to str.
:param context: QMessageLogContext object or None
"""
def to_unicode(s):
if isinstance(s, bytes):
s = s.decode("utf-8", "replace")
return s
message = to_unicode(message)
ignored = False
for regex in self._ignore_regexes:
if re.search(regex, message) is not None:
ignored = True
break
if context is not None:
context = self._Context(
to_unicode(context.file),
to_unicode(context.function),
context.line,
to_unicode(context.category),
)
self._records.append(Record(msg_type, message, ignored, context))
def _handle_with_context(self, msg_type, context, message):
"""
Method to be installed using qInstallMessageHandler,
stores each message into the `_records` attribute.
"""
self._append_new_record(msg_type, message, context=context)
@property
def records(self):
"""Access messages captured so far.
:rtype: list of `Record` instances.
"""
return self._records[:]
class Record:
"""Hold information about a message sent by one of Qt log functions.
:ivar str message: message contents.
:ivar Qt.QtMsgType type: enum that identifies message type
:ivar str type_name: ``type`` as string: ``"QtInfoMsg"``, ``"QtDebugMsg"``,
``"QtWarningMsg"`` or ``"QtCriticalMsg"``.
:ivar str log_type_name:
type name similar to the logging package: ``INFO``, ``DEBUG``,
``WARNING`` and ``CRITICAL``.
:ivar datetime.datetime when: when the message was captured
:ivar bool ignored: If this record matches a regex from the "qt_log_ignore"
option.
:ivar context: a namedtuple containing the attributes ``file``,
``function``, ``line``. Can be None if no context is available for the
message.
"""
def __init__(self, msg_type, message, ignored, context):
self._type = msg_type
self._message = message
self._type_name = self._get_msg_type_name(msg_type)
self._log_type_name = self._get_log_type_name(msg_type)
self._when = datetime.datetime.now()
self._ignored = ignored
self._context = context
message = property(lambda self: self._message)
type = property(lambda self: self._type)
type_name = property(lambda self: self._type_name)
log_type_name = property(lambda self: self._log_type_name)
when = property(lambda self: self._when)
ignored = property(lambda self: self._ignored)
context = property(lambda self: self._context)
@classmethod
def _get_msg_type_name(cls, msg_type):
"""
Return a string representation of the given QtMsgType enum
value.
"""
if not getattr(cls, "_type_name_map", None):
cls._type_name_map = {
qt_api.QtCore.QtMsgType.QtDebugMsg: "QtDebugMsg",
qt_api.QtCore.QtMsgType.QtWarningMsg: "QtWarningMsg",
qt_api.QtCore.QtMsgType.QtCriticalMsg: "QtCriticalMsg",
qt_api.QtCore.QtMsgType.QtFatalMsg: "QtFatalMsg",
qt_api.QtCore.QtMsgType.QtInfoMsg: "QtInfoMsg",
}
return cls._type_name_map[msg_type]
@classmethod
def _get_log_type_name(cls, msg_type):
"""
Return a string representation of the given QtMsgType enum
value in the same style used by the builtin logging package.
"""
if not getattr(cls, "_log_type_name_map", None):
cls._log_type_name_map = {
qt_api.QtCore.QtMsgType.QtDebugMsg: "DEBUG",
qt_api.QtCore.QtMsgType.QtWarningMsg: "WARNING",
qt_api.QtCore.QtMsgType.QtCriticalMsg: "CRITICAL",
qt_api.QtCore.QtMsgType.QtFatalMsg: "FATAL",
qt_api.QtCore.QtMsgType.QtInfoMsg: "INFO",
}
return cls._log_type_name_map[msg_type]
def matches_level(self, level):
assert level in QtLoggingPlugin.LOG_FAIL_OPTIONS
if level == "NO":
return False
elif level == "INFO":
return self.log_type_name in ("INFO", "DEBUG", "WARNING", "CRITICAL")
elif level == "DEBUG":
return self.log_type_name in ("DEBUG", "WARNING", "CRITICAL")
elif level == "WARNING":
return self.log_type_name in ("WARNING", "CRITICAL")
elif level == "CRITICAL":
return self.log_type_name in ("CRITICAL",)
else: # pragma: no cover
raise ValueError(f"log_fail_level unknown: {level}")
class _QtLogLevelErrorRepr(TerminalRepr):
"""
TerminalRepr of a test which didn't fail by normal means, but emitted
messages at or above the allowed level.
"""
def __init__(self, item, level, is_modeltest_error):
if is_modeltest_error:
msg = "Qt modeltester errors"
else:
msg = "Failure: Qt messages with level {0} or above emitted"
path, line_index, _ = item.location
self.fileloc = ReprFileLocation(
path, lineno=line_index + 1, message=msg.format(level.upper())
)
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, out):
self.fileloc.toterminal(out)
for name, content, sep in self.sections:
out.sep(sep, name)
out.line(content)
|
the-stack_0_16799 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""General utilities for Transport classes."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import time
from paramiko import ProxyCommand
from six.moves import range
from aiida.common.extendeddicts import FixedFieldsAttributeDict
class FileAttribute(FixedFieldsAttributeDict):
"""
A class, resembling a dictionary, to describe the attributes of a file,
that is returned by get_attribute().
Possible keys: st_size, st_uid, st_gid, st_mode, st_atime, st_mtime
"""
_valid_fields = (
'st_size',
'st_uid',
'st_gid',
'st_mode',
'st_atime',
'st_mtime',
)
class _DetachedProxyCommand(ProxyCommand):
"""Modifies paramiko's ProxyCommand by launching the process in a separate process group."""
def __init__(self, command_line): # pylint: disable=super-init-not-called
# Note that the super().__init__ MUST NOT be called here, otherwise
# two subprocesses will be created.
import os
from subprocess import Popen, PIPE
from shlex import split as shlsplit
self.cmd = shlsplit(command_line)
self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=0, preexec_fn=os.setsid)
self.timeout = None
def close(self):
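# try to terminate the proxy process politely, then fall back to kill() if it has not exited after ~2 seconds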
try:
self.process.terminate()
# In case the process doesn't exist anymore
except OSError:
pass
for _ in range(10):
if self.process.poll() is not None:
break
time.sleep(0.2)
else:
try:
self.process.kill()
# In case the process doesn't exist anymore
except OSError:
pass
for _ in range(10):
if self.process.poll() is not None:
break
time.sleep(0.2)
def copy_from_remote_to_remote(transportsource, transportdestination, remotesource, remotedestination, **kwargs):
"""
Copy files or folders from a remote computer to another remote computer.
:param transportsource: transport to be used for the source computer
:param transportdestination: transport to be used for the destination computer
:param str remotesource: path to the remote source directory / file
:param str remotedestination: path to the remote destination directory / file
:param kwargs: keyword parameters passed to the final put,
except for 'dereference' that is passed to the initial get
.. note:: it uses the method transportsource.copy_from_remote_to_remote
"""
transportsource.copy_from_remote_to_remote(transportdestination, remotesource, remotedestination, **kwargs)
|
the-stack_0_16800 | import numpy as np
import scipy as sp
from scipy.linalg import block_diag
from qpsolvers import solve_qp
import sympy as sy
from sympy.physics import mechanics
from scipy.signal import cont2discrete
class SimModel(object):
def __init__(self, param=None, NX=None, NU=None):
assert param is not None
assert NX is not None
assert NU is not None
self.param = param
self.NX = NX
self.NU = NU
self.jacA, self.jacB = self.genLinModel()
self.force = self.genDynamicEquation()
def SYMPY_rh_eq(self):
raise NotImplementedError('SYMPY_rh_eq is not implemented')
def genJacobian(self):
MAT = self.SYMPY_rh_eq()
q = sy.symbols('q:{0}'.format(self.NX))
u = sy.symbols('u:{0}'.format(self.NU))
return MAT.jacobian(q), MAT.jacobian(u)
def genDynamicEquation(self):
q = sy.symbols("q:{0}".format(self.NX))
u = sy.symbols("u:{0}".format(self.NU))
return sy.lambdify([q,u], self.SYMPY_rh_eq(), [{'atan':np.arctan, 'atan2':np.arctan2}, "numpy"])
def genLinModel(self):
q = sy.symbols("q:{0}".format(self.NX))
u = sy.symbols("u:{0}".format(self.NU))
A, B = self.genJacobian()
return (sy.lambdify([q,u], np.squeeze(A), [{'atan':np.arctan, 'atan2':np.arctan2}, "numpy"]),
sy.lambdify([q,u], np.squeeze(B), [{'atan':np.arctan, 'atan2':np.arctan2}, "numpy"]))
def genDModel(self, x, dq, u, dT=0.1):
vector = np.hstack((x, dq))
f = self.force(vector, u).T.flatten()
A_c = np.array(self.jacA(vector, u))
B_c = np.array(self.jacB(vector, u))
g_c = f - A_c@vector - B_c@u
B = np.hstack((B_c, g_c.reshape((-1,1))))
A_d, B_d, _, _, _ = cont2discrete((A_c, B, 0, 0), dT)
g_d = B_d[:,self.NU]
B_d = B_d[:,0:self.NU]
return A_d, B_d, g_d
def PredictForwardEuler(self, x, dq, u, dt):
vector = np.hstack((x, dq))
d_vector = self.force(vector, u).T.flatten()
vector = vector + dt * d_vector
return vector[0:3], vector[3:6]
class NMPC():
def __init__(self, dT=0.02, time_horizon = 20,
H = None, J = None, q = None,
RH = None, RJ = None, r = None,
H_N = None, J_N = None, q_N = None,
dmodel = None,
G = None, h = None,
normalization_x = None,
normalization_u = None,
x_ubounds=[], x_lbounds=[],
u_ubounds=[], u_lbounds=[]):
assert H is not None
assert J is not None
assert H_N is not None
assert J_N is not None
assert dmodel is not None
self.dT = dT
self.time_horizon = time_horizon
self.model = dmodel
self.x_l = np.asarray(x_lbounds,dtype=np.float64)
self.x_u = np.asarray(x_ubounds,dtype=np.float64)
self.u_l = np.asarray(u_lbounds,dtype=np.float64)
self.u_u = np.asarray(u_ubounds,dtype=np.float64)
self.NX = self.x_u.shape[0]
self.NU = self.u_u.shape[0]
class StaticStageCost():
def __init__(self, weight):
self.weight = np.asarray(weight)
def __call__(self, x_guess, u_guess, x_ref):
return self.weight
class StaticValueFunction():
def __init__(self, weight):
self.weight = np.asarray(weight)
def __call__(self, x_guess, x_ref):
return self.weight
self.H = H if callable(H) else StaticStageCost(H)
self.J = J if callable(J) else StaticStageCost(J)
self.H_N = H_N if callable(H_N) else StaticValueFunction(H_N)
self.J_N = J_N if callable(J_N) else StaticValueFunction(J_N)
self.R_H = RH if callable(RH) else StaticStageCost(RH)
self.R_J = RJ if callable(RJ) else StaticStageCost(RJ)
if q is None:
q = np.zeros(self.NX)
self.q = q
if r is None:
r = np.zeros(self.NU)
self.r = r
if G is None:
self.G = None
self.h = None
else:
assert h is not None
self.G = G if callable(G) else StaticStageCost(G)
self.h = h if callable(h) else StaticStageCost(h)
if normalization_x is not None:
self.Norm = np.diag(normalization_x*(time_horizon+1) + normalization_u*time_horizon)
self.Norm_inv = np.linalg.inv(self.Norm)
else:
self.Norm = None
self.Norm_inv = None
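# A minimal usage sketch (hypothetical names; assumes a concrete SimModel subclass and
# cost weights / constraint callables defined elsewhere):
#   model = MySimModel(param=params, NX=6, NU=2)
#   mpc = NMPC(dT=0.02, time_horizon=20,
#              H=Q, J=q_lin, RH=R, RJ=r_lin, H_N=Q_N, J_N=q_N,
#              dmodel=model.genDModel, G=G_fun, h=h_fun,
#              x_ubounds=x_ub, x_lbounds=x_lb, u_ubounds=u_ub, u_lbounds=u_lb)
#   x_new, u_new, ok = mpc.iterate_NMPC(x_guess, u_guess, x_ref)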
def iterate_NMPC(self, x_guess, u_guess, x_ref, verbose=False, warmstart=False):
T = self.time_horizon
X_DIM = self.NX*(T+1)
U_DIM = self.NU*(T)
P_Q_blocks = []
q_q_blocks = []
P_R_blocks = []
q_r_blocks = []
for k in range(T+1):
if k==T:
P_Q_blocks.append(self.H_N(x_guess[:,k], x_ref[:,k]))
q_q_blocks.append(self.J_N(x_guess[:,k], x_ref[:,k])+self.q)
else:
P_Q_blocks.append(self.H(x_guess[:,k], u_guess[:,k], x_ref[:,k]))
q_q_blocks.append(self.J(x_guess[:,k], u_guess[:,k], x_ref[:,k])+self.q)
P_R_blocks.append(self.R_H(x_guess[:,k], u_guess[:,k], x_ref[:,k]))
q_r_blocks.append(self.R_J(x_guess[:,k], u_guess[:,k], x_ref[:,k])+self.r)
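# assemble the block-diagonal quadratic cost (state blocks first, then input blocks) and symmetrize it for the QP solver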
P = block_diag(*P_Q_blocks,*P_R_blocks)
q = np.hstack(q_q_blocks+q_r_blocks)
P = 0.5*(P.T+P)
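# linearize and discretize the dynamics around the current guess; Ad, Bd, gd define the
# equality constraints x_{k+1} = Ad_k x_k + Bd_k u_k + gd_k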
Ad, Bd, gd = zip(*[self.model(q[:3], q[3:], u, self.dT)
for q, u in zip(x_guess.T, u_guess.T)])
A = block_diag(*Ad)
B = block_diag(*Bd)
b = np.hstack((
x_guess[:,0],
- np.hstack(gd)
))
A = np.block([
[np.eye(self.NX), np.zeros((self.NX, X_DIM + U_DIM - self.NX))],
[A, np.zeros((X_DIM - self.NX, self.NX)), B]
])
A -= np.block([
[np.zeros((self.NX, X_DIM+U_DIM))],
[
np.zeros((X_DIM-self.NX, self.NX)),
np.eye(X_DIM - self.NX),
np.zeros_like(B)
]
])
### Track constraint
G = [
self.G(x_g, x_r)
for x_g,x_r
in zip(x_guess.T, x_ref.T)
]
h_block = [
np.asarray(self.G(x_g, x_r))@x_g - np.asarray(self.h(x_g, x_r) )
for x_g, x_r
in zip(x_guess.T, x_ref.T)
]
G = np.hstack([
block_diag(*G),
np.zeros((T+1, U_DIM))
])
h = np.hstack(h_block)
x_l = np.tile(self.x_l, T+1)
# Set trust region
x_l[6::self.NX] = -0.2 + x_guess[6]
u_l = np.tile(self.u_l, T)
x_l = np.hstack((x_l, u_l))
x_u = np.tile(self.x_u, T+1)
# Set trust region
x_u[6::self.NX] = 0.2 + x_guess[6]
u_u = np.tile(self.u_u, T)
x_u = np.hstack((x_u, u_u))
#print([x.shape for x in [P, q, G, h, A, b, x_l, x_u]])
try:
if self.Norm is None:
ret = solve_qp(P, q, G, h, A, b, x_l, x_u, solver='osqp')
else:
init_val = [email protected]((x_guess.T.ravel(), u_guess.T.ravel()))
#print("Equation Const", np.all(A@init_val==b))
#print("InEquation Const", np.all(G@init_val<=h))
#print("Lower bound Const", np.all(init_val>=x_l))
#print("Upper bound Const", np.all(init_val<=x_u))
#print("")
ret = solve_qp(self.Norm@[email protected],
[email protected],
[email protected], h,
self.Norm_inv[:X_DIM,:X_DIM]@[email protected], self.Norm_inv[:X_DIM,:X_DIM]@b,
self.Norm_inv@x_l, self.Norm_inv@x_u,
initvals=init_val, solver='osqp')
if ret[0] is not None:
ret = self.Norm@ret
except Exception as e:
print(e)
return np.zeros_like(x_guess), np.zeros_like(u_guess), None
#if ret.dtype != np.object:
if ret is not None:
ret_x = ret[:X_DIM].reshape((-1, self.NX)).T
ret_u = ret[X_DIM:].reshape((-1, self.NU)).T
return ret_x, ret_u, 0.
else:
return x_guess, u_guess, None
|
the-stack_0_16802 | import pytest
from tornado import gen
from distributed import Executor, Scheduler
from distributed.diagnostics.progressbar import TextProgressBar, progress
from distributed.utils_test import (cluster, _test_cluster, loop, inc,
div, dec, cluster_center)
from time import time, sleep
def test_text_progressbar(capsys, loop):
with cluster(nanny=True) as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
futures = e.map(inc, range(10))
p = TextProgressBar(futures, interval=0.01, complete=True)
e.gather(futures)
start = time()
while p.status != 'finished':
sleep(0.01)
assert time() - start < 5
check_bar_completed(capsys)
assert p._last_response == {'all': 10,
'remaining': 0,
'status': 'finished'}
assert p.stream.closed()
def test_TextProgressBar_error(loop, capsys):
@gen.coroutine
def f(c, a, b):
s = Scheduler((c.ip, c.port), loop=loop)
yield s.sync_center()
done = s.start(0)
s.update_graph(tasks={'x': (div, 1, 0)},
keys=['x'],
dependencies={})
progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
start=False, interval=0.01)
yield progress.listen()
assert progress.status == 'error'
assert progress.stream.closed()
progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
start=False, interval=0.01)
yield progress.listen()
assert progress.status == 'error'
assert progress.stream.closed()
s.close()
yield done
_test_cluster(f, loop)
def test_TextProgressBar_empty(loop, capsys):
@gen.coroutine
def f(c, a, b):
s = Scheduler((c.ip, c.port), loop=loop)
yield s.sync_center()
done = s.start(0)
progress = TextProgressBar([], scheduler=(s.ip, s.port), start=False,
interval=0.01)
yield progress.listen()
assert progress.status == 'finished'
check_bar_completed(capsys)
s.close()
yield done
_test_cluster(f, loop)
def check_bar_completed(capsys, width=40):
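# the progress bar rewrites itself with carriage returns; inspect the final frame
# for a full bar and '100% Completed'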
out, err = capsys.readouterr()
bar, percent, time = [i.strip() for i in out.split('\r')[-1].split('|')]
assert bar == '[' + '#'*width + ']'
assert percent == '100% Completed'
def test_progress_function(loop, capsys):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
f = e.submit(lambda: 1)
g = e.submit(lambda: 2)
progress([[f], [[g]]], notebook=False)
check_bar_completed(capsys)
|
the-stack_0_16803 | import sys
import os.path
from setuptools import setup, find_packages
PACKAGE_NAME = 'arcana'
# Get version from module inside package
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
PACKAGE_NAME))
from __about__ import __version__, install_requires, tests_require # noqa pylint: disable=no-name-in-module
sys.path.pop(0)
setup(
name=PACKAGE_NAME,
version=__version__,
author='Tom G. Close',
author_email='[email protected]',
packages=find_packages(),
url='https://github.com/monashbiomedicalimaging/arcana',
license='The Apache Software Licence 2.0',
description=(
'Abstracted repository-centric analysis framework'),
long_description=open('README.rst').read(),
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Medical Science Apps."],
keywords='repository analysis')
|
the-stack_0_16804 | """
All joins in a single app, inspired by
https://stackoverflow.com/questions/45990633/what-are-the-various-join-types-in-spark.
Used in Spark in Action 2e, http://jgp.net/sia
@author rambabu.posa
"""
import logging
from pyspark.sql import SparkSession
from pyspark.sql.types import (StructType, StructField,
IntegerType, StringType)
def create_left_df(spark):
schema = StructType([
StructField('id', IntegerType(), True),
StructField('value', StringType(), True)
])
rows = [
(1, "Value 1"),
(2, "Value 2"),
(3, "Value 3"),
(4, "Value 4")
]
return spark.createDataFrame(rows, schema)
def create_right_df(spark):
schema = StructType([
StructField('id', IntegerType(), True),
StructField('value', StringType(), True)
])
rows = [
(3, "Value 3"),
(4, "Value 4"),
(4, "Value 4_1"),
(5, "Value 5"),
(6, "Value 6")
]
return spark.createDataFrame(rows, schema)
def main(spark):
left_df = create_left_df(spark)
left_df.show()
right_df = create_right_df(spark)
right_df.show()
join_types = [
"inner",
"outer",
"full",
"full_outer",
"left",
"left_outer",
"right",
"right_outer",
"left_semi",
"left_anti",
"cross"
]
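# join the two dataframes on the shared id column once per join type and show the ordered result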
for join_type in join_types:
logging.warning(join_type.upper() + " JOIN")
df = left_df.join(right_df, left_df["id"] == right_df["id"], join_type)
df.orderBy(left_df["id"]).show()
logging.warning("CROSS JOIN (without a column")
df = left_df.crossJoin(right_df)
df.orderBy(left_df["id"]).show()
if __name__ == "__main__":
# Creates a session on a local master
spark = SparkSession.builder.appName("All joins!") \
.master("local[*]").getOrCreate()
# setting log level, update this as per your requirement
spark.sparkContext.setLogLevel("warn")
main(spark)
spark.stop() |
the-stack_0_16807 | """
Copyright (C) 2020-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.front.extractor import bool_to_str
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
class CTCLoss(Op):
op = 'CTCLoss'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': self.op,
'op': self.op,
'version': 'opset4',
'type_infer': self.type_infer,
'infer': self.infer,
'in_ports_count': 5,
'out_ports_count': 1,
'preprocess_collapse_repeated': False,
'ctc_merge_repeated': True,
'unique': False
}
super().__init__(graph, mandatory_props, attrs)
def backend_attrs(self):
return [('preprocess_collapse_repeated', lambda node: bool_to_str(node, 'preprocess_collapse_repeated')),
('ctc_merge_repeated', lambda node: bool_to_str(node, 'ctc_merge_repeated')),
('unique', lambda node: bool_to_str(node, 'unique'))]
@staticmethod
def type_infer(node):
logits_type = node.in_port(0).get_data_type()
logit_length_type = node.in_port(1).get_data_type()
labels_type = node.in_port(2).get_data_type()
label_length_type = node.in_port(3).get_data_type()
blank_index_type = labels_type
if not node.in_port(4).disconnected():
blank_index_type = node.in_port(4).get_data_type()
assert logit_length_type == label_length_type and logit_length_type in [np.int64, np.int32], \
'Inputs with logits and labels lengths for node {} must be the same and int32 or int64, {} and {} found'.format(
node.soft_get('name'), logit_length_type, label_length_type)
assert labels_type == blank_index_type and labels_type in [np.int64, np.int32], \
'Inputs with labels and blank index for node {} must be the same and int32 or int64, {} and {} found'.format(
node.soft_get('name'), labels_type, blank_index_type)
node.out_port(0).set_data_type(logits_type)
@staticmethod
def infer(node: Node):
node_name = node.soft_get('name', node.id)
connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
assert len(connected_in_ports) in [4, 5], \
"Incorrect number of inputs for {} node".format(node_name)
logits_shape = node.in_port(0).data.get_shape()
logit_length_shape = node.in_port(1).data.get_shape()
labels_shape = node.in_port(2).data.get_shape()
label_length_shape = node.in_port(3).data.get_shape()
blank_index_shape = int64_array([])
if len(node.in_nodes()) == 5:
blank_index_shape = node.in_port(4).data.get_shape()
# check shapes of input tensors
assert len(logits_shape) == 3 and len(logit_length_shape) == 1 and len(labels_shape) == 2\
and len(label_length_shape) == 1 and len(blank_index_shape) == 0, \
'Incorrect rank of some input tensor for {} node'.format(node_name)
assert logits_shape[0] == logit_length_shape[0] and logits_shape[0] == labels_shape[0]\
and logits_shape[0] == label_length_shape[0], \
'Batch dimensions of input tensors must be the same for {} node'.format(node_name)
assert logits_shape[1] == labels_shape[1], \
'Time dimensions of input tensors must be the same for {} node'.format(node_name)
batch_size = logits_shape[0]
node.out_port(0).data.set_shape(int64_array([batch_size]))
|
the-stack_0_16809 | __author__ = 'the-kid89'
"""
A sample program that uses multiple intents and disambiguates by
intent confidence
try with the following:
PYTHONPATH=. python examples/multi_intent_parser.py "what's the weather like in tokyo"
PYTHONPATH=. python examples/multi_intent_parser.py "play some music by the clash"
"""
import json
import sys
from adapt.entity_tagger import EntityTagger
from adapt.tools.text.tokenizer import EnglishTokenizer
from adapt.tools.text.trie import Trie
from adapt.intent import IntentBuilder
from adapt.parser import Parser
from adapt.engine import DomainIntentDeterminationEngine
tokenizer = EnglishTokenizer()
trie = Trie()
tagger = EntityTagger(trie, tokenizer)
parser = Parser(tokenizer, tagger)
engine = DomainIntentDeterminationEngine()
engine.register_domain('Domain1')
engine.register_domain('Domain2')
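# each domain keeps its own vocabulary and intent parsers, so weather and music intents are evaluated separately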
# define vocabulary
weather_keyword = [
"weather"
]
for wk in weather_keyword:
engine.register_entity(wk, "WeatherKeyword", domain='Domain1')
weather_types = [
"snow",
"rain",
"wind",
"sleet",
"sun"
]
for wt in weather_types:
engine.register_entity(wt, "WeatherType", domain='Domain1')
locations = [
"Seattle",
"San Francisco",
"Tokyo"
]
for l in locations:
engine.register_entity(l, "Location", domain='Domain1')
# structure intent
weather_intent = IntentBuilder("WeatherIntent")\
.require("WeatherKeyword")\
.optionally("WeatherType")\
.require("Location")\
.build()
# define music vocabulary
artists = [
"third eye blind",
"the who",
"the clash",
"john mayer",
"kings of leon",
"adelle"
]
for a in artists:
engine.register_entity(a, "Artist", domain='Domain2')
music_verbs = [
"listen",
"hear",
"play"
]
for mv in music_verbs:
engine.register_entity(mv, "MusicVerb", domain='Domain2')
music_keywords = [
"songs",
"music"
]
for mk in music_keywords:
engine.register_entity(mk, "MusicKeyword", domain='Domain2')
music_intent = IntentBuilder("MusicIntent")\
.require("MusicVerb")\
.optionally("MusicKeyword")\
.optionally("Artist")\
.build()
engine.register_intent_parser(weather_intent, domain='Domain1')
engine.register_intent_parser(music_intent, domain='Domain2')
if __name__ == "__main__":
for intents in engine.determine_intent(' '.join(sys.argv[1:])):
print(intents)
|
the-stack_0_16812 | import ast
import datetime
import json
import random
import time
import uuid
# noinspection PyUnresolvedReferences
from uuid import UUID # for UUID as object parsing
from collections.abc import Iterable
from types import ModuleType
from typing import Union
from jinja2 import Template, UndefinedError
from catcher.utils import module_utils
from catcher.utils.logger import debug
from catcher.core.filters_factory import FiltersFactory
def merge_two_dicts(x, y):
if not x:
return y
if not y:
return x
return {**x, **y}
def report_override(variables: dict, override: dict):
existing = set(variables)
replace = set(override)
return list(existing.intersection(replace))
def try_get_objects(source: str or dict or list):
got = try_get_object(source) # "'[1,2,3]'" -> '[1,2,3]' -> [1,2,3]
got = try_get_object(got) # '[1,2,3]' -> [1,2,3]
if isinstance(got, dict):
return dict([(k, try_get_objects(v)) for k, v in got.items()])
if isinstance(got, list):
return [try_get_objects(v) for v in got]
return got
def try_get_object(source: str or dict or list):
if isinstance(source, str):
try: # try python term '{key: "value"}'
evaled = eval_datetime(source)
if isinstance(evaled, ModuleType) or callable(evaled): # for standalone 'string' var or 'id' bif
return source
source = evaled
except Exception:
try: # try json object '{"key" : "value"}'
source = json.loads(source)
except ValueError:
return source
return source
def fill_template_recursive(source: Union[dict, list, str], variables: dict, glob=None, globs_added=None) \
-> Union[dict, list, str]:
if isinstance(source, dict):
return dict([(fill_template_recursive(k, variables, glob, globs_added),
fill_template_recursive(v, variables, glob, globs_added)) for k, v in source.items()])
if isinstance(source, list):
return [fill_template_recursive(v, variables, glob, globs_added) for v in source]
return fill_template(source, variables, glob, globs_added)
def fill_template(source: str, variables: dict, isjson=False, glob=None, globs_added=None) -> str:
if not globs_added:
globs_added = set()
if isinstance(source, str):
source = render(source, inject_builtins(variables))
if isjson: # do not parse json string back to objects
return source
try:
evaled = format_datetime(eval_datetime(source, glob))
if not isinstance(evaled, ModuleType) and not callable(evaled): # for standalone 'string' var or 'id' bif
source = evaled
except NameError as e: # try to import missing package and rerun this code
if 'is not defined' in str(e):
name = str(e).split("'")[1]
if name not in globs_added:
# f.e. tzinfo=psycopg2.tz.FixedOffsetTimezone for datetime
glob = module_utils.add_package_to_globals(name, glob, warn_missing_package=False)
globs_added.add(name)
filled = fill_template(source, variables, isjson, glob=glob, globs_added=globs_added)
if not isinstance(filled, ModuleType) and not callable(filled):
return filled # for standalone 'string' var or 'id' bif
except Exception:
pass
return source
def fill_template_str(source: any, variables: dict) -> str:
rendered = render(str(source), inject_builtins(variables))
if rendered != source:
return fill_template_str(rendered, variables)
return rendered
def eval_datetime(astr, glob=None):
if glob is None:
glob = globals()
try:
tree = ast.parse(astr)
except SyntaxError:
raise ValueError(astr)
for node in ast.walk(tree):
if isinstance(node, (ast.Module, ast.Expr, ast.Dict, ast.Str,
ast.Attribute, ast.Num, ast.Name, ast.Load, ast.Tuple)): continue
if (isinstance(node, ast.Call)
and isinstance(node.func, ast.Attribute)
and node.func.attr == 'datetime'): continue
pass
return eval(astr, glob)
def format_datetime(iterable):
if not isinstance(iterable, Iterable) or isinstance(iterable, str):
if isinstance(iterable, datetime.datetime):
return iterable.strftime('%Y-%m-%d %H:%M:%S.%f')
return iterable
else:
if isinstance(iterable, dict):
return dict([(format_datetime(k), format_datetime(v)) for k, v in iterable.items()])
elif isinstance(iterable, tuple):
return tuple([format_datetime(i) for i in iterable])
return [format_datetime(i) for i in iterable]
def inject_builtins(variables: dict) -> dict:
variables_copy = dict(variables)
variables_copy['RANDOM_STR'] = str(uuid.uuid4())
variables_copy['RANDOM_INT'] = random.randint(-2147483648, 2147483648)
ts = round(time.time(), 6) # from timestamp uses rounding, so we should also use it here, to make them compatible
variables_copy['NOW_TS'] = ts
variables_copy['NOW_DT'] = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%S0+0000')
return variables_copy
def render(source: str, variables: dict) -> str:
template = Template(source)
holder = FiltersFactory()
for filter_mod, value in holder.filters.items():
template.environment.filters[filter_mod] = value
for fun_mod, value in holder.functions.items():
template.globals[fun_mod] = value
try:
return template.render(variables)
except UndefinedError as e:
debug(e.message)
return source
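
# Illustrative usage sketch (not part of the original module). Assuming the
# surrounding catcher package is importable, fill_template_str renders Jinja2
# templates against a variables dict and keeps re-rendering until the output
# is stable, e.g.:
#
#   fill_template_str('{{ greeting }}, {{ name }}!',
#                     {'greeting': 'hello', 'name': 'world'})   # -> 'hello, world!'
#
# Built-ins such as RANDOM_STR, NOW_TS and NOW_DT are injected by
# inject_builtins() before rendering.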
|
the-stack_0_16813 | import pytest
from ..py_graph_t.SimpleGraph import SimpleGraph
from ..py_graph_t.exceptions.SimpleGraphException import (
VertexNotExistsException,
EdgeDuplicatedException,
EdgeNotFoundException,
VertexDuplicatedException,
CycleDetectedException
)
from ..py_graph_t.Graph import Graph
from ..py_graph_t.util.ValueBinding import ValueBinding
class TestGraph:
g = Graph()
def teste(self):
g = Graph()
g.vertices = dict()
g.edges = []
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_edge("a", "b", name="s")
g.add_edge("b", "c", name="t")
g.add_edge("a", "a", name="d")
list_ = g.incidence_list()
test = ValueBinding("a", "s", 1)
        assert list_[0] == test
|
the-stack_0_16814 | import numpy as np
def calculateEarthRadius(lat_deg):
"""
IN RADIANS!!!
"""
lat_rad = np.deg2rad(lat_deg)
major = 6378137.0 # semi-major axis of the earth
minor = 6356752.3142 # semi-minor axis of the earth
radius = np.sqrt((((major**2)*np.cos(lat_rad))**2 + ((minor**2)*np.sin(lat_rad))**2)/
((major*np.cos(lat_rad))**2 + (minor*np.sin(lat_rad))**2)) # defines the radius of the earth at a specific point
return radius
|
the-stack_0_16816 | import os
import unittest
from unittest import mock
from collections import defaultdict
import sys
import pandas as pd
import numpy as np
from dataprofiler.profilers import NumericStatsMixin
from dataprofiler.profilers.profiler_options import NumericalOptions
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestColumn(NumericStatsMixin):
def __init__(self):
NumericStatsMixin.__init__(self)
self.match_count = 0
self.times = defaultdict(float)
def update(self, df_series):
pass
def _filter_properties_w_options(self, calculations, options):
pass
class TestNumericStatsMixin(unittest.TestCase):
@mock.patch.multiple(NumericStatsMixin, __abstractmethods__=set(),
_filter_properties_w_options=mock.MagicMock(
return_value=None),
create=True)
def test_base(self):
# validate requires NumericalOptions
with self.assertRaisesRegex(ValueError,
"NumericalStatsMixin parameter 'options' "
"must be of type NumericalOptions."):
profile = NumericStatsMixin(options='bad options')
try:
# validate doesn't fail
profile = NumericStatsMixin()
profile = NumericStatsMixin(NumericalOptions())
except Exception as e:
self.fail(e)
def test_check_float(self):
"""
Checks if number is float.
:return:
"""
true_asserts = [1.3, 1.345, -1.3, 0.03, 0.0, -0.0, 1, # numeric values
float("nan"), np.nan, # nan values
"1.3", "nan" # strings
]
for assert_val in true_asserts:
self.assertTrue(NumericStatsMixin.is_float(assert_val))
false_asserts = ["1.3a", "abc", "", "1.23.45"]
for assert_val in false_asserts:
self.assertFalse(NumericStatsMixin.is_float(assert_val))
def test_check_int(self):
"""
Checks if number is integer.
:return:
"""
true_asserts = [1, 1345, -13, 0, -0, # numeric values
"1" # strings
]
for assert_val in true_asserts:
self.assertTrue(NumericStatsMixin.is_int(assert_val))
false_asserts = [1.3, # float
float("nan"), np.nan, # nan value
"nan", "1a", "abc", "", "1.3" # strings
]
for assert_val in false_asserts:
self.assertFalse(NumericStatsMixin.is_int(assert_val))
def test_update_variance(self):
"""
Checks update variance
:return:
"""
num_profiler = TestColumn()
# test update variance
data1 = [-3.0, 2.0, 11.0]
mean1 = (-3.0 + 2.0 + 11.0) / 3
var1 = ((-3.0 - mean1) ** 2 + (2.0 - mean1)
** 2 + (11.0 - mean1) ** 2) / 2
count1 = len(data1)
num_profiler._biased_variance = num_profiler._update_variance(
mean1, var1 * 2 / 3, count1)
num_profiler.match_count = count1
num_profiler.sum = sum(data1)
self.assertAlmostEqual(var1, num_profiler.variance)
# test streaming update variance with new data
data2 = [-5.0, 5.0, 11.0]
mean2 = (-5.0 + 5.0 + 11.0) / 3
var2 = ((-5.0 - mean2) ** 2 + (5.0 - mean2)
** 2 + (11.0 - mean2) ** 2) / 2
count2 = len(data2)
num_profiler._biased_variance = num_profiler._update_variance(
mean2, var2 * 2 / 3, count2)
num_profiler.match_count += count2
num_profiler.sum += sum(data2)
var_from_profile_updated = num_profiler.variance
data_all = [-5.0, 5.0, 11.0, -3.0, 2.0, 11.0]
mean_all = (-5.0 + 5.0 + 11.0 - 3.0 + 2.0 + 11.0) / 6
var_all = ((-5.0 - mean_all) ** 2 + (5.0 - mean_all) ** 2 + \
(11.0 - mean_all) ** 2 + (-3.0 - mean_all) ** 2 + \
(2.0 - mean_all) ** 2 + (11.0 - mean_all) ** 2) / 5
self.assertAlmostEqual(var_all, var_from_profile_updated)
def test_update_variance_with_varying_data_length(self):
"""
Checks update variance
:return:
"""
# empty data
data1 = []
mean1, var1, count1 = 0, np.nan, 0
num_profiler = TestColumn()
num_profiler._biased_variance = num_profiler._update_variance(
mean1, var1, count1)
num_profiler.match_count = count1
num_profiler.sum = 0
self.assertTrue(num_profiler.variance is np.nan)
# data with 1 element
data2 = [5.0]
mean2, var2, count2 = 5.0, 0, 1
num_profiler = TestColumn()
num_profiler._biased_variance = num_profiler._update_variance(
mean2, var2, count2)
num_profiler.match_count += count2
num_profiler.sum += 5.0
self.assertTrue(num_profiler.variance is np.nan)
# data with multiple elements
data3 = [-5.0, 5.0, 11.0, -11.0]
mean3, count3 = 0, 4
var3 = ((-5.0 - mean3) ** 2 + (5.0 - mean3) ** 2 +
(11.0 - mean3) ** 2 + (-11.0 - mean3) ** 2) / 3
num_profiler = TestColumn()
num_profiler._biased_variance = num_profiler._update_variance(
mean3, var3 * 3 / 4, count3)
num_profiler.match_count += count3
num_profiler.sum += sum(data3)
self.assertEqual(var3, num_profiler.variance)
def test_update_variance_with_empty_data(self):
"""
Checks update variance
:return:
"""
num_profiler = TestColumn()
data1 = [-3.0, 2.0, 11.0]
mean1 = (-3.0 + 2.0 + 11.0) / 3
var1 = ((-3.0 - mean1) ** 2 + (2.0 - mean1)
** 2 + (11.0 - mean1) ** 2) / 2
count1 = len(data1)
num_profiler._biased_variance = num_profiler._update_variance(
mean1, var1 * 2 / 3, count1)
num_profiler.match_count = count1
num_profiler.sum = sum(data1)
self.assertEqual(var1, num_profiler.variance)
# test adding data which would not have anything
# data + empty
mean2, var2, count2 = 0, 0, 0
num_profiler._biased_variance = num_profiler._update_variance(
mean2, var2, count2)
num_profiler.match_count = count1
num_profiler.sum = sum(data1)
var_from_profile_updated = num_profiler.variance
# simulate not having data
mean_all, var_all = mean1, var1
self.assertEqual(var_all, var_from_profile_updated)
def test_timeit_merge(self):
"""
Checks profiles have been merged and timed
:return:
"""
num_profiler, other1, other2 = TestColumn(), TestColumn(), TestColumn()
mock_histogram = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([2., 5.25, 8.5, 11.75, 15.])
}
other1.min, other1.max, other1._biased_variance, other1.sum, \
other1.num_zeros, other1.num_negatives = 0, 0, 0, 0, 0, 0
other2.min, other2.max, other2._biased_variance, other2.sum, \
other2.num_zeros, other2.num_negatives = 1, 1, 1, 1, 1, 1
# set auto as only histogram to merge
other1.histogram_selection = "auto"
other2.histogram_selection = "auto"
other1.histogram_bin_method_names = ['auto']
other2.histogram_bin_method_names = ['auto']
other1._stored_histogram['histogram'] = mock_histogram
other2._stored_histogram['histogram'] = mock_histogram
other1.histogram_selection = 'auto'
time_array = [float(i) for i in range(2, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
# Validate profiles are merged and timed.
expected = defaultdict(float, {'histogram_and_quantiles': 1.0})
num_profiler._add_helper(other1, other2)
self.assertEqual(expected, num_profiler.times)
def test_timeit(self):
"""
Checks stat properties have been timed
:return:
"""
num_profiler = TestColumn()
# Dummy data to make min call
prev_dependent_properties = {"mean": 0,
"biased_variance": 0,
"biased_skewness": 0}
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
subset_properties = {"min": 0, "match_count": 0}
time_array = [float(i) for i in range(24, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
# Validate _get_min is timed.
expected = defaultdict(float, {'min': 1.0})
num_profiler._get_min(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_max is timed.
expected['max'] = 1.0
num_profiler._get_max(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_sum is timed.
expected['sum'] = 1.0
num_profiler._get_sum(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_variance is timed.
expected['variance'] = 1.0
num_profiler._get_variance(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_skewness is timed
expected['skewness'] = 1.0
num_profiler._get_skewness(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_kurtosis is timed
expected['kurtosis'] = 1.0
num_profiler._get_kurtosis(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_histogram_and_quantiles is timed.
expected['histogram_and_quantiles'] = 1.0
num_profiler._get_histogram_and_quantiles(
df_series, prev_dependent_properties, subset_properties)
self.assertEqual(expected, num_profiler.times)
def test_histogram_bin_error(self):
num_profiler = TestColumn()
# Dummy data for calculating bin error
num_profiler._stored_histogram = {
"histogram": {
"bin_edges": np.array([0.0, 4.0, 8.0, 12.0, 16.0])
}
}
input_array = [0, 3, 5, 9, 11, 17]
sum_error = num_profiler._histogram_bin_error(input_array)
# Sum of errors should be difference of each input value to midpoint of bin squared
# bin_midpoints = [2, 6, 10, 14] ids = [1, 1, 2, 3, 3, 4]
assert sum_error == (2-0)**2 + (2-3)**2 + (6-5)**2 + \
(10-9)**2 + (10-11)**2 + (17-14)**2
# Max value test
input_array = [sys.float_info.max, 1.2e308, 1.3e308, 1.5e308]
num_profiler._stored_histogram = {
"histogram": {
"bin_edges": np.array([1e308, 1.2e308, 1.4e308, 1.6e308])
}
}
sum_error = num_profiler._histogram_bin_error(input_array)
assert sum_error == np.inf
# Min value test
input_array = [sys.float_info.min, -1.2e308, -1.3e308, -1.5e308]
num_profiler._stored_histogram = {
"histogram": {
"bin_edges": np.array([-1.6e308, -1.4e308, -1.2e308, -1e308])
}
}
sum_error = num_profiler._histogram_bin_error(input_array)
assert sum_error == np.inf
def test_get_best_histogram_profile(self):
num_profiler = TestColumn()
num_profiler._histogram_for_profile = mock.MagicMock(side_effect=[
("hist_1", 3),
("hist_2", 2),
("hist_3", 1)
])
num_profiler.histogram_selection = None
num_profiler.histogram_methods = {
'method_1': {
'total_loss': 0,
'current_loss': 0,
'histogram': None,
'suggested_bin_count': 3
},
'method_2': {
'total_loss': 0,
'current_loss': 0,
'histogram': None,
'suggested_bin_count': 3
},
'method_3': {
'total_loss': 0,
'current_loss': 0,
'histogram': None,
'suggested_bin_count': 3
}
}
best_histogram = num_profiler._get_best_histogram_for_profile()
assert best_histogram == "hist_3"
def test_get_best_histogram_profile_infinite_loss(self):
num_profiler = TestColumn()
num_profiler._histogram_for_profile = mock.MagicMock(return_value=("hist_1", 3))
num_profiler.histogram_selection = None
num_profiler.histogram_methods = {
'method_1': {
'total_loss': np.inf,
'current_loss': np.inf,
'histogram': None,
'suggested_bin_count': 3
},
}
best_histogram = num_profiler._get_best_histogram_for_profile()
assert best_histogram == "hist_1"
def test_num_zeros(self):
num_profiler = TestColumn()
# Dummy data to make num_zeros call
prev_dependent_properties = {"mean": 0}
subset_properties = {"num_zeros": 0}
df_series = pd.Series([])
num_profiler._get_num_zeros(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_zeros"], 0)
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
num_profiler._get_num_zeros(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_zeros"], 5)
data = np.array([000., 0.00, .000, 1.11234, 0, -1])
df_series = pd.Series(data)
num_profiler._get_num_zeros(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_zeros"], 4)
def test_num_negatives(self):
num_profiler = TestColumn()
# Dummy data to make num_negatives call
prev_dependent_properties = {"mean": 0}
subset_properties = {"num_negatives": 0}
df_series = pd.Series([])
num_profiler._get_num_negatives(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_negatives"], 0)
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
num_profiler._get_num_negatives(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_negatives"], 0)
data = np.array([1, 0, -.003, -16, -1., -24.45])
df_series = pd.Series(data)
num_profiler._get_num_negatives(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_negatives"], 4)
def test_timeit_num_zeros_and_negatives(self):
"""
Checks num_zeros and num_negatives have been timed
:return:
"""
num_profiler = TestColumn()
# Dummy data to make min call
prev_dependent_properties = {"mean": 0}
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
subset_properties = {"num_zeros": 0, "num_negatives": 0}
time_array = [float(i) for i in range(4, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
# Validate _get_min is timed.
expected = defaultdict(float, {'num_zeros': 1.0})
num_profiler._get_num_zeros(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_max is timed.
expected['num_negatives'] = 1.0
num_profiler._get_num_negatives(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
def test_merge_num_zeros_and_negatives(self):
"""
Checks num_zeros and num_negatives can be merged
:return:
"""
num_profiler, other1, other2 = TestColumn(), TestColumn(), TestColumn()
other1.num_zeros, other1.num_negatives = 3, 1
other2.num_zeros, other2.num_negatives = 7, 1
num_profiler._add_helper(other1, other2)
self.assertEqual(num_profiler.num_zeros, 10)
self.assertEqual(num_profiler.num_negatives, 2)
num_profiler, other1, other2 = TestColumn(), TestColumn(), TestColumn()
other1.num_zeros, other1.num_negatives = 0, 0
other2.num_zeros, other2.num_negatives = 0, 0
num_profiler._add_helper(other1, other2)
self.assertEqual(num_profiler.num_zeros, 0)
self.assertEqual(num_profiler.num_negatives, 0)
def test_profile(self):
num_profiler = TestColumn()
mock_profile = dict(
min=1.0,
max=1.0,
sum=1.0,
mean=0, # default
variance=np.nan, # default
skewness=np.nan, # default
kurtosis=np.nan, # default
stddev=np.nan, # default
histogram={
'bin_counts': np.array([1, 1, 1]),
'bin_edges': np.array([1.0, 2.0, 3.0, 4.0])
},
quantiles={
0: 2.0,
1: 3.0,
2: 4.0,
},
num_zeros=0, # default
num_negatives=0, # default
times=defaultdict(float), # default
)
num_profiler.match_count = 0
num_profiler.min = mock_profile['min']
num_profiler.max = mock_profile['max']
num_profiler.sum = mock_profile['sum']
num_profiler.histogram_selection = 'auto'
num_profiler.histogram_methods['auto']['histogram'] = \
mock_profile['histogram']
num_profiler.quantiles = mock_profile['quantiles']
num_profiler.times = mock_profile['times']
time_array = [float(i) for i in range(100, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
profile = num_profiler.profile()
# pop out the histogram and quartiles to test separately from the
# rest of the dict as we need comparison with some precision
histogram = profile.pop('histogram')
expected_histogram = mock_profile.pop('histogram')
quartiles = profile.pop('quantiles')
expected_quartiles = mock_profile.pop('quantiles')
self.assertDictEqual(mock_profile, profile)
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
self.assertAlmostEqual(expected_quartiles[0], quartiles[0])
self.assertAlmostEqual(expected_quartiles[1], quartiles[1])
self.assertAlmostEqual(expected_quartiles[2], quartiles[2])
def test_diff(self):
"""
Checks _diff_helper() works appropriately.
"""
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = 1
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 6
other2.match_count = 20
# T-stat and Welch's df calculation can be found here:
# https://en.wikipedia.org/wiki/Welch%27s_t-test#Calculations
# Conservative df = min(count1, count2) - 1
# P-value is found using scipy: (1 - CDF(abs(t-stat))) * 2
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': 'unchanged',
'mean': 0.3,
'variance': 10 / 9 - (9 * 20 / 19),
'stddev': np.sqrt(10 / 9) - np.sqrt(9 * 20 / 19),
't-test': {
't-statistic': 0.3923009049186606,
'conservative': {
'df': 9,
'p-value': 0.7039643545772609
},
'welch': {
'df': 25.945257024943864,
'p-value': 0.6980401261750298
}
}
}
difference = other1.diff(other2)
self.assertDictEqual(expected_diff, difference)
# Invalid statistics
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = np.nan # NaN variance
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 6
other2.match_count = 20
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': 'unchanged',
'mean': 0.3,
'variance': np.nan,
'stddev': np.nan,
't-test': {
't-statistic': None,
'conservative': {
'df': None,
'p-value': None
},
'welch': {
'df': None,
'p-value': None
}
}
}
expected_var = expected_diff.pop('variance')
expected_stddev = expected_diff.pop('stddev')
with self.assertWarns(RuntimeWarning, msg=
"Null value(s) found in mean and/or variance values. "
"T-test cannot be performed"):
difference = other1.diff(other2)
var = difference.pop('variance')
stddev = difference.pop('stddev')
self.assertDictEqual(expected_diff, difference)
self.assertTrue(np.isnan([expected_var, var, expected_stddev, stddev]).all())
# Insufficient match count
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = 1
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 6
other2.match_count = 1 # Insufficient count
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': 'unchanged',
'mean': -5.4,
'variance': np.nan,
'stddev': np.nan,
't-test': {
't-statistic': None,
'conservative': {
'df': None,
'p-value': None
},
'welch': {
'df': None,
'p-value': None
}
}
}
expected_var = expected_diff.pop('variance')
expected_stddev = expected_diff.pop('stddev')
with self.assertWarns(RuntimeWarning, msg=
"Insufficient sample size. "
"T-test cannot be performed."):
difference = other1.diff(other2)
var = difference.pop('variance')
stddev = difference.pop('stddev')
self.assertDictEqual(expected_diff, difference)
self.assertTrue(np.isnan([expected_var, var, expected_stddev, stddev]).all())
# Small p-value
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = 1
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 60
other2.match_count = 20
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': -54,
'mean': -2.4,
'variance': 10 / 9 - (9 * 20 / 19),
'stddev': np.sqrt(10 / 9) - np.sqrt(9 * 20 / 19),
't-test': {
't-statistic': -3.138407239349285,
'conservative': {
'df': 9,
'p-value': 0.011958658754358975
},
'welch': {
'df': 25.945257024943864,
'p-value': 0.004201616692122823
}
}
}
difference = other1.diff(other2)
self.assertDictEqual(expected_diff, difference)
# Assert type error is properly called
with self.assertRaises(TypeError) as exc:
other1.diff("Inproper input")
self.assertEqual(str(exc.exception),
"Unsupported operand type(s) for diff: 'TestColumn' and"
" 'str'")
|
the-stack_0_16819 | from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'redis_sentinel', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=4.2.0'
setup(
name='datadog-redis_sentinel',
version=ABOUT['__version__'],
description='The Redis_sentinel check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent redis_sentinel check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-extras',
# Author details
author='krasnoukhov',
# License
license='BSD-3-Clause',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.7',
],
# The package we're going to ship
packages=['datadog_checks.redis_sentinel'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
|
the-stack_0_16820 | # Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common layers used for modeling."""
from typing import Optional, Tuple
import tensorflow as tf
class PadLayer(tf.keras.layers.Layer):
"""Implements circular and regular padding."""
def __init__(self, padding: int, circular_pad: bool = False, **kwargs):
"""Instantiates a PadLayer.
Args:
padding: Size of padding in pixels.
circular_pad: If true, uses circular padding along the width dimension.
**kwargs: Additional arguments passed to tf.keras.layers.Layer.
"""
super().__init__(**kwargs)
self.padding = padding
self.circular_pad = circular_pad
def call(self, inputs: tf.Tensor, training=None) -> tf.Tensor:
"""Implements forward pass for padding.
Args:
inputs: tf.Tensor input of shape (N, H, W, C).
training: Whether the layer is in training mode.
Returns:
      tf.Tensor, the padded output.
"""
batch_size, height, width, channels = inputs.shape
left_pad = tf.zeros((batch_size, height, self.padding, channels),
dtype=inputs.dtype)
right_pad = tf.zeros((batch_size, height, self.padding, channels),
dtype=inputs.dtype)
if self.circular_pad:
left_pad = inputs[:, :, -self.padding:, :]
right_pad = inputs[:, :, :self.padding, :]
top_pad = tf.zeros(
(batch_size, self.padding, width + self.padding * 2, channels),
dtype=inputs.dtype)
bottom_pad = tf.zeros(
(batch_size, self.padding, width + self.padding * 2, channels),
dtype=inputs.dtype)
padded_tensor = tf.concat([left_pad, inputs, right_pad], axis=2)
padded_tensor = tf.concat([bottom_pad, padded_tensor, top_pad], axis=1)
return padded_tensor
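
# Usage sketch (illustrative, not from the original source): with padding=2 and
# circular_pad=True, an input of shape (N, H, W, C) becomes (N, H + 4, W + 4, C);
# the extra width columns are copied from the opposite edge, while the extra
# height rows are zero-padded.
#
#   pad = PadLayer(padding=2, circular_pad=True)
#   out = pad(tf.zeros((1, 8, 8, 3)))   # -> shape (1, 12, 12, 3)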
class Bottleneck(tf.keras.Model):
"""ResNet bottleneck block."""
def __init__(self,
filters: int = 128,
strides: int = 1,
expansion: int = 4,
downsample=None,
circular_pad: bool = False):
super(Bottleneck, self).__init__()
self.shortcut = None
self.main = tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters, kernel_size=1, strides=1, padding='SAME'),
tf.keras.layers.experimental.SyncBatchNormalization(),
tf.keras.layers.ReLU(),
PadLayer(1, circular_pad=circular_pad),
tf.keras.layers.Conv2D(
filters, kernel_size=3, strides=strides, padding='VALID'),
tf.keras.layers.experimental.SyncBatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2D(
expansion * filters, kernel_size=1, strides=1, padding='SAME'),
tf.keras.layers.experimental.SyncBatchNormalization(),
])
self.relu = tf.keras.layers.ReLU()
self.downsample = downsample
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
residual = x
out = self.main(x)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SpectralConv(tf.keras.layers.Conv2D):
"""Convolution with spectral normalization applied to weights.
From "Spectral Normalization for Generative Adversarial Networks"
https://arxiv.org/abs/1802.05957
"""
def build(self, input_shape):
was_built = self.built
tf.keras.layers.Conv2D.build(self, input_shape)
self.built = was_built
output_dims = self.kernel.shape[-1]
self.u = self.add_weight(
name=self.name + '_u',
shape=[1, output_dims],
dtype=tf.float32,
initializer=tf.initializers.TruncatedNormal(),
trainable=False,
aggregation=tf.VariableAggregation.MEAN)
if not isinstance(self.padding, (list, tuple)):
self.padding = self.padding.upper()
self.built = True
def call(self, feature, training=None):
"""Forward pass applying spectral normalized convolution.
Args:
feature: Float tensor of shape (N, H, W, C), representing input feature.
training: Represents whether the layer is in training mode.
Returns:
out: Float tensor of shape (N, H, W, output_dims), representing output
feature after applying a spectral normalized convolution.
"""
# For preventing division by 0.
eps = 1e-10
# Flatten weight matrix.
w_shape = self.kernel.shape
w = tf.reshape(self.kernel, [-1, w_shape[-1]])
# One step of power iteration.
v = tf.matmul(self.u, w, transpose_b=True)
v_hat = v / (tf.norm(v) + eps)
u = tf.matmul(v_hat, w)
u_hat = u / (tf.norm(u) + eps)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), u_hat, transpose_b=True)
if training:
self.u.assign(u_hat)
w_norm = w / (sigma + eps)
w_norm = tf.reshape(w_norm, w_shape)
out = tf.nn.conv2d(
input=feature,
filters=w_norm,
strides=self.strides,
dilations=self.dilation_rate,
padding=self.padding)
if self.use_bias:
out = out + self.bias
if self.activation:
out = self.activation(out)
return out
class PartialConv(tf.keras.layers.Conv2D):
"""Partial 2D convolution.
From "Image inpainting for irregular holes using partial convolutions.",
Liu et al., ECCV 2018.
"""
def build(self, input_shape):
was_built = self.built
tf.keras.layers.Conv2D.build(self, input_shape)
self.built = was_built
ks_height, ks_width, _, _ = self.kernel.shape
self.weight_mask_updater = tf.ones((ks_height, ks_width, 1, 1))
self.slide_window_size = ks_height * ks_width * 1
self.built = True
def call(self,
feature: tf.Tensor,
mask: Optional[tf.Tensor] = None,
training=None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Forward pass applying partial convolution.
Args:
feature: Float tensor of shape (N, H, W, C) representing input feature.
mask: Binary float tensor of shape (N, H, W, 1) representing valid pixels.
training: Represents whether the layer is in training mode.
Returns:
out: Float tensor of shape (N, H, W, output_dims), representing output
feature after applying a partial convolution.
"""
if mask is None:
mask = tf.ones((feature.shape[0], feature.shape[1], feature.shape[2], 1))
eps = 1e-6
update_mask = tf.nn.conv2d(
mask,
self.weight_mask_updater,
strides=self.strides,
padding=self.padding.upper())
mask_ratio = self.slide_window_size / (update_mask + eps)
update_mask = tf.clip_by_value(update_mask, 0, 1)
mask_ratio = mask_ratio * update_mask
mask = tf.stop_gradient(mask)
update_mask = tf.stop_gradient(update_mask)
mask_ratio = tf.stop_gradient(mask_ratio)
out = feature * mask
out = tf.nn.conv2d(
input=out,
filters=self.kernel,
strides=self.strides,
padding=self.padding.upper())
if self.bias is not None:
bias = tf.reshape(self.bias, (1, 1, 1, -1))
out = (out - bias) * mask_ratio + bias
out = out * update_mask
else:
out = out * mask_ratio
return out, update_mask
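
# Usage sketch (illustrative, not from the original source): PartialConv takes
# both the feature map and a binary validity mask and returns the convolved
# feature together with the updated mask.
#
#   conv = PartialConv(filters=64, kernel_size=3, strides=1, padding='same')
#   out, new_mask = conv(features, mask)   # features: (N, H, W, C), mask: (N, H, W, 1)
#
# Masked-out pixels do not contribute to the convolution, and the output is
# rescaled by the ratio of valid pixels under each kernel window.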
class ResStack(tf.keras.Model):
"""Single ResNet stack consisting of multiple Bottleneck blocks."""
def __init__(self,
inplanes: int,
planes: int,
blocks: int,
strides: int = 1,
expansion: int = 4,
circular_pad: bool = False):
super(ResStack, self).__init__()
downsample = None
if strides != 1 or inplanes != planes * expansion:
downsample = tf.keras.Sequential([
tf.keras.layers.Conv2D(
planes * expansion,
kernel_size=1,
strides=strides,
padding='SAME',
use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization()
])
block_models = [
Bottleneck(
planes,
strides=strides,
expansion=expansion,
downsample=downsample,
circular_pad=circular_pad)
]
for _ in range(blocks - 1):
block_models.append(
Bottleneck(planes, expansion=expansion, circular_pad=circular_pad))
self.block = tf.keras.Sequential(block_models)
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
return self.block(x)
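
# Usage sketch (illustrative, not from the original source): a ResStack with
# strides > 1 halves the spatial resolution and widens channels to
# planes * expansion.
#
#   stack = ResStack(inplanes=256, planes=128, blocks=4, strides=2)
#   y = stack(tf.zeros((1, 64, 64, 256)))   # -> shape (1, 32, 32, 512)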
class TransBasicBlock(tf.keras.Model):
"""Bottleneck block with transposed convolutions.
This block performs upsampling if required.
"""
def __init__(self,
inplanes: int,
planes: int,
blocks: int,
strides: int = 1,
upsample=None,
circular_pad: bool = False):
super(TransBasicBlock, self).__init__()
conv2 = None
if upsample is not None and strides != 1:
conv2 = tf.keras.layers.Conv2DTranspose(
planes,
kernel_size=3,
strides=strides,
padding='SAME',
output_padding=1,
use_bias=False)
else:
conv2 = tf.keras.Sequential([
PadLayer(1, circular_pad=circular_pad),
tf.keras.layers.Conv2D(
planes,
kernel_size=3,
strides=strides,
padding='VALID',
use_bias=False)
])
self.main = tf.keras.Sequential([
PadLayer(1, circular_pad=circular_pad),
tf.keras.layers.Conv2D(
inplanes, kernel_size=3, strides=1, padding='VALID',
use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization(),
tf.keras.layers.ReLU(),
conv2,
tf.keras.layers.experimental.SyncBatchNormalization(),
])
self.upsample = upsample
self.relu = tf.keras.layers.ReLU()
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
residual = x
out_x = self.main(x)
if self.upsample is not None:
residual = self.upsample(x)
out_x += residual
out_x = self.relu(out_x)
return out_x
class ResStackTranspose(tf.keras.Model):
"""ResNet stack consisting of transposed blocks.
This stack performs upsampling if required (if strides > 1).
"""
def __init__(self,
inplanes: int,
planes: int,
blocks: int,
strides: int = 1,
circular_pad: bool = False):
super(ResStackTranspose, self).__init__()
upsample = None
if strides != 1:
upsample = tf.keras.Sequential([
tf.keras.layers.Conv2DTranspose(
planes,
kernel_size=2,
strides=strides,
padding='VALID',
use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization()
])
elif inplanes != planes:
upsample = tf.keras.Sequential([
tf.keras.layers.Conv2D(
planes, kernel_size=1, strides=strides, use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization()
])
block_models = []
for _ in range(blocks - 1):
block_models.append(
TransBasicBlock(
inplanes, inplanes, blocks, circular_pad=circular_pad))
block_models += [
TransBasicBlock(
inplanes,
planes,
blocks,
strides,
upsample=upsample,
circular_pad=circular_pad)
]
self.block = tf.keras.Sequential(block_models)
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
return self.block(x)
|
the-stack_0_16826 | # -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file
import re
import sphinx
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.extlinks',
'sphinx.ext.viewcode']
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build']
project = 'Sphinx'
copyright = '2007-2016, Georg Brandl and the Sphinx team'
version = sphinx.__released__
release = version
show_authors = True
html_theme = 'sphinx13'
html_theme_path = ['_themes']
modindex_common_prefix = ['sphinx.']
html_static_path = ['_static']
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
html_additional_pages = {'index': 'index.html'}
html_use_opensearch = 'http://sphinx-doc.org'
htmlhelp_basename = 'Sphinxdoc'
epub_theme = 'epub'
epub_basename = 'sphinx'
epub_author = 'Georg Brandl'
epub_publisher = 'http://sphinx-doc.org/'
epub_scheme = 'url'
epub_identifier = epub_publisher
epub_pre_files = [('index.xhtml', 'Welcome')]
epub_post_files = [('install.xhtml', 'Installing Sphinx'),
('develop.xhtml', 'Sphinx development')]
epub_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',
'_static/jquery.js', '_static/searchtools.js',
'_static/underscore.js', '_static/basic.css',
'search.html', '_static/websupport.js']
epub_fix_images = False
epub_max_image_width = 0
epub_show_urls = 'inline'
epub_use_index = False
epub_guide = (('toc', 'contents.xhtml', u'Table of Contents'),)
epub_description = 'Sphinx documentation generator system manual'
latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation',
'Georg Brandl', 'manual', 1)]
latex_logo = '_static/sphinx.png'
latex_elements = {
'fontpkg': '\\usepackage{palatino}',
'passoptionstopackages': '\\PassOptionsToPackage{svgnames}{xcolor}',
'printindex': '\\footnotesize\\raggedright\\printindex',
}
latex_show_urls = 'footnote'
autodoc_member_order = 'groupwise'
todo_include_todos = True
extlinks = {'duref': ('http://docutils.sourceforge.net/docs/ref/rst/'
'restructuredtext.html#%s', ''),
'durole': ('http://docutils.sourceforge.net/docs/ref/rst/'
'roles.html#%s', ''),
'dudir': ('http://docutils.sourceforge.net/docs/ref/rst/'
'directives.html#%s', '')}
man_pages = [
('contents', 'sphinx-all', 'Sphinx documentation generator system manual',
'Georg Brandl', 1),
('man/sphinx-build', 'sphinx-build', 'Sphinx documentation generator tool',
'', 1),
('man/sphinx-quickstart', 'sphinx-quickstart', 'Sphinx documentation '
'template generator', '', 1),
('man/sphinx-apidoc', 'sphinx-apidoc', 'Sphinx API doc generator tool',
'', 1),
]
texinfo_documents = [
('contents', 'sphinx', 'Sphinx Documentation', 'Georg Brandl',
'Sphinx', 'The Sphinx documentation builder.', 'Documentation tools',
1),
]
# We're not using intersphinx right now, but if we did, this would be part of
# the mapping:
intersphinx_mapping = {'python': ('https://docs.python.org/2/', None)}
# Sphinx document translation with sphinx gettext feature uses these settings:
locale_dirs = ['locale/']
gettext_compact = False
# -- Extension interface -------------------------------------------------------
from sphinx import addnodes # noqa
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
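
# For example, the event signature "env-updated(app, env)" is parsed so that
# "env-updated" becomes the description name and "app, env" its parameter list.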
def setup(app):
from sphinx.ext.autodoc import cut_lines
from sphinx.util.docfields import GroupedField
app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_object_type('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')
fdesc = GroupedField('parameter', label='Parameters',
names=['param'], can_collapse=True)
app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
doc_field_types=[fdesc])
|
the-stack_0_16827 | from typing import List
from src.utils.normalizer import text_normalize
from src.utils.tokenizer import sentences_seg
class SentenceHandler(object):
def sentence_processor(self, sentences,
min_length: int = 4,
max_length: int = 128) -> List[str]:
"""
        Processes pre-segmented sentences, keeping or splitting them by token length.
        :param sentences: The sentences to process.
        :param min_length: The minimum token length a sentence must exceed to be kept.
        :param max_length: The maximum token length a sentence may have; longer sentences are split into chunks.
        :return: Sentences.
"""
to_return = []
for s in sentences:
num_token = len(s.split())
if num_token > max_length:
num_split = num_token//max_length
if num_token%max_length > 0:
num_split += 1
sent_size = num_token//num_split
for i in range(num_split):
start = i*sent_size
end = start + sent_size
if i == num_split - 1:
end = num_token
to_return.append(" ".join(s.split()[start:end]))
elif num_token > min_length:
to_return.append(s)
return to_return
def process(self, body: str,
min_length: int = 4,
max_length: int = 128) -> List[str]:
"""
Processes the content sentences.
:param body: The raw string body to process
        :param min_length: Minimum token length a sentence must exceed to be kept
        :param max_length: Maximum token length a sentence may have (longer sentences are split)
:return: Returns a list of sentences.
"""
sentences = sentences_seg(text_normalize(body))
return self.sentence_processor(sentences, min_length, max_length)
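
    # Usage sketch (illustrative, not from the original source):
    #
    #   handler = SentenceHandler()
    #   sentences = handler("Some long body of text ...", min_length=4, max_length=128)
    #
    # Sentences with at most min_length tokens are dropped; sentences longer
    # than max_length tokens are split into roughly equal chunks.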
def __call__(self, body: str,
min_length: int = 4,
max_length: int = 128) -> List[str]:
"""
Processes the content sentences.
:param body: The raw string body to process
        :param min_length: Minimum token length a sentence must exceed to be kept
        :param max_length: Maximum token length a sentence may have (longer sentences are split)
:return: Returns a list of sentences.
"""
return self.process(body, min_length, max_length) |
the-stack_0_16829 | import wx, numpy as np
from .boxutil import cross, multiply, lay, mat
from .imutil import mix_img
from .mark import drawmark
from time import time
class Canvas (wx.Panel):
scales = [0.03125, 0.0625, 0.125, 0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 8, 10, 15, 20, 30, 50]
def __init__(self, parent, autofit=False):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.TAB_TRAVERSAL )
self.img = None
self.back = None
self.mode = 'set'
self.winbox = None
self.conbox = None
self.oribox = None
self.outbak = None
self.outimg = None
self.outrgb = None
self.outbmp = None
self.outint = None
self.buffer = None
lut = np.arange(256*3)
lut.shape = (256,3)
lut = lut.astype(np.uint8)
self.lut = lut
self.rg = (0, 255)
self.cn = 0
self._lut = lut
self._rg = (0, 255)
self._cn = 0
self.marks = {}
self.scaidx = 6
self.autofit = autofit
self.scrbox = wx.DisplaySize()
self.bindEvents()
def bindEvents(self):
for event, handler in [ \
(wx.EVT_SIZE, self.on_size),
(wx.EVT_MOUSE_EVENTS, self.on_mouseevent),
(wx.EVT_IDLE, self.on_idle),
(wx.EVT_PAINT, self.on_paint)]:
self.Bind(event, handler)
def on_mouseevent(self, me):
if me.ButtonDown():
if me.GetButton()==1:
self.oldxy = me.GetX(), me.GetY()
if me.GetButton()==3:
self.fit()
wheel = np.sign(me.GetWheelRotation())
if wheel!=0:
if wheel == 1:
self.zoomout(me.GetX(), me.GetY())
if wheel == -1:
self.zoomin(me.GetX(), me.GetY())
if me.Dragging():
x, y = self.oldxy
self.move(me.GetX()-x, me.GetY()-y)
self.oldxy = me.GetX(), me.GetY()
def initBuffer(self):
box = self.GetClientSize()
self.buffer = wx.Bitmap(*box)
self.winbox = [0, 0, *box]
def fit(self):
oriw = self.oribox[2]-self.oribox[0]
orih = self.oribox[3]-self.oribox[1]
if not self.autofit: a,b,c,d = self.winbox
else:
(a,b),(c,d) = (0,0), self.scrbox
c, d = c*0.9, d*0.9
for i in self.scales[6::-1]:
if oriw*i<c-a and orih*i<d-b: break
self.scaidx = self.scales.index(i)
self.zoom(i, 0, 0)
self.update()
def set_img(self, img):
self.img = img
shp = list(img.shape[1::-1])
if self.oribox and self.oribox[2:] == shp: return
self.conbox = [0, 0, *shp]
self.oribox = [0, 0, *shp]
#if self.conbox is None: self.fit()
def set_back(self, back):
self.back = back
def set_rg(self, rg, b=False):
if b: self._rg = rg
else: self.rg = rg
def set_lut(self, lut, b=False):
if b: self._lut = lut
else: self.lut = lut
def set_cn(self, cn, b=False):
if b: self._cn = cn
else: self.cn = cn
def set_mode(self, mode): self.mode = mode
@property
def scale(self):
conw = self.conbox[2]-self.conbox[0]
oriw = self.oribox[2]-self.oribox[0]
conh = self.conbox[3]-self.conbox[1]
orih = self.oribox[3]-self.oribox[1]
l1, l2 = conw**2+conh**2, oriw**2+orih**2
return l1**0.5 / l2**0.5
def move(self, dx, dy):
arr = np.array(self.conbox)
arr = arr.reshape((2,2))+(dx, dy)
self.conbox = arr.ravel().tolist()
self.update()
def on_size(self, event):
if self.img is None: return
self.initBuffer()
self.update()
def on_idle(self, event):pass
def on_paint(self, event):
if self.buffer is None: return
wx.BufferedPaintDC(self, self.buffer)
def draw_image(self, dc, img, back, mode):
out, bak, rgb = self.outimg, self.outbak, self.outrgb
csbox = cross(self.winbox, self.conbox)
shp = csbox[3]-csbox[1], csbox[2]-csbox[0]
o, m = mat(self.oribox, self.conbox, csbox)
        shp = tuple(np.array(shp).round().astype(int))  # np.int is removed in newer NumPy
if out is None or (out.shape, out.dtype) != (shp, img.dtype):
self.outimg = np.zeros(shp, dtype=img.dtype)
if not back is None and (
bak is None or (bak.shape, bak.dtype) != (shp, back.dtype)):
self.outbak = np.zeros(shp, dtype=back.dtype)
if rgb is None or rgb.shape[:2] != shp:
self.outrgb = np.zeros(shp+(3,), dtype=np.uint8)
self.outint = np.zeros(shp, dtype=np.uint8)
buf = memoryview(self.outrgb)
self.outbmp = wx.Bitmap.FromBuffer(*shp[::-1], buf)
#if not back is None: print('has back image')
mix_img(back, m, o, shp, self.outbak,
self.outrgb, self.outint,
self._rg, self._lut, cns=self._cn, mode='set')
mix_img(self.img, m, o, shp, self.outimg,
self.outrgb, self.outint,
self.rg, self.lut, cns=self.cn, mode=self.mode)
self.outbmp.CopyFromBuffer(memoryview(self.outrgb))
dc.DrawBitmap(self.outbmp, *csbox[:2])
def update(self):
start = time()
lay(self.winbox, self.conbox)
dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
dc.Clear()
self.draw_image(dc, self.img, self.back, 0)
for i in self.marks:
if self.marks[i] is None: continue
if callable(self.marks[i]):
self.marks[i](dc, self.to_panel_coor, k = self.scale)
else:
drawmark(dc, self.to_panel_coor, self.marks[i], k=self.scale)
dc.UnMask()
print('frame rate:',int(1/max(0.001, time()-start)))
def center(self, x, y, coord='win'):
if coord=='data':
x,y = self.to_panel_coor(x, y)
dx = (self.winbox[2]-self.winbox[0])/2 - x
dy = (self.winbox[3]-self.winbox[1])/2 - y
for i,j in zip((0,1,2,3),(dx,dy,dx,dy)):
self.conbox[i] += j
lay(self.winbox, self.conbox)
def zoom(self, k, x, y, coord='win'):
if coord=='data':
x,y = self.to_panel_coor(x, y)
box = np.array(self.conbox).reshape((2,2))
box = (box - (x,y)) / self.scale * k + (x, y)
self.conbox = box.ravel().tolist()
lay(self.winbox, self.conbox)
if not self.autofit: return
a,b,c,d = self.conbox
if c-a<self.scrbox[0]*0.9 and d-b<self.scrbox[1]*0.9:
self.SetInitialSize((c-a+4, d-b+4))
def zoomout(self, x, y, coord='win', grade=True):
self.scaidx = min(self.scaidx + 1, len(self.scales)-1)
self.zoom(self.scales[self.scaidx], x, y, coord)
self.update()
def zoomin(self, x, y, coord='win'):
self.scaidx = max(self.scaidx - 1, 0)
self.zoom(self.scales[self.scaidx], x, y, coord)
self.update()
def to_data_coor(self, x, y):
x = (x - self.conbox[0])/self.scale
y = (y - self.conbox[1])/self.scale
return x, y
def to_panel_coor(self, x, y):
x = x * self.scale + self.conbox[0]
y = y * self.scale + self.conbox[1]
return x, y
def __del__(self):
self.img = self.back = None
print('========== canvas del')
if __name__=='__main__':
msk = np.zeros((512,512), dtype=np.uint8)
msk[100:200,100:200] = 1
msk[200:300,200:300] = 2
msk[300:400,300:400] = 3
lut = np.array([(0,0,0),(255,0,0),(0,255,0),(0,0,255)], dtype=np.uint8)
from skimage.data import astronaut, camera
app = wx.App()
frame = wx.Frame(None)
canvas = Canvas(frame)
canvas.set_img(msk)
canvas.set_lut(lut)
canvas.set_cn(0)
canvas.set_back(astronaut())
canvas.set_cn('rgb', 1)
canvas.set_mode('msk')
x = np.arange(512)
y = np.sin(x/30) * 100 + 256
canvas.marks['line'] = {'type':'line', 'lw':3, 'body':np.array([x,y]).T.tolist()}
frame.Show(True)
app.MainLoop()
|
the-stack_0_16831 | # Problem 217: Contains Duplicate
from typing import List


class Solution:
# Approach 1 - Using sort
def containsDuplicate(self, nums) -> bool:
nums.sort()
for index in range(len(nums)-1):
if nums[index] == nums[index+1]:
return True
return False
# Approach 2 - Using built-in count method
def containsDuplicate2(self, nums) -> bool:
for num in nums:
if nums.count(num) > 1:
return True
return False
# Approach 3 - Using visited list
def containsDuplicate3(self, nums) -> bool:
foo = []
for num in nums:
if num in foo:
return True
else:
foo.append(num)
return False
# Approach 4 - Using Set
def containsDuplicate4(self, nums: List[int]) -> bool:
return True if len(set(nums)) < len(nums) else False
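
    # Note (added): the set-based approach above runs in O(n) time and O(n)
    # space; approach 1 is O(n log n) after sorting, while approaches 2 and 3
    # degrade to O(n^2) because of the repeated scans.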
# Test
solution = Solution()
# Expected: True
nums = [1,2,3,4,5,6,1]
print(solution.containsDuplicate(nums)) |
the-stack_0_16832 | from appinit.lib.db import Manager
class PermissionManager(object):
def __init__(self, session):
self.manager = Manager()
self.db = self.manager.db('appinit')
self.session = session
# self.settings = settings
def get_application_uids(self, application, permission):
app = PermissionsApplication(self.db, application)
return app.get_uids(permission)
def list_user_permissions(self):
user = PermissionsUser(self.db, self.session.uid)
return user.list_permissions()
def get_application(self, app=None):
permissions = self.list_user_permissions()
if app == None:
return permissions
else:
if app in permissions:
return permissions[app]
else:
return []
class Permission(object):
def __init__(self):
pass
class PermissionsApplication(object):
def __init__(self, db, application):
self.application = application
self.db = db
# users.permissions.get
    def get_uids(self, permission):
pipeline = [
{ "$match": {"application": self.application}},
{ "$unwind": "$permissions" },
{ "$group":
{
"_id": "$permissions",
"uids": {
"$addToSet": "$uid",
},
}
},
{ "$match":
{
"_id": permission,
}
},
]
cursor = self.db.permissions.aggregate(pipeline)
if cursor != None:
for i in cursor:
return i['uids']
return None
# permissions.applications.add
def add(self, uid, permission):
cursor = self.db.permissions.find_one({
"application": self.application,
"uid": uid,
})
if cursor is None:
add_user.call(uid=uid, application=self.application)
self.db.permissions.update(
{
"application": self.application,
"uid": kwargs["uid"]
},
{
"$push": {
"permissions": permission
}
}
)
return get_application.call(application=self.application)
class PermissionsUser(object):
def __init__(self, db, uid):
self.uid = uid
self.db = db
self.manager = Manager()
def list_permissions(self):
permissions = {}
apps = self.manager.get_application()
apps.append({"name": "system"})
for app in apps:
if app['name'] != "system":
list_name = app['api']['name'].split("_")
camel_case = ''.join([list_name[x].title() for x in range(1, len(list_name))])
name = list_name[0] + camel_case
else:
name = app['name']
permissions[name] = {}
all_permissions = self.db.permissions.find({"application": app['name']}).distinct("permissions")
user_permissions = self.db.permissions.find_one({"uid": self.uid, "application": app['name']})
if user_permissions != None:
all_true = False
if user_permissions['application'] == app['name']:
all_true = "admin" in user_permissions['permissions']
for p in user_permissions['permissions']:
key = 'is_' + p
if all_true:
permissions[name][key] = True
elif p in all_permissions:
permissions[name][key] = True
else:
permissions[name][key] = False
return permissions
class PermissionsModule(object):
def __init__(self):
pass |
the-stack_0_16833 | # Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oslo_messaging
ATTR_NOT_SPECIFIED = object()
class Mapping(object):
def __init__(self, mapping):
self.direct_mapping = mapping
self.reverse_mapping = {}
for key, value in mapping.items():
self.reverse_mapping[value] = key
_SINGLETON_MAPPING = Mapping({
ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@",
})
class KingbirdSerializer(oslo_messaging.Serializer):
def __init__(self, base=None):
super(KingbirdSerializer, self).__init__()
self._base = base
def serialize_entity(self, context, entity):
if isinstance(entity, dict):
for key, value in entity.items():
entity[key] = self.serialize_entity(context, value)
elif isinstance(entity, list):
for i, item in enumerate(entity):
entity[i] = self.serialize_entity(context, item)
elif entity in _SINGLETON_MAPPING.direct_mapping:
entity = _SINGLETON_MAPPING.direct_mapping[entity]
if self._base is not None:
entity = self._base.serialize_entity(context, entity)
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict):
for key, value in entity.items():
entity[key] = self.deserialize_entity(context, value)
elif isinstance(entity, list):
for i, item in enumerate(entity):
entity[i] = self.deserialize_entity(context, item)
elif entity in _SINGLETON_MAPPING.reverse_mapping:
entity = _SINGLETON_MAPPING.reverse_mapping[entity]
if self._base is not None:
entity = self._base.deserialize_entity(context, entity)
return entity
def serialize_context(self, context):
if self._base is not None:
context = self._base.serialize_context(context)
return context
def deserialize_context(self, context):
if self._base is not None:
context = self._base.deserialize_context(context)
return context
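
# Illustrative sketch (not part of the original module): the serializer maps the
# ATTR_NOT_SPECIFIED sentinel to a plain string so it can cross RPC boundaries,
# and restores it on the receiving side.
#
#   serializer = KingbirdSerializer()
#   wire = serializer.serialize_entity(None, {'name': ATTR_NOT_SPECIFIED})
#   # wire == {'name': '@@**ATTR_NOT_SPECIFIED**@@'}
#   back = serializer.deserialize_entity(None, wire)
#   # back == {'name': ATTR_NOT_SPECIFIED}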
|
the-stack_0_16834 | import sys
import matplotlib.pyplot as plt
from imtoolkit import Parameters, IMCode, IdealRayleighChannel, CoherentMLDSimulator
plt.switch_backend('agg')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['markers.fillstyle'] = 'none'
def simulateAMI(argstr):
params = Parameters(argstr)
code = IMCode(params.dm, params.M, params.K, params.Q, params.mod, params.L, meanPower=1)
channel = IdealRayleighChannel(params.ITi, params.M, params.N)
sim = CoherentMLDSimulator(code.codes, channel)
return sim.simulateAMIParallel(params, outputFile=False, printValue=False)
if __name__ == '__main__':
fig, ax = plt.subplots()
ax.set_xlabel("SNR [dB]")
ax.set_ylabel("AMI [bit/symbol]")
ax.set_xlim(-20, 20)
ax.set_ylim(0, 4)
ax.tick_params(pad = 8)
ret = simulateAMI("AMIP_sim=coh_code=index_dm=dic_M=4_K=4_Q=1_L=2_mod=PSK_N=4_ITo=1_ITi=1e4_snrfrom=-20.00_to=20.00_len=21")
ax.plot(ret["snr_dB"], ret["ami"], color="k", marker="s", linestyle="-", label="BLAST")
ret = simulateAMI("AMIP_sim=coh_code=index_dm=opt_M=4_K=1_Q=4_L=4_mod=PSK_N=4_ITo=1_ITi=1e4_snrfrom=-20.00_to=20.00_len=21")
ax.plot(ret["snr_dB"], ret["ami"], color="r", marker="o", linestyle="-", label="Spatial modulation")
handles, labels = ax.get_legend_handles_labels()
legend = ax.legend(handles, labels, loc="best", frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('white')
#plt.show()
plt.savefig(sys.argv[0].replace(".py", ".svg"))
|
the-stack_0_16835 | import comms as comms
import datetime as datetime
import json as json
import uuid as uuid
import time as time
class DeviceTable:
def __init__(self, sys_settings_fname, device_table_fname):
self.device_table_fname = device_table_fname
self.sub_list = [b'new_arp_pkt']
self.comms = comms.Comms(sys_settings_fname)
self.comms.set_subscriptions(self.sub_list)
self.device_list = load_device_table(device_table_fname)
self.device_lut = update_device_lut(self.device_list)
self.comms.send_msg('new_table', self.device_list)
self.last_new_table_pub_t = 0
def run_device_table_routine(self):
msg = self.comms.recv_msg()
if msg:
self.process_message(msg)
time.sleep(0.1)
def process_message(self, msg):
payload = json.loads(msg[1].decode('utf-8'))
src_mac = payload['sender_mac_as_str_with_colons']
src_ip = payload['sender_ip_as_str_with_dots']
pub_new_table_flag = False
now_iso_fmt = datetime.datetime.now().isoformat()
if src_mac in self.device_lut.keys():
self.device_lut[src_mac]['last_seen'] = now_iso_fmt
device_ip = self.device_lut[src_mac]['ip']
if device_ip != src_ip:
pub_new_table_flag = True
self.device_lut[src_mac]['ip'] = src_ip
else:
new_device = {
'id': str(uuid.uuid4()),
'mac': src_mac,
'ip': src_ip,
'last_seen': now_iso_fmt
}
self.device_list.append(new_device)
self.device_lut = update_device_lut(self.device_list)
pub_new_table_flag = True
# TODO: save data to database
save_device_table(self.device_table_fname, self.device_list)
if pub_new_table_flag or time.time() - self.last_new_table_pub_t > 5.0:
self.last_new_table_pub_t = time.time()
self.comms.send_msg('new_table', self.device_list)
def clean_up(self):
self.comms.close_pub_sub()
def load_device_table(device_table_fname):
# TODO: Load from db
with open(device_table_fname, 'r') as f:
tmp_dict = json.load(f)
return tmp_dict['devices']
def save_device_table(device_table_fname, device_list):
with open(device_table_fname, 'w') as f:
out_dict = {
'devices': device_list
}
json.dump(out_dict, f, indent=2)
def update_device_lut(device_list):
device_lut = {
dev['mac']: dev for dev in device_list
}
return device_lut
def main():
mt = DeviceTable('sys_settings.json', 'device_table.json')
is_running = True
print('Starting device table manager...')
while is_running:
try:
mt.run_device_table_routine()
except KeyboardInterrupt:
print('Closing device table manager.')
is_running = False
mt.clean_up()
if __name__ == '__main__':
main()
|
the-stack_0_16836 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scatter3d.marker.colorbar.title.font',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
the-stack_0_16838 | import sys, os
DEFAULT_VERS = "310"
SOURCE_DIR = "source"
HOOK_MAGIC = "// hook_from "
buildVersion = None
patchConfig = {
"build_id" : {},
"nso_load_addr" : {},
}
def initConfig():
configPath = os.path.join(PATCH_CONFIG_DIR, buildVersion + PATCH_CONFIG_EXTENSION)
# read config file
with open(configPath) as configFile:
curConfigName = None
for line in configFile:
line = line.strip()
configNameLineMatch = re.match(r'\[(.+)\]', line)
if configNameLineMatch:
curConfigName = configNameLineMatch.group(1)
continue
if '=' in line:
configNSO, configValue = line.split('=', 1)
if not configNSO.isalnum():
continue
if '+' in configValue:
print("genPatch.py error:", line, "awaits implementation")
sys.exit(-1)
                # addresses are used in arithmetic later (see calcJump), so parse
                # numeric values (hex or decimal); keep the raw string if parsing fails
                try:
                    patchConfig[curConfigName][configNSO] = int(configValue, 0)
                except ValueError:
                    patchConfig[curConfigName][configNSO] = configValue
def calcJump(from_addr_str, dest_func, vers=DEFAULT_VERS):
from_addr = int(from_addr_str, 16)
dest_func = dest_func + "("
mapFilePath = "build" + vers + "/skyline" + vers + ".map"
with open(mapFilePath, 'r') as f:
mapFile = f.read()
foundPos = mapFile.find(dest_func) - 34
foundLine = mapFile[foundPos:mapFile.find("\n", foundPos)]
print("Found:")
print(foundLine)
func_addr = int(foundLine[:foundLine.find(" ")], 0)
jump_offset = patchConfig["nso_load_addr"]["subsdk1"] + func_addr - from_addr
print("Jump needed: " + hex(jump_offset))
initConfig()
if len(sys.argv) > 3:
calcJump(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) > 2:
calcJump(sys.argv[1], sys.argv[2])
else:
hasOutput = False
for root, subdirs, files in os.walk(SOURCE_DIR):
for file in files:
with open(root+"/"+file, 'r') as f:
file_iter = iter(f.readlines())
for line in file_iter:
if HOOK_MAGIC in line:
hook_addr = line[len(HOOK_MAGIC):-1]
line = next(file_iter)
hook_func = line[:line.find('(')]
hook_func = hook_func[hook_func.rfind(' ') + 1:]
calcJump(hook_addr, hook_func)
hasOutput = True
if not hasOutput:
print("Usage: %s [from addr] [to func name] (s2 vers, like '310')" % sys.argv[0])
|
the-stack_0_16840 | import rlkit.misc.hyperparameter as hyp
from rlkit.demos.source.dict_to_mdp_path_loader import EncoderDictToMDPPathLoader
from rlkit.launchers.experiments.ashvin.awac_rig import awac_rig_experiment
from rlkit.launchers.launcher_util import run_experiment
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy, GaussianMixturePolicy
from rlkit.envs.encoder_wrappers import PresamplingEncoderWrappedEnv
from sawyer_control.envs.sawyer_grip import SawyerGripEnv
#from sawyer_control.envs.sawyer_grip_stub import SawyerGripEnv
from rlkit.torch.networks import Clamp
from rlkit.torch.vae.vq_vae import VQ_VAE
from rlkit.torch.vae.vq_vae_trainer import VQ_VAETrainer
from rlkit.torch.grill.common import train_vqvae
path_func = lambda name: '/media/ashvin/data2/data/fixed_data_overtrained/'+ name
mini_demos = [
dict(path=path_func('fixed_drawer_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
dict(path=path_func('fixed_pot_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
dict(path=path_func('fixed_pnp_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
dict(path=path_func('fixed_tray_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
]
all_demos = [
dict(path=path_func('fixed_drawer_demos.npy'), obs_dict=True, is_demo=True,),
dict(path=path_func('fixed_pot_demos.npy'), obs_dict=True, is_demo=True,),
dict(path=path_func('fixed_pnp_demos.npy'), obs_dict=True, is_demo=True,),
dict(path=path_func('fixed_tray_demos.npy'), obs_dict=True, is_demo=True,),
]
if __name__ == "__main__":
variant = dict(
imsize=48,
env_class=SawyerGripEnv,
env_kwargs=dict(
action_mode='position',
config_name='ashvin_config',
reset_free=False,
position_action_scale=0.05,
max_speed=0.4,
step_sleep_time=0.2,
crop_version_str="crop_val_torch",
),
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256, ],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
qf_kwargs=dict(
hidden_sizes=[256, 256],
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3e-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=0, #25001 #HERE
policy_weight_decay=1e-4,
q_weight_decay=0,
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
compute_bc=True,
reparam_weight=0.0,
awr_weight=1.0,
bc_weight=0.0,
reward_transform_kwargs=None,
terminal_transform_kwargs=None,
),
max_path_length=75, #50
algo_kwargs=dict(
batch_size=1024, #1024
num_epochs=101, #1001
num_eval_steps_per_epoch=600, #500
num_expl_steps_per_train_loop=600, #500
num_trains_per_train_loop=600, #500
min_num_steps_before_training=150, #150
),
replay_buffer_kwargs=dict(
fraction_future_context=0.6,
fraction_distribution_context=0.1, # TODO: Try less?
max_size=int(5E5), # HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE (DOUBLE CHECK THAT DEMOS FIT!!!!)
),
demo_replay_buffer_kwargs=dict(
fraction_future_context=0.6,
fraction_distribution_context=0.1, # TODO: Try less?
),
reward_kwargs=dict(
reward_type='sparse',
epsilon=1.0,
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
save_video=True,
save_video_kwargs=dict(
save_video_period=1,
pad_color=0,
),
encoder_wrapper=PresamplingEncoderWrappedEnv, # Uncomment if using pixelcnn
reset_keys_map=dict(
image_observation="initial_latent_state"
),
path_loader_class=EncoderDictToMDPPathLoader,
path_loader_kwargs=dict(
recompute_reward=True,
),
renderer_kwargs=dict(
create_image_format='HWC',
output_image_format='CWH',
flatten_image=True,
width=48,
height=48,
),
add_env_demos=False,
add_env_offpolicy_data=False,
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
evaluation_goal_sampling_mode="presampled_images",
exploration_goal_sampling_mode="presampled_conditional_prior",
train_vae_kwargs=dict(
imsize=48,
beta=1,
beta_schedule_kwargs=dict(
x_values=(0, 250),
y_values=(0, 100),
),
num_epochs=1501, #1501
embedding_dim=5,
dump_skew_debug_plots=False,
decoder_activation='sigmoid',
use_linear_dynamics=False,
generate_vae_dataset_kwargs=dict(
N=1000,
n_random_steps=2,
test_p=.9,
dataset_path={
'train': 'demos/icra2021/dataset_v1_train.npy',
'test': 'demos/icra2021/dataset_v1_test.npy',
},
augment_data=False,
use_cached=False,
show=False,
oracle_dataset=False,
oracle_dataset_using_set_to_goal=False,
non_presampled_goal_img_is_garbage=False,
random_rollout_data=True,
random_rollout_data_set_to_goal=True,
conditional_vae_dataset=True,
save_trajectories=False,
enviorment_dataset=False,
tag="ccrig_tuning_orig_network",
),
vae_trainer_class=VQ_VAETrainer,
vae_class=VQ_VAE,
vae_kwargs=dict(
input_channels=3,
imsize=48,
),
algo_kwargs=dict(
key_to_reconstruct='x_t',
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=128,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
weight_decay=0.0,
skew_dataset=False,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=10,
),
train_model_func=train_vqvae,
presampled_goal_kwargs=dict(
eval_goals='/media/ashvin/data2/data/val/v1/curr_goal_eval_goals.pkl',
expl_goals=None,
),
launcher_config=dict(
unpack_variant=True,
region='us-west-1',
),
logger_config=dict(
snapshot_mode='gap',
snapshot_gap=1,
),
pickle_paths=True,
pretrained_vae_path=path_func('best_vqvae.pt'),
pretrained_algo_path='/home/ashvin/data/ashvin/icra2021/final/fixed-agent-overtrained/run18/id0/itr_1.pt',
        #pretrained_algo_path=path_func('pretrained_agent_eps.pt'), #pretrained_agent.pt (fixed data policy), pretrained_agent_eps.pt (fixed data policy), drawer_agent.pt (drawer data policy)
)
search_space = {
"seed": range(1),
'path_loader_kwargs.demo_paths': [mini_demos], #CHANGED
'deterministc_eval': [False],
'reward_kwargs.epsilon': [1.0,], #1.75 is mean
'trainer_kwargs.beta': [0.3],
'num_pybullet_objects':[None],
'policy_kwargs.min_log_std': [-6],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.awr_use_mle_for_vf': [True],
'trainer_kwargs.awr_sample_actions': [False],
'trainer_kwargs.clip_score': [2],
'trainer_kwargs.awr_min_q': [True],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0)],
'qf_kwargs.output_activation': [Clamp(max=0)],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
        if variant['pretrained_algo_path'] == path_func('pretrained_agent.pt'):
            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=2.0)
        if variant['pretrained_algo_path'] == path_func('pretrained_agent_eps.pt'):
            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=1.0)
        if variant['pretrained_algo_path'] == path_func('drawer_agent.pt'):
            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=1.0)
variants.append(variant)
run_variants(awac_rig_experiment, variants, run_id=21) #HERE
|
the-stack_0_16841 | from . import cursor, db
class College():
def __init__(
self,
code: str = None,
name: str = None) -> None:
self.code = code
self.name = name
def get_all(self, page_num: int = None, item_per_page: int = None, paginate: bool = True) -> list:
if not paginate:
return self.college_list()
offset = (page_num - 1) * item_per_page
query = f'''
SELECT college.code, college.name, COUNT(*) AS courses, enrolled.student as enrolled
FROM college
JOIN course
ON college.code = course.college
LEFT JOIN (SELECT collegecode, COUNT(*) as student
FROM students
GROUP BY collegecode) enrolled
ON college.code = enrolled.collegecode
GROUP BY college.code
LIMIT {item_per_page} OFFSET {offset}
'''
cursor.execute(query)
result = cursor.fetchall()
colleges = [list(college) for college in result]
all_colleges = self.college_list()
for college in all_colleges:
if college[0] not in [code[0] for code in colleges]:
colleges.append([college[0], college[1], None, None])
return colleges
@staticmethod
def get_total() -> int:
query = '''SELECT * FROM college'''
cursor.execute(query)
result = cursor.fetchall()
total = len(result)
return total
def college_list(self) -> list:
query = '''
SELECT code, name
FROM college;
'''
cursor.execute(query)
result = cursor.fetchall()
colleges = [list(college) for college in result]
return colleges
@staticmethod
def get_departments() -> list:
query = '''
SELECT college.code, course.name
FROM college
JOIN course
ON college.code = course.college
'''
cursor.execute(query)
result = cursor.fetchall()
departments = [list(department) for department in result]
return departments
def search(self, keyword: str = None, field: str = None) -> list:
keyword = keyword.upper()
colleges = self.get_all(paginate=False)
result = []
if field is None:
result = self.search_by_field(colleges, keyword, 'all')
elif field == 'code':
result = self.search_by_field(colleges, keyword, 'code')
elif field == 'name':
result = self.search_by_field(colleges, keyword, 'name')
elif field == 'coursecount':
result = self.search_by_field(colleges, keyword, 'coursecount')
elif field == 'studentcount':
result = self.search_by_field(colleges, keyword, 'studentcount')
return result
@staticmethod
def search_by_field(rows: list = None, keyword: str = None, field: str = None) -> list:
result = []
for row in rows:
row_allcaps = [str(cell).upper() for cell in row]
if field == 'all':
if keyword in row_allcaps:
result.append(row)
if field == 'code':
if keyword == row_allcaps[0]:
result.append(row)
return result
elif field == 'name':
if keyword == row_allcaps[1]:
result.append(row)
elif field == 'coursecount':
if keyword in row_allcaps[2]:
result.append(row)
elif field == 'studentcount':
if keyword in row_allcaps[3]:
result.append(row)
return result
def add_new(self) -> None:
query = f'''
INSERT INTO college (
code,
name)
VALUES (
'{self.code}',
'{self.name}')
'''
cursor.execute(query)
db.commit()
return None
@staticmethod
def delete(code: str = None) -> None:
query = f'''
DELETE FROM college
WHERE code='{code}'
'''
cursor.execute(query)
db.commit()
return None
def update(self) -> None:
query = f'''
UPDATE college
SET
code = '{self.code}',
name = '{self.name}'
WHERE
code = '{self.code}'
'''
cursor.execute(query)
db.commit()
return None
@staticmethod
def get_collegecode_for(course_name: str = None) -> str:
query = f'''
SELECT code
FROM college
WHERE name = '{course_name}'
'''
cursor.execute(query)
code = cursor.fetchone()[0]
return code
@staticmethod
def get_collegecodes() -> list:
query = '''
SELECT code
FROM college
'''
cursor.execute(query)
result = cursor.fetchall()
CODES = [code[0] for code in result]
return CODES
|
the-stack_0_16842 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import os
import shutil
import sys
import tempfile
from pyflink.dataset import ExecutionEnvironment
from pyflink.table import BatchTableEnvironment, TableConfig
def word_count():
content = "line Licensed to the Apache Software Foundation ASF under one " \
"line or more contributor license agreements See the NOTICE file " \
"line distributed with this work for additional information " \
"line regarding copyright ownership The ASF licenses this file " \
"to you under the Apache License Version the " \
"License you may not use this file except in compliance " \
"with the License"
t_config = TableConfig()
env = ExecutionEnvironment.get_execution_environment()
t_env = BatchTableEnvironment.create(env, t_config)
# register Results table in table environment
tmp_dir = tempfile.gettempdir()
result_path = tmp_dir + '/result'
if os.path.exists(result_path):
try:
if os.path.isfile(result_path):
os.remove(result_path)
else:
shutil.rmtree(result_path)
except OSError as e:
logging.error("Error removing directory: %s - %s.", e.filename, e.strerror)
logging.info("Results directory: %s", result_path)
sink_ddl = """
create table Results(
word VARCHAR,
`count` BIGINT
) with (
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{}'
)
""".format(result_path)
t_env.execute_sql(sink_ddl)
elements = [(word, 1) for word in content.split(" ")]
t_env.from_elements(elements, ["word", "count"]) \
.group_by("word") \
.select("word, count(1) as count") \
.insert_into("Results")
t_env.execute("word_count")
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
word_count()
|
the-stack_0_16843 | class Element:
def __init__(self, *children):
self.parent = None
self.children = []
for child in children:
self.append(child)
def append(self, element):
"""Append a given element as a last child of this element"""
if isinstance(element, Element):
element.parent = self
self.children.append(element)
return self
def prepend(self, element):
"""Prepend a given element as a first child of this element"""
if isinstance(element, Element):
element.parent = self
self.children.insert(0, element)
return self
def wrap(self, element):
"""Wrap this element in a given element"""
deepest = element
while deepest.first_child:
deepest = deepest.first_child
deepest.append(self)
return element
def dump(self, indent=2):
def do_dump(element, depth=0):
yield '{indent}{element}'.format(
indent=' ' * (indent * depth),
element=repr(element)
)
if isinstance(element, Element):
for child in element:
yield from do_dump(child, depth + 1)
return '\n'.join(do_dump(self))
@property
def first_child(self):
return self.children[0] if self.children else None
@property
def last_child(self):
return self.children[-1] if self.children else None
def __iter__(self):
return iter(self.children)
def __eq__(self, other):
if type(self) is not type(other):
return False
mine = dict(vars(self))
del mine['parent']
others = dict(vars(other))
del others['parent']
return mine == others
def __repr__(self):
identifier = '{:04x}'.format(id(self))
keys = vars(self).keys()
keys -= {'parent', 'children'}
keys = filter(lambda key: not key.startswith('_'), keys)
mappings = [
'{}={}'.format(key, repr(getattr(self, key)))
for key in sorted(keys)
]
return '{name}#{identifier}({mappings})'.format(
name=self.__class__.__name__,
identifier=identifier[-4:],
mappings=', '.join(mappings)
)
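# Illustrative usage sketch (not part of the original module): builds a small
# tree out of bare Element instances to show append(), wrap() and dump().
if __name__ == '__main__':
    root = Element(Element(), Element())
    root.append(Element())
    root.first_child.wrap(Element())
    print(root.dump())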
|
the-stack_0_16844 | from typing import Any, Dict, List, Optional
import aiohttp
from spare.cmds.units import units
from spare.consensus.block_record import BlockRecord
from spare.rpc.farmer_rpc_client import FarmerRpcClient
from spare.rpc.full_node_rpc_client import FullNodeRpcClient
from spare.rpc.wallet_rpc_client import WalletRpcClient
from spare.util.config import load_config
from spare.util.default_root import DEFAULT_ROOT_PATH
from spare.util.ints import uint16
from spare.util.misc import format_bytes
from spare.util.misc import format_minutes
from spare.util.network import is_localhost
SECONDS_PER_BLOCK = (24 * 3600) / 4608
async def get_harvesters(farmer_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
plots = await farmer_client.get_harvesters()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'harvester' {e}")
return None
farmer_client.close()
await farmer_client.await_closed()
return plots
async def get_blockchain_state(rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
blockchain_state = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return blockchain_state
async def get_average_block_time(rpc_port: Optional[int]) -> float:
try:
blocks_to_compare = 500
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
curr: Optional[BlockRecord] = blockchain_state["peak"]
if curr is None or curr.height < (blocks_to_compare + 100):
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
while curr is not None and curr.height > 0 and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
if curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare)
while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block:
past_curr = await client.get_block_record(past_curr.prev_hash)
if past_curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
client.close()
await client.await_closed()
return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
async def get_wallets_stats(wallet_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
amounts = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
amounts = await wallet_client.get_farmed_amount()
#
# Don't catch any exceptions, the caller will handle it
#
finally:
wallet_client.close()
await wallet_client.await_closed()
return amounts
async def is_farmer_running(farmer_rpc_port: Optional[int]) -> bool:
is_running = False
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
await farmer_client.get_connections()
is_running = True
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return is_running
async def get_challenges(farmer_rpc_port: Optional[int]) -> Optional[List[Dict[str, Any]]]:
signage_points = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
signage_points = await farmer_client.get_signage_points()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return signage_points
async def challenges(farmer_rpc_port: Optional[int], limit: int) -> None:
signage_points = await get_challenges(farmer_rpc_port)
if signage_points is None:
return None
signage_points.reverse()
if limit != 0:
signage_points = signage_points[:limit]
for signage_point in signage_points:
print(
(
f"Hash: {signage_point['signage_point']['challenge_hash']} "
f"Index: {signage_point['signage_point']['signage_point_index']}"
)
)
async def summary(
rpc_port: Optional[int],
wallet_rpc_port: Optional[int],
harvester_rpc_port: Optional[int],
farmer_rpc_port: Optional[int],
) -> None:
all_harvesters = await get_harvesters(farmer_rpc_port)
blockchain_state = await get_blockchain_state(rpc_port)
farmer_running = await is_farmer_running(farmer_rpc_port)
wallet_not_ready: bool = False
wallet_not_running: bool = False
amounts = None
try:
amounts = await get_wallets_stats(wallet_rpc_port)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
wallet_not_running = True
else:
wallet_not_ready = True
print("Farming status: ", end="")
if blockchain_state is None:
print("Not available")
elif blockchain_state["sync"]["sync_mode"]:
print("Syncing")
elif not blockchain_state["sync"]["synced"]:
print("Not synced or not connected to peers")
elif not farmer_running:
print("Not running")
else:
print("Farming")
if amounts is not None:
print(f"Total spare farmed: {amounts['farmed_amount'] / units['spare']}")
print(f"User transaction fees: {amounts['fee_amount'] / units['spare']}")
print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['spare']}")
print(f"Last height farmed: {amounts['last_height_farmed']}")
class PlotStats:
total_plot_size = 0
total_plots = 0
if all_harvesters is not None:
harvesters_local: dict = {}
harvesters_remote: dict = {}
for harvester in all_harvesters["harvesters"]:
ip = harvester["connection"]["host"]
if is_localhost(ip):
harvesters_local[harvester["connection"]["node_id"]] = harvester
else:
if ip not in harvesters_remote:
harvesters_remote[ip] = {}
harvesters_remote[ip][harvester["connection"]["node_id"]] = harvester
def process_harvesters(harvester_peers_in: dict):
for harvester_peer_id, plots in harvester_peers_in.items():
total_plot_size_harvester = sum(map(lambda x: x["file_size"], plots["plots"]))
PlotStats.total_plot_size += total_plot_size_harvester
PlotStats.total_plots += len(plots["plots"])
print(f" {len(plots['plots'])} plots of size: {format_bytes(total_plot_size_harvester)}")
if len(harvesters_local) > 0:
print(f"Local Harvester{'s' if len(harvesters_local) > 1 else ''}")
process_harvesters(harvesters_local)
for harvester_ip, harvester_peers in harvesters_remote.items():
print(f"Remote Harvester{'s' if len(harvester_peers) > 1 else ''} for IP: {harvester_ip}")
process_harvesters(harvester_peers)
print(f"Plot count for all harvesters: {PlotStats.total_plots}")
print("Total size of plots: ", end="")
print(format_bytes(PlotStats.total_plot_size))
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
if blockchain_state is not None:
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
else:
print("Estimated network space: Unknown")
minutes = -1
if blockchain_state is not None and all_harvesters is not None:
proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1
if all_harvesters is not None and PlotStats.total_plots == 0:
print("Expected time to win: Never (no plots)")
else:
print("Expected time to win: " + format_minutes(minutes))
if amounts is None:
if wallet_not_running:
print("For details on farmed rewards and fees you should run 'spare start wallet' and 'spare wallet show'")
elif wallet_not_ready:
print("For details on farmed rewards and fees you should run 'spare wallet show'")
else:
print("Note: log into your key using 'spare wallet show' to see rewards for each key")
|
the-stack_0_16846 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanning code to find music in a library."""
import dataclasses
import mimetypes
import os
import pathlib
from typing import Iterable
import mutagen
from pepper_music_player.metadata import entity
from pepper_music_player.metadata import tag
@dataclasses.dataclass(frozen=True)
class File:
"""A file in the music library.
Attributes:
filename: Absolute filename.
dirname: Absolute name of the directory containing the file.
basename: Name of the file, relative to dirname.
"""
filename: str
dirname: str
basename: str
@dataclasses.dataclass(frozen=True)
class AudioFile(File):
"""An audio file.
Attributes:
track: The track in the file.
"""
track: entity.Track
@dataclasses.dataclass(frozen=True)
class ImageFile(File):
"""An image file.
Attributes:
image: The image in the file.
"""
image: entity.Image
def _read_audio_tags(dirname: str, basename: str, filename: str) -> tag.Tags:
"""Returns tags read from an audio file."""
file_info = mutagen.File(filename, easy=True)
return tag.Tags({
**(file_info.tags or {}),
tag.BASENAME: (basename,),
tag.DIRNAME: (dirname,),
tag.FILENAME: (filename,),
tag.DURATION_SECONDS: (str(file_info.info.length),),
}).derive()
def _read_image_tags(dirname: str, basename: str, filename: str) -> tag.Tags:
"""Returns tags read from an image file."""
# TODO(#61): Actually read more tags (e.g., width and height) from the file
# itself.
return tag.Tags({
tag.BASENAME: (basename,),
tag.DIRNAME: (dirname,),
tag.FILENAME: (filename,),
}).derive()
def scan(root_dirname: str) -> Iterable[File]:
"""Scans a directory."""
# TODO: Keep track of errors with os.walk(onerror=...)
# TODO: Catch and handle per-file errors.
for dirname, _, basenames in os.walk(os.path.abspath(root_dirname)):
dirpath = pathlib.Path(dirname)
for basename in basenames:
filepath = dirpath.joinpath(basename)
mime, _ = mimetypes.guess_type(filepath.as_uri())
mime_major, _, _ = (mime or '').partition('/')
if mime_major == 'audio':
yield AudioFile(
filename=str(filepath),
dirname=dirname,
basename=basename,
track=entity.Track(
tags=_read_audio_tags(dirname=dirname,
basename=basename,
filename=str(filepath))),
)
elif mime_major == 'image':
yield ImageFile(
filename=str(filepath),
dirname=dirname,
basename=basename,
image=entity.Image(tags=_read_image_tags(
dirname=dirname,
basename=basename,
filename=str(filepath),
)),
)
else:
yield File(filename=str(filepath),
dirname=dirname,
basename=basename)
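# Illustrative usage sketch (not part of the original module): walks a music
# library root and reports what was found; the root path is an assumption.
if __name__ == '__main__':
    for found in scan('/path/to/music/library'):
        print(type(found).__name__, found.filename)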
|
the-stack_0_16849 | # -*- coding:utf-8 -*-
import time
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from lxml import etree
def run(url):
    # Configure the headless browser: set the encoding and a desktop User-Agent
    # header to reduce the chance of being blocked by anti-crawler checks.
    options = Options()
    options.add_argument('--headless')
    options.add_argument('lang=zh_CN.UTF-8')
    UserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'
    options.add_argument('User-Agent=' + UserAgent)
    browser = webdriver.Chrome(options=options)  # pass the configured options so headless mode is actually used
browser.get(url)
res = etree.HTML(browser.page_source)
    # Extract the article links from the listing page and crawl each one
article_urls = res.xpath('//div[@class="article-list"]/div/h4/a/@href')
for article_url in article_urls:
browser.get(article_url)
article_result = etree.HTML(browser.page_source)
title = article_result.xpath('//h1[@class="title-article"]/text()')[0]
publish_time = article_result.xpath('//div[@class="bar-content"]/span[@class="time"]/text()')[0]
print(publish_time, title)
browser.close()
if __name__ == '__main__':
start = time.time()
    for i in range(1, 2):  # build the task URLs (only the first listing page; `i` is unused below)
url = 'https://blog.csdn.net/cui_yonghua/article/list/1'
run(url=url)
print('time cost:{}'.format(time.time() - start)) |
the-stack_0_16850 | import logging
from dirtyfields import DirtyFieldsMixin
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.db import IntegrityError, models
from django.utils.functional import cached_property
from django.utils import timezone
from guardian.models import GroupObjectPermissionBase, UserObjectPermissionBase
from include import IncludeManager
from framework.celery_tasks.handlers import enqueue_task
from osf.models.base import BaseModel, GuidMixin
from osf.models.mixins import GuardianMixin, TaxonomizableMixin
from osf.models.validators import validate_title
from osf.utils.fields import NonNaiveDateTimeField
from website.exceptions import NodeStateError
from website.util import api_v2_url
from website.search.exceptions import SearchUnavailableError
logger = logging.getLogger(__name__)
class CollectionSubmission(TaxonomizableMixin, BaseModel):
primary_identifier_name = 'guid___id'
class Meta:
order_with_respect_to = 'collection'
unique_together = ('collection', 'guid')
collection = models.ForeignKey('Collection', on_delete=models.CASCADE)
guid = models.ForeignKey('Guid', on_delete=models.CASCADE)
creator = models.ForeignKey('OSFUser')
collected_type = models.CharField(blank=True, max_length=31)
status = models.CharField(blank=True, max_length=31)
@cached_property
def _id(self):
return '{}-{}'.format(self.guid._id, self.collection._id)
@classmethod
def load(cls, data, select_for_update=False):
try:
cgm_id, collection_id = data.split('-')
except ValueError:
raise ValueError('Invalid CollectionSubmission object <_id {}>'.format(data))
else:
if cgm_id and collection_id:
try:
if isinstance(data, basestring):
return (cls.objects.get(guid___id=cgm_id, collection__guids___id=collection_id) if not select_for_update
else cls.objects.filter(guid___id=cgm_id, collection__guids___id=collection_id).select_for_update().get())
except cls.DoesNotExist:
return None
return None
def update_index(self):
if self.collection.is_public:
from website.search.search import update_collected_metadata
try:
update_collected_metadata(self.guid._id, collection_id=self.collection.id)
except SearchUnavailableError as e:
logger.exception(e)
def remove_from_index(self):
from website.search.search import update_collected_metadata
try:
update_collected_metadata(self.guid._id, collection_id=self.collection.id, op='delete')
except SearchUnavailableError as e:
logger.exception(e)
def save(self, *args, **kwargs):
kwargs.pop('old_subjects', None) # Not indexing this, trash it
ret = super(CollectionSubmission, self).save(*args, **kwargs)
self.update_index()
return ret
class Collection(DirtyFieldsMixin, GuidMixin, BaseModel, GuardianMixin):
objects = IncludeManager()
groups = {
'read': ('read_collection', ),
'write': ('read_collection', 'write_collection', ),
'admin': ('read_collection', 'write_collection', 'admin_collection', )
}
group_format = 'collections_{self.id}_{group}'
class Meta:
permissions = (
('read_collection', 'Read Collection'),
('write_collection', 'Write Collection'),
('admin_collection', 'Admin Collection'),
)
provider = models.ForeignKey('AbstractProvider', blank=True, null=True, on_delete=models.CASCADE)
creator = models.ForeignKey('OSFUser')
guid_links = models.ManyToManyField('Guid', through=CollectionSubmission, related_name='collections')
collected_types = models.ManyToManyField(
'contenttypes.ContentType',
related_name='+',
limit_choices_to={
'model__in': ['abstractnode', 'basefilenode', 'collection', 'preprintservice']
})
title = models.CharField(max_length=200, validators=[validate_title])
collected_type_choices = ArrayField(models.CharField(max_length=31), blank=True, default=list)
status_choices = ArrayField(models.CharField(max_length=31), blank=True, default=list)
is_public = models.BooleanField(default=False, db_index=True)
is_promoted = models.BooleanField(default=False, db_index=True)
is_bookmark_collection = models.BooleanField(default=False, db_index=True)
deleted = NonNaiveDateTimeField(null=True, blank=True)
def __unicode__(self):
return '{self.title!r}, with guid {self._id!r}'.format(self=self)
@property
def url(self):
return '/{}/'.format(self._id)
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def absolute_api_v2_url(self):
return api_v2_url('/collections{}'.format(self.url))
@property
def linked_nodes_self_url(self):
return '{}relationships/linked_nodes/'.format(self.absolute_api_v2_url)
@property
def linked_registrations_self_url(self):
return '{}relationships/linked_registrations/'.format(self.absolute_api_v2_url)
@property
def linked_nodes_related_url(self):
return '{}linked_nodes/'.format(self.absolute_api_v2_url)
@property
def linked_registrations_related_url(self):
return '{}linked_registrations/'.format(self.absolute_api_v2_url)
@classmethod
def bulk_update_search(cls, cgms, op='update', index=None):
from website import search
try:
search.search.bulk_update_collected_metadata(cgms, op=op, index=index)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
def save(self, *args, **kwargs):
first_save = self.id is None
if self.is_bookmark_collection:
if first_save and self.creator.collection_set.filter(is_bookmark_collection=True, deleted__isnull=True).exists():
raise IntegrityError('Each user cannot have more than one Bookmark collection.')
if self.title != 'Bookmarks':
# Bookmark collections are always named 'Bookmarks'
self.title = 'Bookmarks'
saved_fields = self.get_dirty_fields() or []
ret = super(Collection, self).save(*args, **kwargs)
if first_save:
# Set defaults for M2M
self.collected_types = ContentType.objects.filter(app_label='osf', model__in=['abstractnode', 'collection'])
# Set up initial permissions
self.update_group_permissions()
self.get_group('admin').user_set.add(self.creator)
elif 'is_public' in saved_fields:
from website.collections.tasks import on_collection_updated
enqueue_task(on_collection_updated.s(self._id))
return ret
def has_permission(self, user, perm):
return user.has_perms(self.groups[perm], self)
def collect_object(self, obj, collector, collected_type=None, status=None):
""" Adds object to collection, creates CollectionSubmission reference
Performs type / metadata validation. User permissions checked in view.
:param GuidMixin obj: Object to collect. Must be of a ContentType specified in collected_types
:param OSFUser collector: User doing the collecting
:param str collected_type: Metadata "type" of submission, validated against collected_type_choices
:param str status: Metadata "status" of submission, validated against status_choices
:return: CollectionSubmission object or raise exception
"""
collected_type = collected_type or ''
status = status or ''
if self.collected_type_choices and collected_type not in self.collected_type_choices:
raise ValidationError('"{}" is not an acceptable "type" for this collection'.format(collected_type))
if self.status_choices and status not in self.status_choices:
raise ValidationError('"{}" is not an acceptable "status" for this collection'.format(status))
if not any([isinstance(obj, t.model_class()) for t in self.collected_types.all()]):
# Not all objects have a content_type_pk, have to look the other way.
# Ideally, all objects would, and we could do:
# self.content_types.filter(id=obj.content_type_pk).exists()
raise ValidationError('"{}" is not an acceptable "ContentType" for this collection'.format(ContentType.objects.get_for_model(obj).model))
# Unique together -- self and guid
if self.collectionsubmission_set.filter(guid=obj.guids.first()).exists():
raise ValidationError('Object already exists in collection.')
cgm = self.collectionsubmission_set.create(guid=obj.guids.first(), creator=collector)
cgm.collected_type = collected_type
cgm.status = status
cgm.save()
return cgm
def remove_object(self, obj):
""" Removes object from collection
:param obj: object to remove from collection, if it exists. Acceptable types- CollectionSubmission, GuidMixin
"""
if isinstance(obj, CollectionSubmission):
if obj.collection == self:
obj.remove_from_index()
self.collectionsubmission_set.filter(id=obj.id).delete()
return
else:
cgm = self.collectionsubmission_set.get(guid=obj.guids.first())
if cgm:
cgm.remove_from_index()
cgm.delete()
return
raise ValueError('Node link does not belong to the requested node.')
def delete(self):
""" Mark collection as deleted
"""
if self.is_bookmark_collection:
# Not really the right exception to raise, but it's for back-compatibility
# TODO: Use a more correct exception and catch it in the necessary places
raise NodeStateError('Bookmark collections may not be deleted.')
self.deleted = timezone.now()
if self.is_public:
self.bulk_update_search(list(self.collectionsubmission_set.all()), op='delete')
self.save()
class CollectionUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Collection, on_delete=models.CASCADE)
class CollectionGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(Collection, on_delete=models.CASCADE)
|
the-stack_0_16854 | class Solution:
def findContentChildren(self, g: List[int], s: List[int]) -> int:
g.sort()
s.sort()
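        # Greedy strategy: with both lists sorted ascending, hand each cookie to
        # the least-greedy child that is still unsatisfied.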
indexChild = 0
for itemCookie in s:
if itemCookie >= g[indexChild]:
indexChild += 1
if indexChild > len(g) - 1:
break
return indexChild
|
the-stack_0_16855 | """ Onmt NMT Model base class definition """
import torch.nn as nn
class NMTModel(nn.Module):
"""
Core trainable object in OpenNMT. Implements a trainable interface
for a simple, generic encoder + decoder model.
Args:
encoder (:obj:`EncoderBase`): an encoder object
decoder (:obj:`RNNDecoderBase`): a decoder object
      multigpu (bool): setup for multigpu support
"""
def __init__(self, encoder, decoder, multigpu=False):
self.multigpu = multigpu
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, src, tgt, lengths, dec_state=None):
"""Forward propagate a `src` and `tgt` pair for training.
Possible initialized with a beginning decoder state.
Args:
src (:obj:`Tensor`):
a source sequence passed to encoder.
typically for inputs this will be a padded :obj:`LongTensor`
of size `[len x batch x features]`. however, may be an
image or other generic input depending on encoder.
tgt (:obj:`LongTensor`):
a target sequence of size `[tgt_len x batch]`.
lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.
dec_state (:obj:`DecoderState`, optional): initial decoder state
Returns:
(:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):
* decoder output `[tgt_len x batch x hidden]`
* dictionary attention dists of `[tgt_len x batch x src_len]`
* final decoder state
"""
tgt = tgt[:-1] # exclude last target from inputs
enc_final, memory_bank = self.encoder(src, lengths)
enc_state = \
self.decoder.init_decoder_state(src, memory_bank, enc_final)
decoder_outputs, dec_state, attns = \
self.decoder(tgt, memory_bank,
enc_state if dec_state is None
else dec_state,
memory_lengths=lengths)
if self.multigpu:
# Not yet supported on multi-gpu
dec_state = None
attns = None
return memory_bank, decoder_outputs, attns, dec_state
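# Illustrative call pattern (a sketch, not part of OpenNMT): assuming `enc` and
# `dec` are already-constructed onmt encoder/decoder modules and `src`, `tgt`,
# `lengths` are batched tensors shaped as described in forward() above:
#
#     model = NMTModel(enc, dec)
#     memory_bank, dec_outputs, attns, dec_state = model(src, tgt, lengths)
#     # dec_outputs: [(tgt_len - 1) x batch x hidden]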
|
the-stack_0_16857 | """Geographical extracts of natural increase, nom and nim
"""
from pathlib import Path
import pandas as pd
import data
import file_paths
from data import read_abs_data, read_abs_meta_data
DATA_ABS_PATH = Path.home() / "Documents/Analysis/Australian economy/Data/ABS"
def read_3101():
series_id = data.series_id_3101()
return data.read_abs_data(series_id=series_id)
def nom(df=None):
"""Exract NOM data
Parameters
----------
df : [type], optional
[description], by default None
"""
if df is None:
df = read_3101()
return df.net_overseas_migration
def nom_year_ending(df_nom=None):
"""Return year ending nom
Parameters
----------
nom : [type], optional
[description], by default None
"""
if df_nom is None:
df_nom = read_3101()
return df_nom.net_overseas_migration.rolling(4).sum().dropna()
def nom_year_ending_annual(df_nom=None, quarter="A-Jun"):
"""Return year ending for a given quarter
Parameters
----------
df_nom : Pandas series, optional
contains nom in sub-annual data
"""
if df_nom is None:
df_nom = nom()
# check there are 4 quarters that match the periodicity of "quarter"
# find the first quart to match ending quarter, and remove elements to the subsequent quarter
for i, date_ in enumerate(df_nom.index[:4]):
if date_.strftime("%b") == quarter[-3:]:
idx = i + 1
df_nom = df_nom.iloc[idx:]
break
if df_nom.index[3].strftime("%b") != quarter[-3:]:
print("1st DATE VALUE IS NOT A FULL YEAR")
nom_annual = df_nom.resample(quarter).sum()
# remove last year if not full year (ie nom last period == quarter parameter)
if df_nom.index[-1].strftime("%b") != quarter[-3:]:
nom_annual = nom_annual.iloc[:-1]
return nom_annual
def component_shares_between_dates(df):
"""
Calculate the nom and natural contribution to population growth over the period covered
by the dataframe.
Parameters
----------
df: a dataframe of ABS 3101, with column names already cleaned
(ie lower cased, and joined with "_")
Returns:
None but prints out a summary of population increase and component contributions
"""
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError("Chris - the dataframe does not have a time series index")
idx_erp_start = df.first_valid_index()
# Sum of components must start from 2nd period - components in first period
# contribute to the start ERP only
idx_component_start = df.iloc[1:].first_valid_index()
idx_erp_end = df.last_valid_index()
pop_delta = (
df.loc[idx_erp_end].estimated_resident_population
- df.loc[idx_erp_start].estimated_resident_population
)
pop_deta_pct_increase = (
pop_delta / df.loc[idx_erp_start].estimated_resident_population
)
nom = df.loc[idx_component_start:].net_overseas_migration.sum()
natural_increase = df.loc[idx_component_start:].natural_increase.sum()
components = nom + natural_increase
nom_share = nom / components
natural_increase_share = natural_increase / components
print(f"Between {idx_erp_start:%Y-%m-%d} and {idx_erp_end:%Y-%m-%d}:\n")
print(
f"Population increased {pop_delta * 1000:,.0f} ({pop_deta_pct_increase:.1%}) people.\n"
)
print(
f"{nom_share:.1%} from NOM, {natural_increase_share:.1%} from natural increase."
)
return
def annual_population_components(df=None, month=6):
"""
TODO: read in 3101 rather than passing in as df
Calculate annual nom and natural increase components over the period covered by a 3101 dataframe.
Parameters
----------
df: a dataframe of ABS 3101, with column names already cleaned
(ie lower cased, and joined with "_")
Returns:
a dataframe
"""
if df is None:
df = read_3101()
ERP = df[df.index.month == month].estimated_resident_population
ERP_flow = ERP.diff()
ERP_flow.name = "ERP_flow"
NOM = df.net_overseas_migration.rolling(4).sum()
NOM = NOM[NOM.index.month == month]
natural = df.natural_increase.rolling(4).sum()
natural = natural[natural.index.month == month]
population = pd.concat([ERP, ERP_flow, natural, NOM], axis=1)
## Adjust nom for period 1996 through 2005
# population.loc["1996":"2005", "net_overseas_migration"] = population.loc["1996":"2005", "net_overseas_migration"] * 1.25
population = population.assign(
NI_and_NOM=lambda x: x[["natural_increase", "net_overseas_migration"]].sum(
axis=1
)
)
# adjust NOM and natural increase to be correct levels of ERP - apportion intercensal equally
nom_intercensal_NOM_share = (
population.net_overseas_migration / population.NI_and_NOM
)
population = population.assign(
nom_adj=lambda x: nom_intercensal_NOM_share * x.ERP_flow
).assign(
natural_increase_adj=lambda x: (1 - nom_intercensal_NOM_share) * x.ERP_flow
)
return population
def get_pop_by_age(region=None, gender=None):
filepath = file_paths.abs_data_folder / "3101 age by year by gender.parquet"
df = pd.read_parquet(filepath)
if region:
### need to generalise for multiple regions..pass list etc
if region in df.region.unique():
df = df[df.region == region]
else:
raise ValueError(f"{region} is not in list of regions: {', '.join(sorted(df.region.unique()))}")
if gender:
if gender in df.gender.unique():
df = df[df.gender == gender]
else:
raise ValueError(f"{region} is not in list of regions: {', '.join(sorted(df.gender.unique()))}")
return df
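# Illustrative usage sketch (not part of the original module): assumes the ABS
# 3101 series and the parquet extract referenced above are available locally.
if __name__ == '__main__':
    erp = read_3101()
    component_shares_between_dates(erp)
    print(nom_year_ending(erp).tail())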
|
the-stack_0_16858 | import os
import sys
from setuptools import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
if sys.version_info < (2, 6):
raise Exception("redisrollforward requires Python 2.6 or higher.")
package_name = 'redisrollforward'
package_fullname = 'python-%s' % package_name
root_dir = os.path.split(os.path.abspath(__file__))[0]
package_dir = os.path.join(root_dir, package_name)
def get_module():
if root_dir not in sys.path:
sys.path.insert(0,root_dir)
return __import__(package_name)
mod = get_module()
# Try to import lib build
try:
from extensions.setup import libparams
except ImportError:
libparams = None
def read(fname):
return open(os.path.join(root_dir, fname)).read()
def requirements():
req = read('requirements.txt').replace('\r','').split('\n')
result = []
for r in req:
r = r.replace(' ','')
if r:
result.append(r)
return result
class osx_install_data(install_data):
def finalize_options(self):
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
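# Example: fullsplit('a/b/c') returns ['a', 'b', 'c'], i.e. the opposite of os.path.join('a', 'b', 'c').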
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
def get_rel_dir(d,base,res=''):
if d == base:
return res
br,r = os.path.split(d)
if res:
r = os.path.join(r,res)
return get_rel_dir(br,base,r)
packages, data_files = [], []
pieces = fullsplit(root_dir)
if pieces[-1] == '':
len_root_dir = len(pieces) - 1
else:
len_root_dir = len(pieces)
for dirpath, _, filenames in os.walk(package_dir):
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)[len_root_dir:]))
elif filenames and not dirpath.endswith('__pycache__'):
rel_dir = get_rel_dir(dirpath, package_dir)
data_files.extend((os.path.join(rel_dir, f) for f in filenames))
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
def run_setup(params=None, argv=None):
params = params or {'cmdclass': {}}
if sys.platform == "darwin":
params['cmdclass']['install_data'] = osx_install_data
else:
params['cmdclass']['install_data'] = install_data
argv = argv if argv is not None else sys.argv
if len(argv) > 1:
if argv[1] == 'install' and sys.version_info >= (3,0):
packages.remove('redisrollforward.fallback')
params.update({'name': package_fullname,
'version': mod.__version__,
'author': mod.__author__,
'author_email': mod.__contact__,
'url': mod.__homepage__,
'license': mod.__license__,
'description': mod.__doc__,
'long_description': read('README.rst'),
'packages': packages,
'package_data': {package_name: data_files},
'classifiers': mod.CLASSIFIERS,
'install_requires': requirements()})
setup(**params)
def status_msgs(*msgs):
print('*' * 75)
for msg in msgs:
print(msg)
print('*' * 75)
run_setup()
status_msgs("redisrollforward build succeeded.")
#EOF
|
the-stack_0_16859 | from Utils.utils import get_logger
import pydicom as dicom
import numpy as np
from numpy.linalg import norm
import os
from pydicom.pixel_data_handlers.util import apply_modality_lut
logger = get_logger(__name__)
class DCMreaderVMLa:
def __init__(self, folder_name):
self.broken = False
self.ch2_frames = []
self.ch3_frames = []
self.ch4_frames = []
self.ch2_file_paths = []
self.ch3_file_paths = []
self.ch4_file_paths = []
self.ch2_frames_matrice = None
self.ch3_frames_matrice = None
self.ch4_frames_matrice = None
dcm_files = sorted(os.listdir(folder_name))
for idx, file in enumerate(dcm_files):
if file.find('.dcm') != -1:
try:
temp_ds = dicom.dcmread(os.path.join(folder_name, file))
self.classifyLaFrame(temp_ds, os.path.join(folder_name, file))
except Exception as ex:
                    print("Couldn't read file: {}".format(os.path.join(folder_name, file)))
print('Failed due to: ')
print(ex)
self.broken = True
return
if len(self.ch2_frames) == 0 and len(self.ch3_frames) == 0 and len(self.ch4_frames) == 0:
self.broken = True
logger.warning("There are no frames. This folder should be deleted. Path: {}".format(folder_name))
else:
self.loadMatrices()
def classifyLaFrame(self, ds, file_path):
orientationDirCosines = ds.data_element('ImageOrientationPatient')
orientNPArray = np.cross(orientationDirCosines[0:3], orientationDirCosines[3:6])
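        # The cross product of the two in-plane direction cosines gives the slice
        # normal; the frame is assigned to whichever long-axis view (2ch/3ch/4ch)
        # has its reference normal most parallel to it (largest |cos(angle)|).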
ch2_direction = np.array([0.7692, 0.6184, 0.0081])
ch3_direction = np.array([0.7335, 0.1403, 0.6574])
ch4_direction = np.array([0.0144, -0.5744, 0.7982])
windowedFrame = apply_modality_lut(ds.pixel_array, ds)
cosOfAngle_ch2 = np.dot(orientNPArray, ch2_direction) / norm(ch2_direction) / norm(orientNPArray)
cosOfAngle_ch3 = np.dot(orientNPArray, ch3_direction) / norm(ch3_direction) / norm(orientNPArray)
cosOfAngle_ch4 = np.dot(orientNPArray, ch4_direction) / norm(ch4_direction) / norm(orientNPArray)
cosofAngles = [abs(cosOfAngle_ch2), abs(cosOfAngle_ch3), abs(cosOfAngle_ch4)]
minIdx = np.argmax(cosofAngles)
if minIdx == 0:
self.ch2_frames.append(windowedFrame)
self.ch2_file_paths.append(file_path)
return
if minIdx == 1:
self.ch3_frames.append(windowedFrame)
self.ch3_file_paths.append(file_path)
return
if minIdx == 2:
self.ch4_frames.append(windowedFrame)
self.ch4_file_paths.append(file_path)
return
def loadMatrices(self):
if len(self.ch2_frames) > 0:
size_h, size_w = self.ch2_frames[0].shape
self.ch2_frames_matrice = np.ones((len(self.ch2_frames), size_h, size_w))
for i in range(len(self.ch2_frames)):
if self.ch2_frames[i].shape == self.ch2_frames[0].shape:
self.ch2_frames_matrice[i] = self.ch2_frames[i]
else:
logger.error('Wrong shape at {}'.format(self.ch2_file_paths[i]))
if len(self.ch3_frames) > 0:
size_h, size_w = self.ch3_frames[0].shape
self.ch3_frames_matrice = np.ones((len(self.ch3_frames), size_h, size_w))
for i in range(len(self.ch3_frames)):
if self.ch3_frames[i].shape == self.ch3_frames[0].shape:
self.ch3_frames_matrice[i] = self.ch3_frames[i]
else:
logger.error('Wrong shape at {}'.format(self.ch3_file_paths[i]))
if len(self.ch4_frames) > 0:
size_h, size_w = self.ch4_frames[0].shape
self.ch4_frames_matrice = np.ones((len(self.ch4_frames), size_h, size_w))
for i in range(len(self.ch4_frames)):
if self.ch4_frames[i].shape == self.ch4_frames[0].shape:
self.ch4_frames_matrice[i] = self.ch4_frames[i]
else:
logger.error('Wrong shape at {}'.format(self.ch4_file_paths[i]))
def isBroken(self):
return self.broken
|
the-stack_0_16864 | #!/usr/bin/env python3
# Problem 6: Sum square difference
# https://projecteuler.net/problem=6
import sys
def euler006(bound):
numbers_sum = 0
squares_sum = 0
for number in range(1, bound + 1):
numbers_sum += number
squares_sum += number ** 2
return numbers_sum ** 2 - squares_sum
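# Worked example: for bound = 10, (1 + 2 + ... + 10)**2 = 3025 and
# 1**2 + 2**2 + ... + 10**2 = 385, so euler006(10) returns 2640.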
def parse_input(lines):
return int(lines[0].strip())
if __name__ == "__main__":
print(euler006(parse_input(sys.stdin.readlines())))
|
the-stack_0_16865 | import win32debug, sys, os
string_obj = "TestString123"
big_string_obj = string_obj * 100
bytes_obj = b"TestBytes123" # Python 2: str, Python 3: bytes
unicode_obj = u"TestUnicode" # Python 2: unicode, Python 3: str
byte_array_object = bytearray(b'TestBytearray123')
int_obj = int(1)
long_obj = 123456789012345678901234567890123456789012345678901234567890
float_obj = 3.1415
complex_obj = complex(1.5, -2.25)
bool_true_obj = True
bool_false_obj = False
none_obj = None
type_obj = dict
not_implemented_obj = NotImplemented
def test_function(x):
"""Some DocString"""
return x*x
func_obj = test_function
list_obj = [string_obj, int_obj, long_obj]
tuple_obj = (string_obj, int_obj, long_obj)
set_obj = { string_obj, int_obj, long_obj }
dict_obj = {
"string_obj": string_obj,
"int_obj": int_obj,
"long_obj": long_obj,
}
win32debug.dump_process("object_types.dmp")
|
the-stack_0_16867 | """Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
import sys
from pip._internal.exceptions import BadCommand
from pip._internal.utils.misc import (
display_path, backup_dir, call_subprocess, rmtree, ask_path_exists,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._vendor.six.moves.urllib import parse as urllib_parse
if MYPY_CHECK_RUNNING:
from typing import ( # noqa: F401
Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type
)
from pip._internal.utils.ui import SpinnerInterface # noqa: F401
AuthInfo = Tuple[Optional[str], Optional[str]]
__all__ = ['vcs']
logger = logging.getLogger(__name__)
class RemoteNotFoundError(Exception):
pass
class RevOptions(object):
"""
Encapsulates a VCS-specific revision to install, along with any VCS
install options.
Instances of this class should be treated as if immutable.
"""
def __init__(self, vcs, rev=None, extra_args=None):
# type: (VersionControl, Optional[str], Optional[List[str]]) -> None
"""
Args:
vcs: a VersionControl object.
rev: the name of the revision to install.
extra_args: a list of extra options.
"""
if extra_args is None:
extra_args = []
self.extra_args = extra_args
self.rev = rev
self.vcs = vcs
def __repr__(self):
return '<RevOptions {}: rev={!r}>'.format(self.vcs.name, self.rev)
@property
def arg_rev(self):
# type: () -> Optional[str]
if self.rev is None:
return self.vcs.default_arg_rev
return self.rev
def to_args(self):
# type: () -> List[str]
"""
Return the VCS-specific command arguments.
"""
args = [] # type: List[str]
rev = self.arg_rev
if rev is not None:
args += self.vcs.get_base_rev_args(rev)
args += self.extra_args
return args
def to_display(self):
# type: () -> str
if not self.rev:
return ''
return ' (to revision {})'.format(self.rev)
def make_new(self, rev):
# type: (str) -> RevOptions
"""
Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object.
"""
return self.vcs.make_rev_options(rev, extra_args=self.extra_args)
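# Illustrative sketch (not part of pip): given a concrete VersionControl backend
# `vc` whose get_base_rev_args(rev) returns [rev], the following would hold:
#
#     rev_options = vc.make_rev_options('v1.0', extra_args=['--quiet'])
#     rev_options.to_args()     # -> ['v1.0', '--quiet']
#     rev_options.to_display()  # -> ' (to revision v1.0)'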
class VcsSupport(object):
_registry = {} # type: Dict[str, Type[VersionControl]]
schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
def __init__(self):
# type: () -> None
# Register more schemes with urlparse for various version control
# systems
urllib_parse.uses_netloc.extend(self.schemes)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
return self._registry.__iter__()
@property
def backends(self):
# type: () -> List[Type[VersionControl]]
return list(self._registry.values())
@property
def dirnames(self):
# type: () -> List[str]
return [backend.dirname for backend in self.backends]
@property
def all_schemes(self):
# type: () -> List[str]
schemes = [] # type: List[str]
for backend in self.backends:
schemes.extend(backend.schemes)
return schemes
def register(self, cls):
# type: (Type[VersionControl]) -> None
if not hasattr(cls, 'name'):
logger.warning('Cannot register VCS %s', cls.__name__)
return
if cls.name not in self._registry:
self._registry[cls.name] = cls
logger.debug('Registered VCS backend: %s', cls.name)
def unregister(self, cls=None, name=None):
# type: (Optional[Type[VersionControl]], Optional[str]) -> None
if name in self._registry:
del self._registry[name]
elif cls in self._registry.values():
del self._registry[cls.name]
else:
logger.warning('Cannot unregister because no class or name given')
def get_backend_type(self, location):
# type: (str) -> Optional[Type[VersionControl]]
"""
Return the type of the version control backend if found at given
location, e.g. vcs.get_backend_type('/path/to/vcs/checkout')
"""
for vc_type in self._registry.values():
if vc_type.controls_location(location):
                logger.debug('Determined that %s uses VCS: %s',
location, vc_type.name)
return vc_type
return None
def get_backend(self, name):
# type: (str) -> Optional[Type[VersionControl]]
name = name.lower()
if name in self._registry:
return self._registry[name]
return None
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
repo_name = ''
# List of supported schemes for this Version Control
schemes = () # type: Tuple[str, ...]
# Iterable of environment variable names to pass to call_subprocess().
unset_environ = () # type: Tuple[str, ...]
default_arg_rev = None # type: Optional[str]
def __init__(self, url=None, *args, **kwargs):
self.url = url
super(VersionControl, self).__init__(*args, **kwargs)
def get_base_rev_args(self, rev):
"""
Return the base revision arguments for a vcs command.
Args:
rev: the name of a revision to install. Cannot be None.
"""
raise NotImplementedError
def make_rev_options(self, rev=None, extra_args=None):
# type: (Optional[str], Optional[List[str]]) -> RevOptions
"""
Return a RevOptions object.
Args:
rev: the name of a revision to install.
extra_args: a list of extra options.
"""
return RevOptions(self, rev, extra_args=extra_args)
@classmethod
def _is_local_repository(cls, repo):
# type: (str) -> bool
"""
posix absolute paths start with os.path.sep,
win32 ones start with drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or bool(drive)
def export(self, location):
"""
        Export the repository at the url to the destination location,
        i.e. only download the files, without VCS information
"""
raise NotImplementedError
def get_netloc_and_auth(self, netloc, scheme):
"""
Parse the repository URL's netloc, and return the new netloc to use
along with auth information.
Args:
netloc: the original repository URL netloc.
scheme: the repository URL's scheme without the vcs prefix.
This is mainly for the Subversion class to override, so that auth
information can be provided via the --username and --password options
instead of through the URL. For other subclasses like Git without
such an option, auth information must stay in the URL.
Returns: (netloc, (username, password)).
"""
return netloc, (None, None)
def get_url_rev_and_auth(self, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
"""
Parse the repository URL to use, and return the URL, revision,
and auth info to use.
Returns: (url, rev, (username, password)).
"""
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
if '+' not in scheme:
raise ValueError(
"Sorry, {!r} is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
)
# Remove the vcs prefix.
scheme = scheme.split('+', 1)[1]
netloc, user_pass = self.get_netloc_and_auth(netloc, scheme)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev, user_pass
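    # Added illustrative note: for an editable URL such as
    # "git+https://github.com/user/repo.git@v1.0" the method above strips the
    # "git+" prefix and splits off the trailing "@v1.0" revision, returning roughly
    # ("https://github.com/user/repo.git", "v1.0", (None, None)).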
def make_rev_args(self, username, password):
"""
Return the RevOptions "extra arguments" to use in obtain().
"""
return []
def get_url_rev_options(self, url):
# type: (str) -> Tuple[str, RevOptions]
"""
Return the URL and RevOptions object to use in obtain() and in
some cases export(), as a tuple (url, rev_options).
"""
url, rev, user_pass = self.get_url_rev_and_auth(url)
username, password = user_pass
extra_args = self.make_rev_args(username, password)
rev_options = self.make_rev_options(rev, extra_args=extra_args)
return url, rev_options
def normalize_url(self, url):
# type: (str) -> str
"""
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib_parse.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
# type: (str, str) -> bool
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (self.normalize_url(url1) == self.normalize_url(url2))
def fetch_new(self, dest, url, rev_options):
"""
Fetch a revision from a repository, in the case that this is the
first fetch from the repository.
Args:
dest: the directory to fetch the repository to.
rev_options: a RevOptions object.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
"""
Switch the repo at ``dest`` to point to ``URL``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def update(self, dest, url, rev_options):
"""
Update an already-existing repo to the given ``rev_options``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def is_commit_id_equal(self, dest, name):
"""
Return whether the id of the current commit equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
raise NotImplementedError
def obtain(self, dest):
# type: (str) -> None
"""
Install or update in editable mode the package represented by this
VersionControl object.
Args:
dest: the repository directory in which to install or update.
"""
url, rev_options = self.get_url_rev_options(self.url)
if not os.path.exists(dest):
self.fetch_new(dest, url, rev_options)
return
rev_display = rev_options.to_display()
if self.is_repository_directory(dest):
existing_url = self.get_remote_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.is_commit_id_equal(dest, rev_options.rev):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, url, rev_options)
else:
logger.info('Skipping because already up-to-date.')
return
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
# https://github.com/python/mypy/issues/1174
prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore
('i', 'w', 'b'))
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
if response == 'a':
sys.exit(-1)
if response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
self.fetch_new(dest, url, rev_options)
return
if response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
self.fetch_new(dest, url, rev_options)
return
# Do nothing if the response is "i".
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options)
def unpack(self, location):
# type: (str) -> None
"""
        Clean up the current location and download the url repository
        (and its VCS info) into that location
"""
if os.path.exists(location):
rmtree(location)
self.obtain(location)
@classmethod
def get_src_requirement(cls, location, project_name):
"""
Return a string representing the requirement needed to
redownload the files currently present in location, something
like:
{repository_url}@{revision}#egg={project_name}-{version_identifier}
"""
raise NotImplementedError
@classmethod
def get_remote_url(cls, location):
"""
Return the url used at location
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
raise NotImplementedError
@classmethod
def get_revision(cls, location):
"""
Return the current commit id of the files at the given location.
"""
raise NotImplementedError
@classmethod
def run_command(
cls,
cmd, # type: List[str]
show_stdout=True, # type: bool
cwd=None, # type: Optional[str]
on_returncode='raise', # type: str
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
command_desc=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
spinner=None # type: Optional[SpinnerInterface]
):
# type: (...) -> Optional[Text]
"""
Run a VCS subcommand
This is simply a wrapper around call_subprocess that adds the VCS
command name, and checks that the VCS is available
"""
cmd = [cls.name] + cmd
try:
return call_subprocess(cmd, show_stdout, cwd,
on_returncode=on_returncode,
extra_ok_returncodes=extra_ok_returncodes,
command_desc=command_desc,
extra_environ=extra_environ,
unset_environ=cls.unset_environ,
spinner=spinner)
except OSError as e:
# errno.ENOENT = no such file or directory
# In other words, the VCS executable isn't available
if e.errno == errno.ENOENT:
raise BadCommand(
'Cannot find command %r - do you have '
'%r installed and in your '
'PATH?' % (cls.name, cls.name))
else:
raise # re-raise exception if a different error occurred
@classmethod
def is_repository_directory(cls, path):
# type: (str) -> bool
"""
Return whether a directory path is a repository directory.
"""
logger.debug('Checking in %s for %s (%s)...',
path, cls.dirname, cls.name)
return os.path.exists(os.path.join(path, cls.dirname))
@classmethod
def controls_location(cls, location):
# type: (str) -> bool
"""
Check if a location is controlled by the vcs.
It is meant to be overridden to implement smarter detection
mechanisms for specific vcs.
This can do more than is_repository_directory() alone. For example,
the Git override checks that Git is actually available.
"""
return cls.is_repository_directory(location)
|
the-stack_0_16868 | # -*- coding: utf-8 -*-
class Solution:
def lengthOfLIS(self, nums):
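        # Added commentary: O(n^2) dynamic programming where result[i] holds the
        # length of the longest increasing subsequence ending at nums[i], i.e.
        # result[i] = 1 + max(result[j]) over all j < i with nums[j] < nums[i].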
if not nums:
return 0
result = [1] * len(nums)
for i in range(len(nums)):
partial = result[i]
for j in range(i):
if nums[i] > nums[j]:
partial = max(partial, 1 + result[j])
result[i] = partial
return max(result)
if __name__ == '__main__':
solution = Solution()
assert 4 == solution.lengthOfLIS([10, 9, 2, 5, 3, 7, 101, 18])
|
the-stack_0_16870 | """Holder for the (test kind, list of tests) pair with additional metadata their execution."""
from __future__ import absolute_import
import itertools
import threading
import time
from . import report as _report
from . import summary as _summary
from .. import config as _config
from .. import selector as _selector
def synchronized(method):
"""Provide decorator to enfore instance lock ownership when calling the method."""
def synced(self, *args, **kwargs):
"""Sync an instance lock."""
lock = getattr(self, "_lock")
with lock:
return method(self, *args, **kwargs)
return synced
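# Added note: the Suite methods below are wrapped with @synchronized so that, for
# example, record_test_start()/record_test_end() cannot interleave with
# summarize() while it is reading self._reports and self._partial_reports.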
class Suite(object): # pylint: disable=too-many-instance-attributes
"""A suite of tests of a particular kind (e.g. C++ unit tests, dbtests, jstests)."""
def __init__(self, suite_name, suite_config, suite_options=_config.SuiteOptions.ALL_INHERITED):
"""Initialize the suite with the specified name and configuration."""
self._lock = threading.RLock()
self._suite_name = suite_name
self._suite_config = suite_config
self._suite_options = suite_options
self.test_kind = self.get_test_kind_config()
self.tests, self.excluded = self._get_tests_for_kind(self.test_kind)
self.return_code = None # Set by the executor.
self._suite_start_time = None
self._suite_end_time = None
self._test_start_times = []
self._test_end_times = []
self._reports = []
# We keep a reference to the TestReports from the currently running jobs so that we can
# report intermediate results.
self._partial_reports = None
def _get_tests_for_kind(self, test_kind):
"""Return the tests to run based on the 'test_kind'-specific filtering policy."""
test_info = self.get_selector_config()
# The mongos_test doesn't have to filter anything, the test_info is just the arguments to
# the mongos program to be used as the test case.
if test_kind == "mongos_test":
mongos_options = test_info # Just for easier reading.
if not isinstance(mongos_options, dict):
raise TypeError("Expected dictionary of arguments to mongos")
return [mongos_options], []
tests, excluded = _selector.filter_tests(test_kind, test_info)
if _config.ORDER_TESTS_BY_NAME:
return sorted(tests, key=str.lower), sorted(excluded, key=str.lower)
return tests, excluded
def get_name(self):
"""Return the name of the test suite."""
return self._suite_name
def get_display_name(self):
"""Return the name of the test suite with a unique identifier for its SuiteOptions."""
if self.options.description is None:
return self.get_name()
return "{} ({})".format(self.get_name(), self.options.description)
def get_selector_config(self):
"""Return the "selector" section of the YAML configuration."""
if "selector" not in self._suite_config:
return {}
selector = self._suite_config["selector"].copy()
if self.options.include_tags is not None:
if "include_tags" in selector:
selector["include_tags"] = {
"$allOf": [
selector["include_tags"],
self.options.include_tags,
]
}
elif "exclude_tags" in selector:
selector["exclude_tags"] = {
"$anyOf": [
selector["exclude_tags"],
{"$not": self.options.include_tags},
]
}
else:
selector["include_tags"] = self.options.include_tags
return selector
def get_executor_config(self):
"""Return the "executor" section of the YAML configuration."""
return self._suite_config["executor"]
def get_test_kind_config(self):
"""Return the "test_kind" section of the YAML configuration."""
return self._suite_config["test_kind"]
@property
def options(self):
"""Get the options."""
return self._suite_options.resolve()
def with_options(self, suite_options):
"""Return a Suite instance with the specified resmokelib.config.SuiteOptions."""
return Suite(self._suite_name, self._suite_config, suite_options)
@synchronized
def record_suite_start(self):
"""Record the start time of the suite."""
self._suite_start_time = time.time()
@synchronized
def record_suite_end(self):
"""Record the end time of the suite."""
self._suite_end_time = time.time()
@synchronized
def record_test_start(self, partial_reports):
"""Record the start time of an execution.
The result is stored in the TestReports for currently running jobs.
"""
self._test_start_times.append(time.time())
self._partial_reports = partial_reports
@synchronized
def record_test_end(self, report):
"""Record the end time of an execution."""
self._test_end_times.append(time.time())
self._reports.append(report)
self._partial_reports = None
@synchronized
def get_active_report(self):
"""Return the partial report of the currently running execution, if there is one."""
if not self._partial_reports:
return None
return _report.TestReport.combine(*self._partial_reports)
@synchronized
def get_reports(self):
"""Return the list of reports.
If there's an execution currently in progress, then a report for the partial results
is included in the returned list.
"""
if self._partial_reports is not None:
return self._reports + [self.get_active_report()]
return self._reports
@synchronized
def summarize(self, sb):
"""Append a summary of the suite onto the string builder 'sb'."""
if not self._reports and not self._partial_reports:
sb.append("No tests ran.")
summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
elif not self._reports and self._partial_reports:
summary = self.summarize_latest(sb)
elif len(self._reports) == 1 and not self._partial_reports:
summary = self._summarize_execution(0, sb)
else:
summary = self._summarize_repeated(sb)
summarized_group = " %ss: %s" % (self.test_kind, "\n ".join(sb))
if summary.num_run == 0:
sb.append("Suite did not run any tests.")
return
# Override the 'time_taken' attribute of the summary if we have more accurate timing
# information available.
if self._suite_start_time is not None and self._suite_end_time is not None:
time_taken = self._suite_end_time - self._suite_start_time
summary = summary._replace(time_taken=time_taken)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
sb.append(summarized_group)
@synchronized
def summarize_latest(self, sb):
"""Return a summary of the latest execution of the suite.
Also append a summary of that execution onto the string builder 'sb'.
If there's an execution currently in progress, then the partial
summary of that execution is appended to 'sb'.
"""
if self._partial_reports is None:
return self._summarize_execution(-1, sb)
active_report = _report.TestReport.combine(*self._partial_reports)
# Use the current time as the time that this suite finished running.
end_time = time.time()
return self._summarize_report(active_report, self._test_start_times[-1], end_time, sb)
def _summarize_repeated(self, sb):
"""Return the summary information of all executions.
Also append each execution's summary onto the string builder 'sb' and
information of how many repetitions there were.
"""
reports = self.get_reports() # Also includes the combined partial reports.
num_iterations = len(reports)
start_times = self._test_start_times[:]
end_times = self._test_end_times[:]
if self._partial_reports:
end_times.append(time.time()) # Add an end time in this copy for the partial reports.
total_time_taken = end_times[-1] - start_times[0]
sb.append("Executed %d times in %0.2f seconds:" % (num_iterations, total_time_taken))
combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
for iteration in xrange(num_iterations):
# Summarize each execution as a bulleted list of results.
bulleter_sb = []
summary = self._summarize_report(reports[iteration], start_times[iteration],
end_times[iteration], bulleter_sb)
combined_summary = _summary.combine(combined_summary, summary)
for (i, line) in enumerate(bulleter_sb):
# Only bullet first line, indent others.
prefix = "* " if i == 0 else " "
sb.append(prefix + line)
return combined_summary
def _summarize_execution(self, iteration, sb):
"""Return the summary information of the execution given by 'iteration'.
Also append a summary of that execution onto the string builder 'sb'.
"""
return self._summarize_report(self._reports[iteration], self._test_start_times[iteration],
self._test_end_times[iteration], sb)
def _summarize_report(self, report, start_time, end_time, sb):
"""Return the summary information of the execution.
The summary is for 'report' that started at 'start_time' and finished at 'end_time'.
Also append a summary of that execution onto the string builder 'sb'.
"""
time_taken = end_time - start_time
# Tests that were interrupted are treated as failures because (1) the test has already been
# started and therefore isn't skipped and (2) the test has yet to finish and therefore
# cannot be said to have succeeded.
num_failed = report.num_failed + report.num_interrupted
num_run = report.num_succeeded + report.num_errored + num_failed
num_skipped = len(self.tests) + report.num_dynamic - num_run
if report.num_succeeded == num_run and num_skipped == 0:
sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, time_taken))
return _summary.Summary(num_run, time_taken, num_run, 0, 0, 0)
summary = _summary.Summary(num_run, time_taken, report.num_succeeded, num_skipped,
num_failed, report.num_errored)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
if num_failed > 0:
sb.append("The following tests failed (with exit code):")
for test_info in itertools.chain(report.get_failed(), report.get_interrupted()):
sb.append(" %s (%d)" % (test_info.test_id, test_info.return_code))
if report.num_errored > 0:
sb.append("The following tests had errors:")
for test_info in report.get_errored():
sb.append(" %s" % (test_info.test_id))
return summary
@staticmethod
def log_summaries(logger, suites, time_taken):
"""Log summary of all suites."""
sb = []
sb.append("Summary of all suites: %d suites ran in %0.2f seconds" % (len(suites),
time_taken))
for suite in suites:
suite_sb = []
suite.summarize(suite_sb)
sb.append(" %s: %s" % (suite.get_display_name(), "\n ".join(suite_sb)))
logger.info("=" * 80)
logger.info("\n".join(sb))
|
the-stack_0_16871 | from .Apps import all_apps, App
from .Projects import all_projects
from .Projects import Projects
from .Pods import all_pods
from .utils.oc import oc
import random
import logging
import time
class Task:
def __init__(self,config,task):
self.config = config
self.task = task
self.templates = config["appTemplates"]
self.logger = logging.getLogger('reliability')
random.seed()
def execute(self):
all_apps.init()
all_projects.init()
# all_projects = Projects()
# all_projects.projects = 2
# all_projects.max_projects = 9
# all_projects.projects = {'cakephp-mysql-example-0': {"app": None, "name": 'cakephp-mysql-example-0'}, 'nodejs-mongodb-example-1':{"app": None, "name": 'nodejs-mongodb-example-1'}}
all_pods.init()
resource = self.task["resource"]
action = self.task["action"]
if resource == "projects":
if action == "create":
self.logger.debug("create projects")
quantity = self.task["quantity"]
for i in range(0, quantity):
project_base_name = random.choice(self.templates)["template"]
new_project = all_projects.add(project_base_name)
if new_project != None:
app = App(project_base_name, new_project.name, project_base_name, project_base_name)
new_project.app = app
all_apps.add(app)
elif action == "delete":
self.logger.debug("delete projects")
projects = list(all_projects.projects.keys())
project_to_delete = random.choice(projects)
all_projects.delete(project_to_delete)
elif action == "check":
self.logger.debug("check projects")
all_projects.check_projects()
elif action == "modify":
for project_key in all_projects.projects.keys():
all_projects.projects[project_key].modify()
elif resource == "apps":
if action == "build":
self.logger.debug("Build apps")
if len(all_apps.apps) > 0:
apps = list(all_apps.apps.keys())
app_to_build_key = random.choice(apps)
app_to_build = all_apps.apps[app_to_build_key]
app_to_build.build()
elif action == "scaleUp":
self.logger.debug("ScaleUp apps")
all_apps.init()
for app_key in all_apps.apps.keys():
all_apps.apps[app_key].scale_up()
elif action =="scaleDown":
self.logger.debug("ScaleDown apps")
for app_key in all_apps.apps.keys():
all_apps.apps[app_key].scale_down()
time.sleep(30)
elif action == "visit":
self.logger.debug("Visit Apps")
for app_key in all_apps.apps.keys():
all_apps.apps[app_key].visit()
elif resource == "pods":
if action == "check":
self.logger.debug("Check pods")
all_pods.check()
elif resource == "session" :
if action == "login":
result, rc = oc("login -u " + self.task["user"] + " -p " + self.task["password"])
if rc !=0 :
self.logger.error("Login failed")
|
the-stack_0_16872 | import contextlib
import sys
import os
import torch
import unittest
from torchvision import io
from torchvision.datasets.video_utils import VideoClips, unfold
from common_utils import get_tmp_dir
@contextlib.contextmanager
def get_list_of_videos(num_videos=5, sizes=None, fps=None):
with get_tmp_dir() as tmp_dir:
names = []
for i in range(num_videos):
if sizes is None:
size = 5 * (i + 1)
else:
size = sizes[i]
if fps is None:
f = 5
else:
f = fps[i]
data = torch.randint(0, 255, (size, 300, 400, 3), dtype=torch.uint8)
name = os.path.join(tmp_dir, "{}.mp4".format(i))
names.append(name)
io.write_video(name, data, fps=f)
yield names
class Tester(unittest.TestCase):
def test_unfold(self):
a = torch.arange(7)
r = unfold(a, 3, 3, 1)
expected = torch.tensor([
[0, 1, 2],
[3, 4, 5],
])
self.assertTrue(r.equal(expected))
r = unfold(a, 3, 2, 1)
expected = torch.tensor([
[0, 1, 2],
[2, 3, 4],
[4, 5, 6]
])
self.assertTrue(r.equal(expected))
r = unfold(a, 3, 2, 2)
expected = torch.tensor([
[0, 2, 4],
[2, 4, 6],
])
self.assertTrue(r.equal(expected))
@unittest.skipIf(not io.video._av_available(), "this test requires av")
def test_video_clips(self):
with get_list_of_videos(num_videos=3) as video_list:
video_clips = VideoClips(video_list, 5, 5, num_workers=2)
self.assertEqual(video_clips.num_clips(), 1 + 2 + 3)
for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]):
video_idx, clip_idx = video_clips.get_clip_location(i)
self.assertEqual(video_idx, v_idx)
self.assertEqual(clip_idx, c_idx)
video_clips = VideoClips(video_list, 6, 6)
self.assertEqual(video_clips.num_clips(), 0 + 1 + 2)
for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]):
video_idx, clip_idx = video_clips.get_clip_location(i)
self.assertEqual(video_idx, v_idx)
self.assertEqual(clip_idx, c_idx)
video_clips = VideoClips(video_list, 6, 1)
self.assertEqual(video_clips.num_clips(), 0 + (10 - 6 + 1) + (15 - 6 + 1))
for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]:
video_idx, clip_idx = video_clips.get_clip_location(i)
self.assertEqual(video_idx, v_idx)
self.assertEqual(clip_idx, c_idx)
@unittest.skipIf(not io.video._av_available(), "this test requires av")
def test_video_clips_custom_fps(self):
with get_list_of_videos(num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list:
num_frames = 4
for fps in [1, 3, 4, 10]:
video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2)
for i in range(video_clips.num_clips()):
video, audio, info, video_idx = video_clips.get_clip(i)
self.assertEqual(video.shape[0], num_frames)
self.assertEqual(info["video_fps"], fps)
self.assertEqual(info, {"video_fps": fps})
# TODO add tests checking that the content is right
def test_compute_clips_for_video(self):
video_pts = torch.arange(30)
# case 1: single clip
num_frames = 13
orig_fps = 30
duration = float(len(video_pts)) / orig_fps
new_fps = 13
clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames,
orig_fps, new_fps)
resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
self.assertEqual(len(clips), 1)
self.assertTrue(clips.equal(idxs))
self.assertTrue(idxs[0].equal(resampled_idxs))
# case 2: all frames appear only once
num_frames = 4
orig_fps = 30
duration = float(len(video_pts)) / orig_fps
new_fps = 12
clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames,
orig_fps, new_fps)
resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
self.assertEqual(len(clips), 3)
self.assertTrue(clips.equal(idxs))
self.assertTrue(idxs.flatten().equal(resampled_idxs))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_16873 | # Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to generate sample bazelrc file."""
import os
from string import Template
from util import get_git_root
GIT_ROOT = get_git_root()
BAZELRC_DIR = os.path.join(GIT_ROOT, "bazelrc")
LATEST_BAZELRC_LINK = BAZELRC = os.path.join(BAZELRC_DIR, "latest.bazelrc")
LICENCE_TPL = os.path.join(GIT_ROOT, "release", "license.tpl")
BAZELRC_TPL = os.path.join(GIT_ROOT, "release", "bazelrc.tpl")
def create_bazelrc_and_update_link(bazel_version):
"""Creates new sample .bazelrc file and update latest.bazelrc symlink.
Example bazelrc files can be found in directory bazelrc/.
There is one sample bazelrc file Bazel version. bazelrc/latest.bazelrc should
always be symlinked to the .bazelrc file for the latest version of Bazel.
If the file already exists in this repo, the script will delete it and
generate new one.
Args:
bazel_version: string, the version of Bazel used to generate the configs.
"""
bazelrc_path = os.path.join(
BAZELRC_DIR, "bazel-{version}.bazelrc".format(version=bazel_version))
# Remove old version of this .bazelrc file.
if os.path.exists(bazelrc_path):
os.remove(bazelrc_path)
with open(bazelrc_path, "w") as bazelrc_file:
# Write license header.
with open(LICENCE_TPL, "r") as license_header:
bazelrc_file.write(license_header.read())
# Write sample .bazelrc body.
with open(BAZELRC_TPL, "r") as tpl_file:
tpl = Template(tpl_file.read()).substitute(BAZEL_VERSION=bazel_version)
bazelrc_file.write(tpl)
# Update latest.bazelrc link
if os.path.exists(LATEST_BAZELRC_LINK):
os.remove(LATEST_BAZELRC_LINK)
os.symlink(os.path.basename(bazelrc_path), LATEST_BAZELRC_LINK)
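# Illustrative usage sketch (added, with a hypothetical version string):
#
#   create_bazelrc_and_update_link("0.29.1")
#   # -> writes bazelrc/bazel-0.29.1.bazelrc from release/bazelrc.tpl and points
#   #    bazelrc/latest.bazelrc at it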
|
the-stack_0_16874 | # -*- coding: utf-8 -*-
"""Tools for inspecting Python objects.
Uses syntax highlighting for presenting the various information elements.
Similar in spirit to the inspect module, but all calls take a name argument to
reference the name under which an object is being read.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
__all__ = ['Inspector','InspectColors']
# stdlib modules
import ast
import inspect
from inspect import signature
import linecache
import warnings
import os
from textwrap import dedent
import types
import io as stdlib_io
from typing import Union
# IPython's own
from IPython.core import page
from IPython.lib.pretty import pretty
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import openpy
from IPython.utils import py3compat
from IPython.utils.dir2 import safe_hasattr
from IPython.utils.path import compress_user
from IPython.utils.text import indent
from IPython.utils.wildcard import list_namespace
from IPython.utils.wildcard import typestr2type
from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
from IPython.utils.py3compat import cast_unicode
from IPython.utils.colorable import Colorable
from IPython.utils.decorators import undoc
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
def pylight(code):
return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True))
# builtin docstrings to ignore
_func_call_docstring = types.FunctionType.__call__.__doc__
_object_init_docstring = object.__init__.__doc__
_builtin_type_docstrings = {
inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
types.FunctionType, property)
}
_builtin_func_type = type(all)
_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
InspectColors = PyColorize.ANSICodeColors
#****************************************************************************
# Auxiliary functions and objects
# See the messaging spec for the definition of all these fields. This list
# effectively defines the order of display
info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
'length', 'file', 'definition', 'docstring', 'source',
'init_definition', 'class_docstring', 'init_docstring',
'call_def', 'call_docstring',
# These won't be printed but will be used to determine how to
# format the object
'ismagic', 'isalias', 'isclass', 'found', 'name'
]
def object_info(**kw):
"""Make an object info dict with all fields present."""
infodict = {k:None for k in info_fields}
infodict.update(kw)
return infodict
def get_encoding(obj):
"""Get encoding for python source file defining obj
Returns None if obj is not defined in a sourcefile.
"""
ofile = find_file(obj)
# run contents of file through pager starting at line where the object
# is defined, as long as the file isn't binary and is actually on the
# filesystem.
if ofile is None:
return None
elif ofile.endswith(('.so', '.dll', '.pyd')):
return None
elif not os.path.isfile(ofile):
return None
else:
# Print only text files, not extension binaries. Note that
# getsourcelines returns lineno with 1-offset and page() uses
# 0-offset, so we must adjust.
with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2
encoding, lines = openpy.detect_encoding(buffer.readline)
return encoding
def getdoc(obj) -> Union[str,None]:
"""Stable wrapper around inspect.getdoc.
This can't crash because of attribute problems.
It also attempts to call a getdoc() method on the given object. This
allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system.
"""
# Allow objects to offer customized documentation via a getdoc method:
try:
ds = obj.getdoc()
except Exception:
pass
else:
if isinstance(ds, str):
return inspect.cleandoc(ds)
docstr = inspect.getdoc(obj)
return docstr
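# Added illustrative note: an object can customise what getdoc() reports by
# exposing its own getdoc() method, e.g. roughly
#
#   class Proxy:
#       def getdoc(self):
#           return "Docs fetched from the remote object."
#
#   getdoc(Proxy())   # -> "Docs fetched from the remote object."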
def getsource(obj, oname='') -> Union[str,None]:
"""Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Parameters
----------
obj : object
an object whose source code we will attempt to extract
oname : str
(optional) a name under which the object is known
Returns
-------
src : unicode or None
"""
if isinstance(obj, property):
sources = []
for attrname in ['fget', 'fset', 'fdel']:
fn = getattr(obj, attrname)
if fn is not None:
encoding = get_encoding(fn)
oname_prefix = ('%s.' % oname) if oname else ''
sources.append(''.join(('# ', oname_prefix, attrname)))
if inspect.isfunction(fn):
sources.append(dedent(getsource(fn)))
else:
# Default str/repr only prints function name,
# pretty.pretty prints module name too.
sources.append(
'%s%s = %s\n' % (oname_prefix, attrname, pretty(fn))
)
if sources:
return '\n'.join(sources)
else:
return None
else:
# Get source for non-property objects.
obj = _get_wrapped(obj)
try:
src = inspect.getsource(obj)
except TypeError:
# The object itself provided no meaningful source, try looking for
# its class definition instead.
try:
src = inspect.getsource(obj.__class__)
except (OSError, TypeError):
return None
except OSError:
return None
return src
def is_simple_callable(obj):
"""True if obj is a function ()"""
return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
@undoc
def getargspec(obj):
"""Wrapper around :func:`inspect.getfullargspec`
In addition to functions and methods, this can also handle objects with a
``__call__`` attribute.
DEPRECATED: Deprecated since 7.10. Do not use, will be removed.
"""
warnings.warn('`getargspec` function is deprecated as of IPython 7.10'
'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
obj = obj.__call__
return inspect.getfullargspec(obj)
@undoc
def format_argspec(argspec):
"""Format argspect, convenience wrapper around inspect's.
This takes a dict instead of ordered arguments and calls
inspect.format_argspec with the arguments in the necessary order.
DEPRECATED (since 7.10): Do not use; will be removed in future versions.
"""
warnings.warn('`format_argspec` function is deprecated as of IPython 7.10'
'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
return inspect.formatargspec(argspec['args'], argspec['varargs'],
argspec['varkw'], argspec['defaults'])
@undoc
def call_tip(oinfo, format_call=True):
"""DEPRECATED since 6.0. Extract call tip data from an oinfo dict."""
warnings.warn(
"`call_tip` function is deprecated as of IPython 6.0"
"and will be removed in future versions.",
DeprecationWarning,
stacklevel=2,
)
# Get call definition
argspec = oinfo.get('argspec')
if argspec is None:
call_line = None
else:
# Callable objects will have 'self' as their first argument, prune
# it out if it's there for clarity (since users do *not* pass an
# extra first argument explicitly).
try:
has_self = argspec['args'][0] == 'self'
except (KeyError, IndexError):
pass
else:
if has_self:
argspec['args'] = argspec['args'][1:]
call_line = oinfo['name']+format_argspec(argspec)
# Now get docstring.
# The priority is: call docstring, constructor docstring, main one.
doc = oinfo.get('call_docstring')
if doc is None:
doc = oinfo.get('init_docstring')
if doc is None:
doc = oinfo.get('docstring','')
return call_line, doc
def _get_wrapped(obj):
"""Get the original object if wrapped in one or more @decorators
Some objects automatically construct similar objects on any unrecognised
attribute access (e.g. unittest.mock.call). To protect against infinite loops,
this will arbitrarily cut off after 100 levels of obj.__wrapped__
attribute access. --TK, Jan 2016
"""
orig_obj = obj
i = 0
while safe_hasattr(obj, '__wrapped__'):
obj = obj.__wrapped__
i += 1
if i > 100:
# __wrapped__ is probably a lie, so return the thing we started with
return orig_obj
return obj
def find_file(obj) -> str:
"""Find the absolute path to the file where an object was defined.
This is essentially a robust wrapper around `inspect.getabsfile`.
Returns None if no file can be found.
Parameters
----------
obj : any Python object
Returns
-------
fname : str
The absolute path to the file where the object was defined.
"""
obj = _get_wrapped(obj)
fname = None
try:
fname = inspect.getabsfile(obj)
except TypeError:
# For an instance, the file that matters is where its class was
# declared.
try:
fname = inspect.getabsfile(obj.__class__)
except (OSError, TypeError):
# Can happen for builtins
pass
except OSError:
pass
return cast_unicode(fname)
def find_source_lines(obj):
"""Find the line number in a file where an object was defined.
This is essentially a robust wrapper around `inspect.getsourcelines`.
Returns None if no file can be found.
Parameters
----------
obj : any Python object
Returns
-------
lineno : int
The line number where the object definition starts.
"""
obj = _get_wrapped(obj)
try:
lineno = inspect.getsourcelines(obj)[1]
except TypeError:
# For instances, try the class object like getsource() does
try:
lineno = inspect.getsourcelines(obj.__class__)[1]
except (OSError, TypeError):
return None
except OSError:
return None
return lineno
class Inspector(Colorable):
def __init__(self, color_table=InspectColors,
code_color_table=PyColorize.ANSICodeColors,
scheme=None,
str_detail_level=0,
parent=None, config=None):
super(Inspector, self).__init__(parent=parent, config=config)
self.color_table = color_table
self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
self.format = self.parser.format
self.str_detail_level = str_detail_level
self.set_active_scheme(scheme)
def _getdef(self,obj,oname='') -> Union[str,None]:
"""Return the call signature for any callable object.
If any exception is generated, None is returned instead and the
exception is suppressed."""
try:
return _render_signature(signature(obj), oname)
except:
return None
def __head(self,h) -> str:
"""Return a header string with proper colors."""
return '%s%s%s' % (self.color_table.active_colors.header,h,
self.color_table.active_colors.normal)
def set_active_scheme(self, scheme):
if scheme is not None:
self.color_table.set_active_scheme(scheme)
self.parser.color_table.set_active_scheme(scheme)
def noinfo(self, msg, oname):
"""Generic message when no information is found."""
print('No %s found' % msg, end=' ')
if oname:
print('for %s' % oname)
else:
print()
def pdef(self, obj, oname=''):
"""Print the call signature for any callable object.
If the object is a class, print the constructor information."""
if not callable(obj):
print('Object is not callable.')
return
header = ''
if inspect.isclass(obj):
header = self.__head('Class constructor information:\n')
output = self._getdef(obj,oname)
if output is None:
self.noinfo('definition header',oname)
else:
print(header,self.format(output), end=' ')
# In Python 3, all classes are new-style, so they all have __init__.
@skip_doctest
def pdoc(self, obj, oname='', formatter=None):
"""Print the docstring for any object.
Optional:
-formatter: a function to run the docstring through for specially
formatted docstrings.
Examples
--------
In [1]: class NoInit:
...: pass
In [2]: class NoDoc:
...: def __init__(self):
...: pass
In [3]: %pdoc NoDoc
No documentation found for NoDoc
In [4]: %pdoc NoInit
No documentation found for NoInit
In [5]: obj = NoInit()
In [6]: %pdoc obj
No documentation found for obj
In [5]: obj2 = NoDoc()
In [6]: %pdoc obj2
No documentation found for obj2
"""
head = self.__head # For convenience
lines = []
ds = getdoc(obj)
if formatter:
ds = formatter(ds).get('plain/text', ds)
if ds:
lines.append(head("Class docstring:"))
lines.append(indent(ds))
if inspect.isclass(obj) and hasattr(obj, '__init__'):
init_ds = getdoc(obj.__init__)
if init_ds is not None:
lines.append(head("Init docstring:"))
lines.append(indent(init_ds))
elif hasattr(obj,'__call__'):
call_ds = getdoc(obj.__call__)
if call_ds:
lines.append(head("Call docstring:"))
lines.append(indent(call_ds))
if not lines:
self.noinfo('documentation',oname)
else:
page.page('\n'.join(lines))
def psource(self, obj, oname=''):
"""Print the source code for an object."""
# Flush the source cache because inspect can return out-of-date source
linecache.checkcache()
try:
src = getsource(obj, oname=oname)
except Exception:
src = None
if src is None:
self.noinfo('source', oname)
else:
page.page(self.format(src))
def pfile(self, obj, oname=''):
"""Show the whole file where an object was defined."""
lineno = find_source_lines(obj)
if lineno is None:
self.noinfo('file', oname)
return
ofile = find_file(obj)
# run contents of file through pager starting at line where the object
# is defined, as long as the file isn't binary and is actually on the
# filesystem.
if ofile.endswith(('.so', '.dll', '.pyd')):
print('File %r is binary, not printing.' % ofile)
elif not os.path.isfile(ofile):
print('File %r does not exist, not printing.' % ofile)
else:
# Print only text files, not extension binaries. Note that
# getsourcelines returns lineno with 1-offset and page() uses
# 0-offset, so we must adjust.
page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
def _mime_format(self, text:str, formatter=None) -> dict:
"""Return a mime bundle representation of the input text.
- if `formatter` is None, the returned mime bundle has
a ``text/plain`` field, with the input text.
a ``text/html`` field with a ``<pre>`` tag containing the input text.
- if ``formatter`` is not None, it must be a callable transforming the
input text into a mime bundle. Default values for ``text/plain`` and
``text/html`` representations are the ones described above.
Note:
Formatters returning strings are supported but this behavior is deprecated.
"""
defaults = {
'text/plain': text,
'text/html': '<pre>' + text + '</pre>'
}
if formatter is None:
return defaults
else:
formatted = formatter(text)
if not isinstance(formatted, dict):
# Handle the deprecated behavior of a formatter returning
# a string instead of a mime bundle.
return {
'text/plain': formatted,
'text/html': '<pre>' + formatted + '</pre>'
}
else:
return dict(defaults, **formatted)
def format_mime(self, bundle):
text_plain = bundle['text/plain']
text = ''
heads, bodies = list(zip(*text_plain))
_len = max(len(h) for h in heads)
for head, body in zip(heads, bodies):
body = body.strip('\n')
delim = '\n' if '\n' in body else ' '
text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n'
bundle['text/plain'] = text
return bundle
def _get_info(
self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=()
):
"""Retrieve an info dict and format it.
Parameters
----------
obj : any
Object to inspect and return info from
oname : str (default: ''):
Name of the variable pointing to `obj`.
formatter : callable
info
already computed information
detail_level : integer
Granularity of detail level, if set to 1, give more information.
omit_sections : container[str]
Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
"""
info = self.info(obj, oname=oname, info=info, detail_level=detail_level)
_mime = {
'text/plain': [],
'text/html': '',
}
def append_field(bundle, title:str, key:str, formatter=None):
if title in omit_sections or key in omit_sections:
return
field = info[key]
if field is not None:
formatted_field = self._mime_format(field, formatter)
bundle['text/plain'].append((title, formatted_field['text/plain']))
bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'
def code_formatter(text):
return {
'text/plain': self.format(text),
'text/html': pylight(text)
}
if info['isalias']:
append_field(_mime, 'Repr', 'string_form')
elif info['ismagic']:
if detail_level > 0:
append_field(_mime, 'Source', 'source', code_formatter)
else:
append_field(_mime, 'Docstring', 'docstring', formatter)
append_field(_mime, 'File', 'file')
elif info['isclass'] or is_simple_callable(obj):
# Functions, methods, classes
append_field(_mime, 'Signature', 'definition', code_formatter)
append_field(_mime, 'Init signature', 'init_definition', code_formatter)
append_field(_mime, 'Docstring', 'docstring', formatter)
if detail_level > 0 and info['source']:
append_field(_mime, 'Source', 'source', code_formatter)
else:
append_field(_mime, 'Init docstring', 'init_docstring', formatter)
append_field(_mime, 'File', 'file')
append_field(_mime, 'Type', 'type_name')
append_field(_mime, 'Subclasses', 'subclasses')
else:
# General Python objects
append_field(_mime, 'Signature', 'definition', code_formatter)
append_field(_mime, 'Call signature', 'call_def', code_formatter)
append_field(_mime, 'Type', 'type_name')
append_field(_mime, 'String form', 'string_form')
# Namespace
if info['namespace'] != 'Interactive':
append_field(_mime, 'Namespace', 'namespace')
append_field(_mime, 'Length', 'length')
append_field(_mime, 'File', 'file')
# Source or docstring, depending on detail level and whether
# source found.
if detail_level > 0 and info['source']:
append_field(_mime, 'Source', 'source', code_formatter)
else:
append_field(_mime, 'Docstring', 'docstring', formatter)
append_field(_mime, 'Class docstring', 'class_docstring', formatter)
append_field(_mime, 'Init docstring', 'init_docstring', formatter)
append_field(_mime, 'Call docstring', 'call_docstring', formatter)
return self.format_mime(_mime)
def pinfo(
self,
obj,
oname="",
formatter=None,
info=None,
detail_level=0,
enable_html_pager=True,
omit_sections=(),
):
"""Show detailed information about an object.
Optional arguments:
- oname: name of the variable pointing to the object.
- formatter: callable (optional)
A special formatter for docstrings.
The formatter is a callable that takes a string as an input
and returns either a formatted string or a mime type bundle
in the form of a dictionary.
Although the support of custom formatter returning a string
instead of a mime type bundle is deprecated.
- info: a structure with some information fields which may have been
precomputed already.
- detail_level: if set to 1, more information is given.
- omit_sections: set of section keys and titles to omit
"""
info = self._get_info(
obj, oname, formatter, info, detail_level, omit_sections=omit_sections
)
if not enable_html_pager:
del info['text/html']
page.page(info)
def _info(self, obj, oname="", info=None, detail_level=0):
"""
Inspector.info() was likely improperly marked as deprecated
while only a parameter was deprecated. We "un-deprecate" it.
"""
warnings.warn(
"The `Inspector.info()` method has been un-deprecated as of 8.0 "
"and the `formatter=` keyword removed. `Inspector._info` is now "
"an alias, and you can just call `.info()` directly.",
DeprecationWarning,
stacklevel=2,
)
return self.info(obj, oname=oname, info=info, detail_level=detail_level)
def info(self, obj, oname="", info=None, detail_level=0) -> dict:
"""Compute a dict with detailed information about an object.
Parameters
----------
obj : any
An object to find information about
oname : str (default: '')
Name of the variable pointing to `obj`.
info : (default: None)
A struct (dict like with attr access) with some information fields
which may have been precomputed already.
detail_level : int (default:0)
If set to 1, more information is given.
Returns
-------
An object info dict with known fields from `info_fields`. Keys are
strings, values are string or None.
"""
if info is None:
ismagic = False
isalias = False
ospace = ''
else:
ismagic = info.ismagic
isalias = info.isalias
ospace = info.namespace
# Get docstring, special-casing aliases:
if isalias:
if not callable(obj):
try:
ds = "Alias to the system command:\n %s" % obj[1]
except:
ds = "Alias: " + str(obj)
else:
ds = "Alias to " + str(obj)
if obj.__doc__:
ds += "\nDocstring:\n" + obj.__doc__
else:
ds = getdoc(obj)
if ds is None:
ds = '<no docstring>'
# store output in a dict, we initialize it here and fill it as we go
out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic, subclasses=None)
string_max = 200 # max size of strings to show (snipped if longer)
shalf = int((string_max - 5) / 2)
if ismagic:
out['type_name'] = 'Magic function'
elif isalias:
out['type_name'] = 'System alias'
else:
out['type_name'] = type(obj).__name__
try:
bclass = obj.__class__
out['base_class'] = str(bclass)
except:
pass
# String form, but snip if too long in ? form (full in ??)
if detail_level >= self.str_detail_level:
try:
ostr = str(obj)
str_head = 'string_form'
if not detail_level and len(ostr)>string_max:
ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
ostr = ("\n" + " " * len(str_head.expandtabs())).\
join(q.strip() for q in ostr.split("\n"))
out[str_head] = ostr
except:
pass
if ospace:
out['namespace'] = ospace
# Length (for strings and lists)
try:
out['length'] = str(len(obj))
except Exception:
pass
# Filename where object was defined
binary_file = False
fname = find_file(obj)
if fname is None:
# if anything goes wrong, we don't want to show source, so it's as
# if the file was binary
binary_file = True
else:
if fname.endswith(('.so', '.dll', '.pyd')):
binary_file = True
elif fname.endswith('<string>'):
fname = 'Dynamically generated function. No source code available.'
out['file'] = compress_user(fname)
# Original source code for a callable, class or property.
if detail_level:
# Flush the source cache because inspect can return out-of-date
# source
linecache.checkcache()
try:
if isinstance(obj, property) or not binary_file:
src = getsource(obj, oname)
if src is not None:
src = src.rstrip()
out['source'] = src
except Exception:
pass
# Add docstring only if no source is to be shown (avoid repetitions).
if ds and not self._source_contains_docstring(out.get('source'), ds):
out['docstring'] = ds
# Constructor docstring for classes
if inspect.isclass(obj):
out['isclass'] = True
# get the init signature:
try:
init_def = self._getdef(obj, oname)
except AttributeError:
init_def = None
# get the __init__ docstring
try:
obj_init = obj.__init__
except AttributeError:
init_ds = None
else:
if init_def is None:
# Get signature from init if top-level sig failed.
# Can happen for built-in types (list, etc.).
try:
init_def = self._getdef(obj_init, oname)
except AttributeError:
pass
init_ds = getdoc(obj_init)
# Skip Python's auto-generated docstrings
if init_ds == _object_init_docstring:
init_ds = None
if init_def:
out['init_definition'] = init_def
if init_ds:
out['init_docstring'] = init_ds
names = [sub.__name__ for sub in type.__subclasses__(obj)]
if len(names) < 10:
all_names = ', '.join(names)
else:
all_names = ', '.join(names[:10]+['...'])
out['subclasses'] = all_names
# and class docstring for instances:
else:
# reconstruct the function definition and print it:
defln = self._getdef(obj, oname)
if defln:
out['definition'] = defln
# First, check whether the instance docstring is identical to the
# class one, and print it separately if they don't coincide. In
# most cases they will, but it's nice to print all the info for
# objects which use instance-customized docstrings.
if ds:
try:
cls = getattr(obj,'__class__')
except:
class_ds = None
else:
class_ds = getdoc(cls)
# Skip Python's auto-generated docstrings
if class_ds in _builtin_type_docstrings:
class_ds = None
if class_ds and ds != class_ds:
out['class_docstring'] = class_ds
# Next, try to show constructor docstrings
try:
init_ds = getdoc(obj.__init__)
# Skip Python's auto-generated docstrings
if init_ds == _object_init_docstring:
init_ds = None
except AttributeError:
init_ds = None
if init_ds:
out['init_docstring'] = init_ds
# Call form docstring for callable instances
if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
call_def = self._getdef(obj.__call__, oname)
if call_def and (call_def != out.get('definition')):
# it may never be the case that call def and definition differ,
# but don't include the same signature twice
out['call_def'] = call_def
call_ds = getdoc(obj.__call__)
# Skip Python's auto-generated docstrings
if call_ds == _func_call_docstring:
call_ds = None
if call_ds:
out['call_docstring'] = call_ds
return object_info(**out)
@staticmethod
def _source_contains_docstring(src, doc):
"""
Check whether the source *src* contains the docstring *doc*.
        This is a helper function to skip displaying the docstring if the
source already contains it, avoiding repetition of information.
"""
try:
def_node, = ast.parse(dedent(src)).body
return ast.get_docstring(def_node) == doc
except Exception:
# The source can become invalid or even non-existent (because it
# is re-fetched from the source file) so the above code fail in
# arbitrary ways.
return False
def psearch(self,pattern,ns_table,ns_search=[],
ignore_case=False,show_all=False, *, list_types=False):
"""Search namespaces with wildcards for objects.
Arguments:
- pattern: string containing shell-like wildcards to use in namespace
searches and optionally a type specification to narrow the search to
objects of that type.
- ns_table: dict of name->namespaces for search.
Optional arguments:
- ns_search: list of namespace names to include in search.
- ignore_case(False): make the search case-insensitive.
- show_all(False): show all names, including those starting with
underscores.
- list_types(False): list all available object types for object matching.
"""
#print 'ps pattern:<%r>' % pattern # dbg
# defaults
type_pattern = 'all'
filter = ''
# list all object types
if list_types:
page.page('\n'.join(sorted(typestr2type)))
return
cmds = pattern.split()
len_cmds = len(cmds)
if len_cmds == 1:
# Only filter pattern given
filter = cmds[0]
elif len_cmds == 2:
# Both filter and type specified
filter,type_pattern = cmds
else:
raise ValueError('invalid argument string for psearch: <%s>' %
pattern)
# filter search namespaces
for name in ns_search:
if name not in ns_table:
raise ValueError('invalid namespace <%s>. Valid names: %s' %
(name,ns_table.keys()))
#print 'type_pattern:',type_pattern # dbg
search_result, namespaces_seen = set(), set()
for ns_name in ns_search:
ns = ns_table[ns_name]
# Normally, locals and globals are the same, so we just check one.
if id(ns) in namespaces_seen:
continue
namespaces_seen.add(id(ns))
tmp_res = list_namespace(ns, type_pattern, filter,
ignore_case=ignore_case, show_all=show_all)
search_result.update(tmp_res)
page.page('\n'.join(sorted(search_result)))
def _render_signature(obj_signature, obj_name) -> str:
"""
This was mostly taken from inspect.Signature.__str__.
Look there for the comments.
The only change is to add linebreaks when this gets too long.
"""
result = []
pos_only = False
kw_only = True
for param in obj_signature.parameters.values():
if param.kind == inspect._POSITIONAL_ONLY:
pos_only = True
elif pos_only:
result.append('/')
pos_only = False
if param.kind == inspect._VAR_POSITIONAL:
kw_only = False
elif param.kind == inspect._KEYWORD_ONLY and kw_only:
result.append('*')
kw_only = False
result.append(str(param))
if pos_only:
result.append('/')
# add up name, parameters, braces (2), and commas
if len(obj_name) + sum(len(r) + 2 for r in result) > 75:
# This doesn’t fit behind “Signature: ” in an inspect window.
rendered = '{}(\n{})'.format(obj_name, ''.join(
' {},\n'.format(r) for r in result)
)
else:
rendered = '{}({})'.format(obj_name, ', '.join(result))
if obj_signature.return_annotation is not inspect._empty:
anno = inspect.formatannotation(obj_signature.return_annotation)
rendered += ' -> {}'.format(anno)
return rendered
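# Illustrative example (not part of the original source): for
#     def f(a, b, *, c=1) -> int: ...
# _render_signature(inspect.signature(f), 'f') returns 'f(a, b, *, c=1) -> int';
# once the name plus parameters exceed ~75 characters, the parameters are
# instead wrapped one per line inside the parentheses.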
|
the-stack_0_16875 | class Solution:
def minCostToSupplyWater(self, n: int, wells, pipes) -> int:
        # Kruskal's MST over an augmented graph: each well is modelled as an edge
        # from a virtual source node 0 to house i+1, then the cheapest edges are
        # taken greedily with union-find until all n houses are connected.
        q = sorted([[w, u, v] for u, v, w in pipes] + [[w, 0, i+1] for i, w in enumerate(wells)])
uf = [i for i in range(n+1)]
res = count = 0
def find(x):
if (x != uf[x]):
uf[x] = find(uf[x])
return uf[x]
def union(x, y):
uf[x] = y
for w, u, v in q:
rA, rB = find(u), find(v)
if rA == rB:
continue
union(rA, rB)
res += w
count += 1
if count == n:
return res
return res |
the-stack_0_16876 | import attr, os
from pprint import PrettyPrinter
from mavetools.client.client import Client
from mavetools.models.experiment import Experiment
pp = PrettyPrinter(indent=2) # displayes results in readable format
# check environment variables and see if variable named MAVEDB_BASE_URL exists and return value
# if the value does not exist, an empty string is returned instead
base_url = os.getenv("MAVEDB_BASE_URL", "")
experiment_urn = "urn:mavedb:00000001-a" # the urn of the experiment we want to get
# Generate a new auth_token in your profile and post it here
auth_token = "AseyaNLLhqv9jAm0joMkq2oqB0bw3GKxTclkT2NtG340RF6CfdM2UC3j8Fv4RpbQ"
# auth_token =
# if the base url exists, the client object is instantiated with that value
# otherwise the client object is instantiated with default value which points to localhost
client = (
Client(base_url, auth_token=auth_token)
if base_url
else Client(auth_token=auth_token)
)
# using the client object, GET the model instance of an Experiment with a particular urn
# GET retrieves a resource from the server via the appropriate API endpoint
experiment = client.get_model_instance(Experiment, experiment_urn)
# display results
pp.pprint(attr.asdict(experiment))
|
the-stack_0_16877 | from input import parse
from word2vec1 import word2vec, dictionaries
from collections import namedtuple,OrderedDict
import numpy as np
import json
import gensim
import copy
import logging
def training(fn, wordvecpath):
if not wordvecpath:
word2vec(fn)
wordvecpath = './tmpdata/vecs.bin'
ndeprel = dictionaries(fn)
X_lengths = np.array([])
Arcs = namedtuple('Arcs', ['headid', 'headform', 'tailid', 'tailform', 'deprel'])
Transition = namedtuple('Transition', ['transition', 'label'])
with open('./tmpdata/deprel.json', 'r') as fp:
dictionary2 = json.load(fp)
f = open(fn, 'r')
data = f.read()
mode = gensim.models.Word2Vec.load(wordvecpath)
model = mode.wv
vecdims = mode.layer1_size
vecdims = vecdims+11+2+2
del mode
Y2 = np.zeros([1, 4+ndeprel])
X2 = np.zeros([1, vecdims*5+4])
sid=0
buffer1 = []
stack = []
arcs = []
listofTransitions = []
for sent in parse(data):
del buffer1[:]
del stack[:]
del arcs[:]
buffer1 = copy.deepcopy(sent)
buffer1.append(OrderedDict(
[("id", 0), ("form", 'root'), ("lemma", 'root'), ("upostag", 'root'), ("xpostag", 'root'), ("feats", 'root'), ("head", -1),
("deprel", 'root'), ("deps", 'root'), ("misc", 'root'), ]))
flag=True
for word in sent:
if not pcheck(word['id'],word['head'],sent):
del buffer1[:]
flag=False
break
i=0
while buffer1:
transi, label = oracle(stack, buffer1, arcs)
trans = Transition(transi, label)
i+=1
X,t = nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel)
X2 = np.vstack((X2,X))
Y2 = np.vstack((Y2,t))
if trans.transition == 0: # SHIFT
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
elif trans.transition == 1: # REDUCE
del stack[0]
listofTransitions.append(trans.transition)
            elif trans.transition == 2: # LEFT ARC
arcs.append(Arcs(buffer1[0]['id'], buffer1[0]['form'], stack[0]['id'], stack[0]['form'], trans.label))
del stack[0]
listofTransitions.append(trans.transition)
elif trans.transition == 3: # RIGHT ARC
arcs.append(Arcs(stack[0]['id'], stack[0]['form'], buffer1[0]['id'], buffer1[0]['form'], trans.label))
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
if flag : X_lengths = np.append(X_lengths, i)
sid+=1
logging.info ('vectorising sentence : '+str(sid))
X2 = np.delete(X2, 0, axis=0)
Y2 = np.delete(Y2, 0, axis=0)
return X2,Y2,X_lengths
def oracle(stack, buffer1, arcs):
global i
if not stack:
return 0, ""
if not buffer1[0] :
del buffer1[:]
i-=1
return 1, ""
s0id = stack[0]['id']
s0head = stack[0]['head']
b0id = buffer1[0]['id']
b0head = buffer1[0]['head']
if b0id == s0head:
return 2, stack[0]['deprel']
elif s0id == b0head:
return 3, buffer1[0]['deprel']
elif head(stack[0], arcs) != -1 and b0head<s0head :
return 1, ""
return 0, ""
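# Illustrative note (not part of the original file): oracle() encodes the
# standard arc-eager transitions as integers, which training() above interprets
# as 0=SHIFT, 1=REDUCE, 2=LEFT-ARC, 3=RIGHT-ARC.  For example, when the word on
# top of the stack has the front of the buffer as its head, oracle() returns
# (2, <deprel of the stack word>) and training() adds a left arc.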
def head(stackc, arcs):
for a in arcs:
if a.headid == stackc['head']:
return a.headid
return -1
def nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel):
mones = [-1] * vecdims
ones = [1] * (vecdims-4)
zeros = [0] * (vecdims-15)
dep = [-1]*4
sentenc = np.array([])
words=["_","_","_","_","_"]
if stack:
words.pop(0)
words.insert(0,stack[0])
dep[0] = iofdeprel(rightchild(stack[0], arcs))
dep[1] = iofdeprel(leftchild(stack[0], arcs))
if len(stack) > 1:
words.pop(1)
words.insert(1,stack[1])
if buffer1:
words.pop(2)
words.insert(2,buffer1[0])
dep[2] = iofdeprel(rightchild(buffer1[0], arcs))
dep[3] = iofdeprel(leftchild(buffer1[0], arcs))
if len(buffer1) > 1:
words.pop(3)
words.insert(3,buffer1[1])
if len(buffer1) > 2:
words.pop(4)
words.insert(4, buffer1[2])
for w in words:
if w == '_':
sentenc = np.hstack((sentenc, mones))
elif w['form'] == 'root':
sentenc = np.hstack((sentenc, ones, D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] in model.vocab:
sentenc = np.hstack((sentenc, model[w['form']], featureids(w['feats'], dictionary2),D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] is not None:
sentenc = np.hstack((sentenc, zeros, featureids(w['feats'], dictionary2), D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
else:
sentenc = np.hstack((sentenc, mones))
sentenc = np.hstack((sentenc,dep))
t = trans.transition
if t > 1:
t = np.hstack((np.eye(4)[t], np.eye(ndeprel)[iofdeprel(trans.label)-1]))
else:
t = np.hstack((np.eye(4)[t], np.zeros(ndeprel)))
return sentenc, t
def D(key, dic):
if dic.get(key): return dic[key]
    return -1
def featureids(feats1, dic):
f=[-1]*11
if feats1['cat'] in dic: f[0] = dic[feats1['cat']]
if feats1['gen'] in dic: f[1] = dic[feats1['gen']]
if feats1['num'] in dic: f[2] = dic[feats1['num']]
if feats1['pers'] in dic: f[3] = dic[feats1['pers']]
if feats1['case'] in dic: f[4] = dic[feats1['case']]
if feats1['vib'] in dic: f[5] = dic[feats1['vib']]
if feats1['tam'] in dic: f[6] = dic[feats1['tam']]
if feats1['chunkId'] in dic: f[7] = dic[feats1['chunkId']]
if feats1['chunkType'] in dic: f[8] = dic[feats1['chunkType']]
if feats1['stype'] in dic: f[9] = dic[feats1['stype']]
    if feats1['voicetype'] in dic: f[10] = dic[feats1['voicetype']]  # was f[0]: voicetype belongs in the last slot
return f
def rightchild(stackc, arcs):
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid > stackc['id']:
if id==-1 :
id=a.tailid
deprel=a.deprel
else :
if id < a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def leftchild(stackc, arcs):
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid < stackc['id'] :
            if id == -1 :  # was "if not id": -1 is truthy, so the first left child was never recorded
id = a.tailid
deprel = a.deprel
else :
if id > a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def iofdeprel(ele):
with open('./tmpdata/deprel.json', 'r') as fp:
dict = json.load(fp)
if ele in dict: return dict[ele]
return -1
def pcheck(id1,id2,sentence):
flag=True
if id2>id1:
for words in sentence[id1:id2-1]:
if words['head'] > id2 or words['head'] < id1:
flag=False
break
if id1>id2:
for words in sentence[id2:id1-1]:
if words['head'] > id1 or words['head'] < id2 :
flag=False
break
return flag
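# Illustrative note (not part of the original file): pcheck() is a rough
# projectivity filter -- it rejects a (word, head) pair when any word strictly
# between them attaches outside the [id1, id2] span.  E.g. for pcheck(2, 5, sent),
# if the word with id 3 has head 7, the function returns False and training()
# skips that whole sentence.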
|
the-stack_0_16878 | import os
import time
DEBUG = False
API_URL_PREFIX = "/api/v0"
HOST = '0.0.0.0'
PORT = 5001
ENABLE_CORS = False
#folders and file path
download_folder = 'upload'
TASK_STAT = 'FILE-CONVERTER'
CONSUMER_GROUP = 'anuvaad-etl-fc-consumer-group'
#mongo
MONGO_IP = 'MONGO_IP'
DEFAULT_VALUE = 'localhost'
MONGO_DB_IDENTIFIER = 'MONGO_DB'
DEFAULT_MONGO_DB_IDENTIFIER = 'preprocessing'
MONGO_SERVER_URL = os.environ.get(MONGO_IP, DEFAULT_VALUE)
MONGO_DB = os.environ.get(MONGO_DB_IDENTIFIER, DEFAULT_MONGO_DB_IDENTIFIER)
# kafka
consumer_grp_default = 'anuvaad-etl-fc-consumer-group'
consumer_grp_identifier = 'KAFKA_ANUVAAD_ETL_FC_CONSUMER_GRP'
CONSUMER_GROUP = os.environ.get(consumer_grp_identifier, consumer_grp_default)
tok_input_topic_default = 'anuvaad-dp-tools-fc-input-v1'
tok_input_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_FC_INPUT'
tok_input_topic = os.environ.get(tok_input_topic_identifier, tok_input_topic_default)
tok_output_topic_default = 'anuvaad-dp-tools-fc-output-v1'
tok_output_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_FC_OUTPUT'
tok_output_topic = os.environ.get(tok_output_topic_identifier, tok_output_topic_default)
kf_local_server = 'localhost:9092'
kafka_ip_host = 'KAFKA_CLUSTER_DETAILS'
bootstrap_server = os.environ.get(kafka_ip_host, kf_local_server)
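# Illustrative usage (not part of the original file): every value above falls
# back to its default unless the corresponding environment variable is set, e.g.
#   export MONGO_IP=mongo.internal.example            # overrides 'localhost'
#   export KAFKA_CLUSTER_DETAILS=broker1:9092,broker2:9092
# (the host names here are made up for the example).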
|
the-stack_0_16880 | #!/usr/bin/env python3
#这个脚本用来下载pornhd网站的小电影,只下载720分辨率!
import requests
import sys
import re
import json
import os
import time
page_url = "https://www.pornhd.com/videos"
video_url_info = "https://api.pornhd.com/videos/get-download-url?videoId=%d&resolution=720"
post_headers = {
# ":authority" : "api.pornhd.com",
# ":method": "POST",
# ":scheme": "https",
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"origin": "https://www.pornhd.com",
# "referer": "https://www.pornhd.com/videos/44756/peta-jensen-is-a-fucking-perfect-bombshell-hd-porn-video"
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
download_path = "/var/video/pornhd"
def formatFloat(num):
return '{:.2f}'.format(num)
def download_video(fpath, furl):
print("begin to download this video---------------------------------->")
print("vedio file save path: %s" % fpath)
print("vedio download url: %s" % furl)
request_video = requests.get(furl, stream=True)
length = float(request_video.headers['content-length'])
with open(fpath, 'wb') as f:
count = 0
count_tmp = 0
time1 = time.time()
for chunk in request_video.iter_content(chunk_size = 512):
if chunk:
f.write(chunk)
f.flush()
count += len(chunk)
if time.time() - time1 > 2:
p = count / length * 100
speed = (count - count_tmp) / 1024 / 1024 / 2
count_tmp = count
print(fpath + ': ' + formatFloat(p) + '%' + ' Speed: ' + formatFloat(speed) + 'M/S')
time1 = time.time()
print("------------------------------------->video download finished!")
def main():
if not os.path.isdir(download_path):
os.makedirs(download_path)
vid = sys.argv[1] or 0
if vid:
try:
vid = int(vid)
except Exception as e:
print(e)
sys.exit(1)
v_url = "%s/%d" % (page_url, vid)
try:
html_res = requests.get(v_url)
except Exception as e:
print('[Error] requests send get request error!')
html_content = html_res.text
csrf_token = re.search('\w{0,}\=\=', html_content).group()
meta_name = re.search('<meta name="og:url"(.*?)">', html_content).group()
if not csrf_token or not meta_name:
print("[Error] parse html goes error! Please Check!")
sys.exit(1)
meta_list = meta_name.split('"')
if len(meta_list) < 3:
print(meta_name)
print("[Error] meta info parse error! Please check!")
sys.exit(1)
video_url = meta_list[3] or None
if not video_url:
print(meta_list)
print("[Error] video_url cant be parsed from meta_list! Please check!")
sys.exit(1)
post_headers["referer"] = video_url
video_name_list = video_url.split('/')
video_name = video_name_list[-1]
post_url = video_url_info % vid
r = requests.post(post_url, data={'_csrf-frontend':csrf_token, 'domain': 'www.pornhd.com', '_jwt':'' }, headers=post_headers)
# print("----------------------------------------------------------------------")
# print(r.status_code)
# print(r.headers)
# print(r.text.encode('unicode_escape').decode('utf-8'))
# print("----------------------------------------------------------------------")
if r.status_code == 200:
res_dict = json.loads(r.text)
if res_dict.get('status') == 'success':
video_download_url = res_dict.get('result') or None
if video_download_url:
f_name = "%s/%s.mp4" % (download_path, video_name)
download_video(f_name, video_download_url)
else:
print("[Error] result field is empty!")
sys.exit(1)
else:
print('[Error] status is not success!')
sys.exit(1)
else:
print('[Error] status code is not 200!')
sys.exit(1)
else:
print('[Error] vid is None!')
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('[Error] params passwd error!')
sys.exit(1)
main()
|
the-stack_0_16882 | import vtk
from heartFEM.lcleeHeart import vtk_py as vtk_py
import dolfin as dolfin
import numpy as np
def extractFeNiCsBiVFacet(ugrid,savePath='', geometry="BiV", tol=1e-2):
#tol = 1e-2
#ugrid = vtk_py.readUGrid(meshfilename)
# Extract surface
geom = vtk.vtkGeometryFilter()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
geom.SetInput(ugrid)
else:
geom.SetInputData(ugrid)
geom.Update()
surf = geom.GetOutput()
bc_pts_locator = []
bc_pts = []
bc_pts_range = []
bc_pts_map = []
# Extract Surface Normal
normal = vtk.vtkPolyDataNormals()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
normal.SetInput(surf)
else:
normal.SetInputData(surf)
normal.ComputeCellNormalsOn()
normal.Update()
surf_w_norm = normal.GetOutput()
#vtk_py.writePData(normal.GetOutput(), "normal.vtk")
zmax = surf_w_norm.GetBounds()[5]
surf_w_norm.BuildLinks()
idlist = vtk.vtkIdList()
basecellidlist = vtk.vtkIdTypeArray()
basesurf = vtk.vtkPolyData()
for p in range(0, surf_w_norm.GetNumberOfCells()):
zvec = surf_w_norm.GetCellData().GetNormals().GetTuple3(p)[2]
surf_w_norm.GetCellPoints(p, idlist)
zpos = surf_w_norm.GetPoints().GetPoint(idlist.GetId(0))[2]
if((abs(zvec - 1.0) < tol or abs(zvec + 1.0) < tol) and (abs(zmax - zpos) < tol)):
surf_w_norm.DeleteCell(p)
basecellidlist.InsertNextValue(p)
basesurf = vtk_py.extractCellFromPData(basecellidlist, surf)
baseptlocator = vtk.vtkPointLocator()
baseptlocator.SetDataSet(basesurf)
baseptlocator.BuildLocator()
#######################################################################
surf_w_norm.RemoveDeletedCells()
cleanpdata = vtk.vtkCleanPolyData()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
cleanpdata.SetInput(surf_w_norm)
else:
cleanpdata.SetInputData(surf_w_norm)
cleanpdata.Update()
connfilter = vtk.vtkPolyDataConnectivityFilter()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
connfilter.SetInput(cleanpdata.GetOutput())
else:
connfilter.SetInputData(cleanpdata.GetOutput())
connfilter.Update()
print ("Total_num_points = ", cleanpdata.GetOutput().GetNumberOfPoints())
tpt = 0
if(geometry=="BiV"):
nsurf = 3
else:
nsurf = 2
for p in range(0,nsurf):
pts = vtk.vtkPolyData()
connfilter.SetExtractionModeToSpecifiedRegions()
[connfilter.DeleteSpecifiedRegion(k) for k in range(0,nsurf)]
connfilter.AddSpecifiedRegion(p)
connfilter.ScalarConnectivityOff()
connfilter.FullScalarConnectivityOff()
connfilter.Update()
cleanpdata2 = vtk.vtkCleanPolyData()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
cleanpdata2.SetInput(connfilter.GetOutput())
else:
cleanpdata2.SetInputData(connfilter.GetOutput())
cleanpdata2.Update()
pts.DeepCopy(cleanpdata2.GetOutput())
tpt = tpt + cleanpdata2.GetOutput().GetNumberOfPoints()
ptlocator = vtk.vtkPointLocator()
ptlocator.SetDataSet(pts)
ptlocator.BuildLocator()
bc_pts_locator.append(ptlocator)
bc_pts.append(pts)
bc_pts_range.append([abs(pts.GetBounds()[k+1] - pts.GetBounds()[k]) for k in range(0, 6, 2)])
#vtk_py.writePData(connfilter.GetOutput(), "/home/likchuan/Research/fenicsheartmesh/ellipsoidal/Geometry/test.vtk")
print ("Total_num_points = ", tpt)
Epiid = np.argmax(np.array([max(pts) for pts in bc_pts_range]))
maxzrank = np.array([pts[2] for pts in bc_pts_range]).argsort()
if(geometry=="BiV"):
LVid = maxzrank[1]
RVid = 3 - (LVid + Epiid)
bc_pts_map = [4, 4, 4, 4]
bc_pts_map[Epiid] = 1; bc_pts_map[LVid] = 2; bc_pts_map[RVid] = 3
baseid = 3;
else:
LVid = maxzrank[0]
bc_pts_map = [4, 4, 4]
bc_pts_map[Epiid] = 1; bc_pts_map[LVid] = 2
baseid = 2;
bc_pts_locator.append(baseptlocator)
bc_pts.append(basesurf)
dolfin_mesh = vtk_py.convertUGridToXMLMesh(ugrid)
#dolfin_facets = dolfin.FacetFunction('size_t', dolfin_mesh)
dolfin_facets = dolfin.MeshFunction('size_t', dolfin_mesh,dolfin_mesh.topology().dim()-1, dolfin_mesh.domains())
dolfin_facets.set_all(0)
for facet in dolfin.SubsetIterator(dolfin_facets, 0):
for locator in range(0,nsurf+1):
cnt = 0
for p in range(0,3):
v0 = dolfin.Vertex(dolfin_mesh, facet.entities(0)[p]).x(0)
v1 = dolfin.Vertex(dolfin_mesh, facet.entities(0)[p]).x(1)
v2 = dolfin.Vertex(dolfin_mesh, facet.entities(0)[p]).x(2)
ptid = bc_pts_locator[locator].FindClosestPoint(v0, v1, v2)
x0 = bc_pts[locator].GetPoints().GetPoint(ptid)
dist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], x0)
if(dist < 1e-5*tol):
cnt = cnt + 1
if(cnt == 3):
dolfin_facets[facet] = bc_pts_map[locator]
#dolfin_edges = dolfin.EdgeFunction('size_t', dolfin_mesh)
dolfin_edges = dolfin.MeshFunction('size_t', dolfin_mesh,1, dolfin_mesh.domains())
dolfin_edges.set_all(0)
epilocator = Epiid
lvendolocator = LVid
for edge in dolfin.SubsetIterator(dolfin_edges, 0):
cnt_epi = 0; cnt_lvendo = 0;
for p in range(0,2):
v0 = dolfin.Vertex(dolfin_mesh, edge.entities(0)[p]).x(0)
v1 = dolfin.Vertex(dolfin_mesh, edge.entities(0)[p]).x(1)
v2 = dolfin.Vertex(dolfin_mesh, edge.entities(0)[p]).x(2)
epiptid = bc_pts_locator[epilocator].FindClosestPoint(v0, v1, v2)
epix0 = bc_pts[epilocator].GetPoints().GetPoint(epiptid)
epidist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], epix0)
topptid = bc_pts_locator[baseid].FindClosestPoint(v0, v1, v2)
topx0 = bc_pts[baseid].GetPoints().GetPoint(topptid)
topdist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], topx0)
lvendoptid = bc_pts_locator[lvendolocator].FindClosestPoint(v0, v1, v2)
lvendox0 = bc_pts[lvendolocator].GetPoints().GetPoint(lvendoptid)
lvendodist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], lvendox0)
if(topdist < 1e-5*tol and epidist < 1e-5*tol):
cnt_epi = cnt_epi + 1
if(topdist < 1e-5*tol and lvendodist < 1e-5*tol):
cnt_lvendo = cnt_lvendo + 1
if(cnt_epi == 2):
dolfin_edges[edge] = 1
if(cnt_lvendo == 2):
dolfin_edges[edge] = 2
dolfin.File(savePath+"temp.pvd") << dolfin_facets
return dolfin_mesh, dolfin_facets, dolfin_edges
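# Illustrative usage sketch (not part of the original file; the file name and
# paths are assumptions): a typical call reads an unstructured grid first, e.g.
#   ugrid = vtk_py.readUGrid("biv_mesh.vtk")
#   mesh, facets, edges = extractFeNiCsBiVFacet(ugrid, savePath="./out/", geometry="BiV")
# With the markers assigned above, facets carry 1=epicardium, 2=LV endocardium,
# 3=RV endocardium and 4=base.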
|
the-stack_0_16885 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-08-09 11:39:25
import json
import time
import datetime
from tornado import gen
import re
import os
import config
from base import *
import sqlite3
from backup import DBnew
import codecs
import requests
import traceback
from funcs import pusher
def tostr(s):
if isinstance(s, bytearray):
return str(s)
return s
class UserRegPush(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
self.render('user_register_pusher.html', userid=userid)
@tornado.web.authenticated
def post(self, userid):
env = json.loads(self.request.body_arguments['env'][0])
token = env["wxpusher_token"]
uid = env["wxpusher_uid"]
skey = env["skey"]
barkurl = env["barkurl"]
qywx_token = env["qywx_token"]
log = ""
if ("reg" == self.request.body_arguments['func'][0]):
try:
if (token != "") and (uid != ""):
temp = token + ";" + uid
self.db.user.mod(userid, wxpusher = temp)
if (self.db.user.get(userid, fields=("wxpusher"))["wxpusher"] == temp):
log = u"注册 wxpusher 成功\r\n"
else:
log = u"注册 wxpusher 失败\r\n"
else:
log = u"wxpusher 未填写完整\r\n"
if (skey != ""):
self.db.user.mod(userid, skey = skey)
if (self.db.user.get(userid, fields=("skey"))["skey"] == skey):
log = log+u"注册 S酱 成功\r\n"
else:
log = log+u"注册 S酱 失败\r\n"
else:
log = log+u"skey 未填写完整\r\n"
if (barkurl != ""):
if (barkurl[-1] != '/'):
barkurl=barkurl+'/'
self.db.user.mod(userid, barkurl = barkurl)
if (self.db.user.get(userid, fields=("barkurl"))["barkurl"] == barkurl):
log = log+u"注册 Bark 成功\r\n"
else:
log = log+u"注册 Bark 失败\r\n"
else:
log = log+u"Bark 未填写完整\r\n"
if (qywx_token != ""):
self.db.user.mod(userid, qywx_token = qywx_token)
if (self.db.user.get(userid, fields=("qywx_token"))["qywx_token"] == qywx_token):
log = log+u"注册 企业微信 成功\r\n"
else:
log = log+u"注册 企业微信 失败\r\n"
else:
log = log+u"企业微信 未填写完整\r\n"
except Exception as e:
self.render('tpl_run_failed.html', log=e)
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
else:
try:
f = pusher()
t = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
if (token != "") and (uid != ""):
f.send2wxpusher("{0};{1}".format(token, uid),u"{t} 发送测试".format(t=t))
log = u"wxpusher 已推送,请检查是否收到\r\n"
else:
log = u"wxpusher 未填写完整\r\n"
if (skey != ""):
f.send2s(skey, u"正在测试S酱", u"{t} 发送测试".format(t=t))
log = log+u"S酱 已推送,请检查是否收到\r\n"
else:
log = log+u"skey 未填写完整\r\n"
if (barkurl != ""):
f.send2bark(barkurl, u"正在测试Bark", u"{t} 发送测试".format(t=t))
log = log+u"Bark 已推送,请检查是否收到\r\n"
else:
log = log+u"Bark 未填写完整\r\n"
if (qywx_token != ""):
f.qywx_pusher_send(qywx_token, "正在测试企业微信", u"{t} 发送测试".format(t=t))
log = log+u"企业微信 已推送,请检查是否收到\r\n"
else:
log = log+u"企业微信 未填写完整\r\n"
except Exception as e:
self.render('tpl_run_failed.html', log=e)
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
class UserRegPushSw(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
tasks = []
for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
task['tpl'] = tpl
task['pushsw'] = json.loads(task['pushsw'])
tasks.append(task)
temp = self.db.user.get(userid, fields=('noticeflg'))
temp = temp['noticeflg']
flg = {}
flg['barksw'] = False if ((temp & 0x040) == 0) else True
flg['schansw'] = False if ((temp & 0x020) == 0) else True
flg['wxpushersw'] = False if ((temp & 0x010) == 0) else True
flg['mailpushersw'] = False if ((temp & 0x080) == 0) else True
flg['cuspushersw'] = False if ((temp & 0x100) == 0) else True
flg['qywxpushersw'] = False if ((temp & 0x200) == 0) else True
flg['handpush_succ'] = False if ((temp & 0x008) == 0) else True
flg['handpush_fail'] = False if ((temp & 0x004) == 0) else True
flg['autopush_succ'] = False if ((temp & 0x002) == 0) else True
flg['autopush_fail'] = False if ((temp & 0x001) == 0) else True
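        # Bit layout of noticeflg, mirrored in post() below (illustrative note,
        # not in the original source): bit0 autopush_fail, bit1 autopush_succ,
        # bit2 handpush_fail, bit3 handpush_succ, bit4 wxpusher, bit5 schan,
        # bit6 bark, bit7 mailpusher, bit8 cuspusher, bit9 qywxpusher.
        # e.g. noticeflg == 0x04A enables bark, handpush_succ and autopush_succ.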
logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
if 'schanEN' not in logtime:logtime['schanEN'] = False
if 'WXPEn' not in logtime:logtime['WXPEn'] = False
if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
self.render('user_register_pushsw.html', userid=userid, flg=flg, tasks=tasks, logtime=logtime)
@tornado.web.authenticated
def post(self, userid):
try:
tasks = []
for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
task['tpl'] = tpl
task['pushsw'] = json.loads(task['pushsw'])
task['pushsw']["logen"] = False
task['pushsw']["pushen"] = False
tasks.append(task)
temp = self.db.user.get(userid, fields=('noticeflg'))
env = json.loads(self.request.body_arguments['env'][0])
logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
if (logtime['ErrTolerateCnt'] != int(env['ErrTolerateCnt'])):
logtime['ErrTolerateCnt'] = int(env['ErrTolerateCnt'])
self.db.user.mod(userid, logtime=json.dumps(logtime))
barksw_flg = 1 if ("barksw" in env) else 0
schansw_flg = 1 if ("schansw" in env) else 0
wxpushersw_flg = 1 if ("wxpushersw" in env) else 0
mailpushersw_flg = 1 if ("mailpushersw" in env) else 0
cuspushersw_flg = 1 if ("cuspushersw" in env) else 0
qywxpushersw_flg = 1 if ("qywxpushersw" in env) else 0
handpush_succ_flg = 1 if ("handpush_succ" in env) else 0
handpush_fail_flg = 1 if ("handpush_fail" in env) else 0
autopush_succ_flg = 1 if ("autopush_succ" in env) else 0
autopush_fail_flg = 1 if ("autopush_fail" in env) else 0
flg = (qywxpushersw_flg << 9) \
| (cuspushersw_flg << 8) \
| (mailpushersw_flg << 7) \
| (barksw_flg << 6) \
| (schansw_flg << 5) \
| (wxpushersw_flg << 4) \
| (handpush_succ_flg << 3) \
| (handpush_fail_flg << 2) \
| (autopush_succ_flg << 1) \
| (autopush_fail_flg)
for e in env:
temp = re.findall(r"(.+?)pushen", e)
if len(temp) > 0:
taskid = int(temp[0])
for task in tasks:
if (taskid == task["id"]):
task['pushsw']["pushen"] = True
self.db.user.mod(userid, noticeflg=flg)
for task in tasks:
self.db.task.mod(task["id"], pushsw=json.dumps(task['pushsw']))
except Exception as e:
self.render('tpl_run_failed.html', log=e)
return
self.render('utils_run_result.html', log=u"设置完成", title=u'设置成功', flg='success')
return
class UserManagerHandler(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
adminflg = False
users = []
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
adminflg = True
users = []
for user in self.db.user.list(fields=('id','status', 'role', 'ctime', 'email', 'atime', 'email_verified')):
if (user['email_verified'] == 0):
user['email_verified'] = False
else:
user['email_verified'] = True
users.append(user)
self.render("user_manage.html", users=users, userid=userid, adminflg=adminflg)
return
@tornado.web.authenticated
def post(self, userid):
try:
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
if self.db.user.challenge(mail, pwd):
Target_users = []
for key, value in envs.items():
if value[0] == "on":
Target_users.append(key)
for sub_user in Target_users:
if (self.db.user.get(sub_user, fields=('role')) != 'admin'):
if 'banbtn' in envs:
self.db.user.mod(sub_user, status='Disable')
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.mod(task['id'], disabled=True)
if 'activatebtn' in envs:
self.db.user.mod(sub_user, status='Enable')
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.mod(task['id'], disabled=False)
if 'delbtn' in envs:
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.delete(task['id'])
logs = self.db.tasklog.list(taskid = task['id'], fields=('id'))
for log in logs:
self.db.tasklog.delete(log['id'])
for tpl in self.db.tpl.list(fields=('id', 'userid'), limit=None):
if tpl['userid'] == int(sub_user):
self.db.tpl.delete(tpl['id'])
self.db.user.delete(sub_user)
else:
raise Exception(u"账号/密码错误")
else:
raise Exception(u"非管理员,不可操作")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
self.redirect('/my/')
return
class UserDBHandler(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
adminflg = False
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
adminflg = True
self.render("DB_manage.html", userid=userid, adminflg=adminflg)
return
@tornado.web.authenticated
def post(self, userid):
try:
user = self.db.user.get(userid, fields=('role', 'email'))
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
now=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
if ('backupbtn' in envs):
if user and user['role'] == "admin":
filename = config.sqlite3.path
savename = "database_{now}.db".format(now=now)
self.set_header ('Content-Type', 'application/octet-stream')
self.set_header ('Content-Disposition', 'attachment; filename='+savename)
with open(filename, 'rb') as f:
while True:
data = f.read(1024)
if not data:
break
self.write(data)
self.finish()
return
else:
raise Exception(u"管理员才能备份数据库")
if ('backuptplsbtn' in envs):
tpls = []
for tpl in self.db.tpl.list(userid=userid, fields=('id', 'siteurl', 'sitename', 'banner', 'note','fork', 'groups', 'har', 'tpl', 'variables'), limit=None):
tpl['tpl'] = self.db.user.decrypt(userid, tpl['tpl'])
tpl['har'] = self.db.user.decrypt(userid, tpl['har'])
tpls.append(tpl)
tasks = []
for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'groups', 'init_env', 'env', 'ontimeflg', 'ontime', 'pushsw', 'newontime'), limit=None):
task['init_env'] = self.db.user.decrypt(userid, task['init_env'])
task['env'] = self.db.user.decrypt(userid, task['env']) if task['env'] else None
tasks.append(task)
backupdata = {}
backupdata['tpls'] = tpls
backupdata['tasks'] = tasks
savename = "{mail}_{now}.json".format(mail = user['email'], now=now)
fp = codecs.open(savename, 'w', 'utf-8')
fp.write(json.dumps(backupdata, ensure_ascii=False, indent=4 ))
fp.close()
self.set_header ('Content-Type', 'application/octet-stream')
self.set_header ('Content-Disposition', 'attachment; filename='+savename)
with open(savename, 'rb') as f:
while True:
data = f.read(1024)
if not data:
break
self.write(data)
os.remove(savename)
self.finish()
return
if ('recoverytplsbtn' in envs):
if ('recfile' in envs):
tpls = json.loads(envs['recfile'][0])['tpls']
tasks = json.loads(envs['recfile'][0])['tasks']
ids = []
for newtpl in tpls:
userid2 = int(userid)
har = self.db.user.encrypt(userid2, newtpl['har'])
tpl = self.db.user.encrypt(userid2, newtpl['tpl'])
variables = newtpl['variables']
newid = self.db.tpl.add(userid2, har, tpl, variables)
self.db.tpl.mod(newid, fork = newtpl['fork'],
siteurl = newtpl['siteurl'],
sitename = newtpl['sitename'],
note = newtpl['note'],
groups = u'备份还原',
banner = newtpl['banner']
)
for task in tasks:
if (task['tplid'] == newtpl['id']):
task['tplid'] = newid
for newtask in tasks:
userid2 = int(userid)
newtask['init_env'] = self.db.user.encrypt(userid2, newtask['init_env'])
newtask['env'] = self.db.user.encrypt(userid2, newtask['env'])
taskid = self.db.task.add(newtask['tplid'], userid, newtask['env'])
self.db.task.mod(taskid, disabled = newtask['disabled'],
init_env = newtask['init_env'],
session = None,
note = newtask['note'],
groups = u'备份还原',
ontimeflg = newtask['ontimeflg'],
ontime = newtask['ontime'],
pushsw = newtask['pushsw'],
newontime = newtask['newontime']
)
self.render('utils_run_result.html', log=u"设置完成", title=u'设置成功', flg='success')
return
else:
raise Exception(u"请上传文件")
else:
raise Exception(u"账号/密码错误")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
return
class toolbox_notpad_Handler(BaseHandler):
@tornado.web.authenticated
def get(self,userid):
user = self.current_user
text_data = self.db.user.get(userid, fields=('notepad'))['notepad']
self.render('toolbox-notepad.html', text_data = text_data, userid=userid)
return
@tornado.web.authenticated
def post(self,userid):
try:
user = self.db.user.get(userid, fields=('role', 'email'))
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
if ('mode' in envs) and ('content' in envs):
if (envs['mode'][0] == 'write'):
new_data = envs['content'][0]
else:
data = self.db.user.get(userid, fields=('notepad'))['notepad']
new_data = data + "\r\n" +envs['content'][0]
self.db.user.mod(userid, notepad=new_data)
else:
raise Exception(u"参数错误")
else:
raise Exception(u"账号/密码错误")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
return
class UserPushShowPvar(BaseHandler):
@tornado.web.authenticated
def post(self,userid):
try:
user = self.db.user.get(userid, fields=('role', 'email'))
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
key = self.db.user.get(userid, fields=("barkurl", 'skey', 'wxpusher', 'qywx_token'))
log = u"""barkurl 前值:{bark}\r\nskey 前值:{skey}\r\nwxpusher 前值:{wxpusher}\r\n企业微信 前值:{qywx_token}""".format(
bark = key['barkurl'],
skey = key['skey'],
wxpusher = key['wxpusher'],
qywx_token = key['qywx_token'])
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return log
else:
raise Exception(u"账号/密码错误")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
return
class custom_pusher_Handler(BaseHandler):
@tornado.web.authenticated
def get(self,userid):
diypusher = self.db.user.get(userid, fields=('diypusher'))['diypusher']
diypusher = json.loads(diypusher) if (diypusher != '') else {'mode':'GET'}
self.render('user_register_cus_pusher.html', userid=userid, diypusher=diypusher)
return
@tornado.web.authenticated
def post(self,userid):
try:
envs = self.request.body_arguments
for env in envs.keys():
envs[env] = envs[env][0]
req = pusher()
log = ''
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
tmp = req.cus_pusher_send(envs ,u'推送测试', now)
if ('True' == tmp):
if (envs['btn'] == 'regbtn'):
self.db.user.mod(userid, diypusher=json.dumps(envs))
else:
raise Exception(tmp)
log = u'运行成功,请检查是否收到推送'
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
traceback.print_exc()
self.render('utils_run_result.html', log=traceback.format_exc(), title=u'设置失败', flg='danger')
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
class UserSetNewPWDHandler(BaseHandler):
@tornado.web.authenticated
def get(self,userid):
email = self.db.user.get(userid, fields=('email'))['email']
self.render('user_setnewpwd.html', userid=userid, usermail=email)
return
@tornado.web.authenticated
def post(self,userid):
try:
log = u'设置成功'
envs = self.request.body_arguments
for env in envs.keys():
envs[env] = u'{0}'.format(envs[env][0])
adminuser = self.db.user.get(email=envs['管理员邮箱'], fields=('role', 'email'))
newPWD = envs['新密码']
if self.db.user.challenge(envs['管理员邮箱'], envs['管理员密码']) and (adminuser['role'] == 'admin'):
if (len(newPWD) >= 6):
self.db.user.mod(userid, password=newPWD)
if not (self.db.user.challenge(envs['用户名'], newPWD)):
raise Exception(u'修改失败')
else:
raise Exception(u'密码长度要大于6位')
else:
raise Exception(u'管理员用户名/密码错误')
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
traceback.print_exc()
self.render('utils_run_result.html', log=traceback.format_exc(), title=u'设置失败', flg='danger')
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
handlers = [
('/user/(\d+)/pushsw', UserRegPushSw),
('/user/(\d+)/regpush', UserRegPush),
('/user/(\d+)/UserPushShowPvar', UserPushShowPvar),
('/user/(\d+)/manage', UserManagerHandler),
('/user/(\d+)/database', UserDBHandler),
('/util/toolbox/(\d+)/notepad', toolbox_notpad_Handler),
('/util/custom/(\d+)/pusher', custom_pusher_Handler),
('/user/(\d+)/setnewpwd', UserSetNewPWDHandler),
]
|
the-stack_0_16886 | from nornir import InitNornir
from nornir.core.filter import F
import ipdb
ipdb.set_trace()
nr = InitNornir(config_file="config.yaml")
tmp_nr = nr.filter(name="sros1")
tmp_nr = nr.filter(platform="nokia_sros")
tmp_nr = nr.filter(hostname="vmx1.lasthop.io")
sros = nr.filter(F(groups__contains="sros"))
all_devices = nr.filter(F(groups__contains="sros") | F(groups__contains="junos"))
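# Illustrative follow-up (not part of the original file): each filtered object
# is a full Nornir instance, so e.g. list(sros.inventory.hosts) shows only the
# hosts in the "sros" group, and sros.run(task=...) would execute a task against
# just those hosts (the task itself is left out here as an assumption).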
|
the-stack_0_16890 | from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from tastypie.exceptions import BadRequest
from crits.services.handlers import add_result, add_log, finish_task
from crits.services.service import CRITsService
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class ServiceResource(CRITsAPIResource):
"""
Class to handle everything related to the Services API.
Currently supports POST.
"""
class Meta:
object_class = CRITsService
allowed_methods = ('post',)
resource_name = "services"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(ServiceResource, self).get_object_list(request,
CRITsService,
False)
def obj_create(self, bundle, **kwargs):
"""
Handles creating service result entries through the API.
:param bundle: Bundle containing the service results to add.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
object_type = bundle.data.get('object_type', None)
object_id = bundle.data.get('object_id', None)
analysis_id = bundle.data.get('analysis_id', None)
result = bundle.data.get('result', None)
result_type = bundle.data.get('result_type', None)
result_subtype = bundle.data.get('result_subtype', None)
log_message = bundle.data.get('log_message', None)
log_level = bundle.data.get('log_level', 'info')
status = bundle.data.get('status', None)
finish = bundle.data.get('finish', False)
success = True
message = ""
content = {'return_code': 1,
'type': object_type}
if not object_type or not object_id or not analysis_id:
content['message'] = 'Need an object type, object id, and analysis id.'
self.crits_response(content)
if result:
if not result_type or not result_subtype:
content['message'] = 'When adding a result, also need type and subtype'
self.crits_response(content)
result = add_result(object_type, object_id, analysis_id,
result, result_type, result_subtype, analyst)
if not result['success']:
message += ", %s" % result['message']
success = False
if log_message:
result = add_log(object_type, object_id, analysis_id,
log_message, log_level, analyst)
if not result['success']:
message += ", %s" % result['message']
success = False
if finish:
result = finish_task(object_type, object_id, analysis_id,
status, analyst)
if not result['success']:
message += ", %s" % result['message']
success = False
content['message'] = message
content['id'] = object_id
rname = self.resource_name_from_type(object_type)
url = reverse('api_dispatch_detail',
kwargs={'resource_name': rname,
'api_name': 'v1',
'pk': object_id})
content['url'] = url
if success:
content['return_code'] = 0
self.crits_response(content)
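# Illustrative request body (not part of the original source; field values are
# made up): a service would POST JSON like
#   {"object_type": "Sample", "object_id": "<24-char ObjectId>",
#    "analysis_id": "<task id>", "result": "8.8.8.8", "result_type": "IP",
#    "result_subtype": "contacted", "log_message": "done", "log_level": "info",
#    "status": "success", "finish": true}
# to add a result, append a log line and mark the task finished in one call.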
|
the-stack_0_16891 | import torch.nn as nn
from torch.autograd import Variable
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers):
super(RNNModel, self).__init__()
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, bias=False)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, bias=False)
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.encoder(input)
output, hidden = self.rnn(emb, hidden)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
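# Minimal usage sketch (not part of the original file); the sizes are arbitrary
# and it assumes a PyTorch version where Variable is a thin wrapper around tensors.
if __name__ == "__main__":
    import torch
    model = RNNModel('LSTM', ntoken=1000, ninp=64, nhid=64, nlayers=2)
    hidden = model.init_hidden(bsz=8)
    inp = Variable(torch.randint(0, 1000, (35, 8)))   # (seq_len, batch) token ids
    output, hidden = model(inp, hidden)               # output: (35, 8, ntoken)
    print(output.size())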
|
the-stack_0_16893 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures"""
import warnings
from typing import Optional
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ...configuration_utils import PretrainedConfig
from ...modeling_outputs import Seq2SeqLMOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
from .configuration_encoder_decoder import EncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "EncoderDecoderConfig"
DEPRECATION_WARNING = (
"Version v4.12.0 introduces a better way to train encoder-decoder models by computing the loss inside the "
"encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if fine-tuning "
"a model trained with versions anterior to 4.12.0. The decoder_input_ids are now created based on the labels, no "
"need to pass them yourself anymore."
)
ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
[`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
generative task, like summarization.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
Zhou, Wei Li, Peter J. Liu.
After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
(see the examples for more information).
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
If set to `True`, the model will return a [`~file_utils.Seq2SeqLMOutput`] instead of a plain tuple.
kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
- With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
"""
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
if decoder_start_token_id is None:
raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
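# Worked example (not part of the original source): with pad_token_id=0 and
# decoder_start_token_id=2, labels [[5, 6, -100]] become decoder inputs
# [[2, 5, 6]] -- everything moves one position to the right, the start token is
# prepended, and any remaining -100 entries would be replaced by the pad id.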
@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
class EncoderDecoderModel(PreTrainedModel):
r"""
[`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
of the base model classes of the library as encoder and another one as decoder when created with the
:meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
:meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
"""
config_class = EncoderDecoderConfig
base_model_prefix = "encoder_decoder"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
encoder: Optional[PreTrainedModel] = None,
decoder: Optional[PreTrainedModel] = None,
):
if config is None and (encoder is None or decoder is None):
raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
if config is None:
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"Config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the decoder's configuration, "
"it has to be equal to the encoder's `hidden_size`. "
f"Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` "
f"and {config.encoder.hidden_size} for `config.encoder.hidden_size`."
)
# initialize with config
super().__init__(config)
if encoder is None:
from ..auto.modeling_auto import AutoModel
encoder = AutoModel.from_config(config.encoder)
if decoder is None:
from ..auto.modeling_auto import AutoModelForCausalLM
decoder = AutoModelForCausalLM.from_config(config.decoder)
self.encoder = encoder
self.decoder = decoder
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(
f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config: {self.config.encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config: {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
# encoder outputs might need to be projected to different dimension for decoder
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
)
# tie encoder, decoder weights if config set accordingly
self.tie_weights()
def tie_weights(self):
# tie encoder & decoder if needed
if self.config.tie_encoder_decoder:
# tie encoder and decoder base model
decoder_base_model_prefix = self.decoder.base_model_prefix
self._tie_encoder_decoder_weights(
self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix
)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_pretrained(cls, *args, **kwargs):
# At the moment fast initialization is not supported for composite models
if kwargs.get("_fast_init", False):
logger.warning(
"Fast initialization is currently not supported for EncoderDecoderModel. "
"Falling back to slow initialization..."
)
kwargs["_fast_init"] = False
return super().from_pretrained(*args, **kwargs)
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: str = None,
decoder_pretrained_model_name_or_path: str = None,
*model_args,
**kwargs
) -> PreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import EncoderDecoderModel
>>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
>>> # saving model after fine-tuning
>>> model.save_pretrained("./bert2bert")
>>> # load fine-tuned model
>>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
```"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder.keys():
del kwargs["encoder_" + key]
for key in kwargs_decoder.keys():
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_encoder:
encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
"from a decoder model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. "
f"Cross attention layers are added to {decoder_pretrained_model_name_or_path} "
f"and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for "
"cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
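    # Illustrative sketch (not part of the original module): how prefixed keyword arguments are
    # routed by `from_encoder_decoder_pretrained`. The parameter names below assume BERT-style
    # configurations and are for illustration only.
    #
    #     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    #         "bert-base-uncased",
    #         "bert-base-uncased",
    #         encoder_hidden_dropout_prob=0.2,  # prefix stripped, applied to the encoder config
    #         decoder_num_hidden_layers=6,  # prefix stripped, applied to the decoder config
    #     )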
@add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
Returns:
Examples:
```python
>>> from transformers import EncoderDecoderModel, BertTokenizer
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
... "bert-base-uncased", "bert-base-uncased"
        ... )  # initialize Bert2Bert from pre-trained checkpoints
>>> # training
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> model.config.vocab_size = model.config.decoder.vocab_size
>>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
>>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
        >>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("bert2bert")
>>> model = EncoderDecoderModel.from_pretrained("bert2bert")
>>> # generation
>>> generated = model.generate(input_ids)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
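        # Split the remaining keyword arguments: anything prefixed with `decoder_` is routed to the
        # decoder, everything else is passed through to the encoder.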
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs_encoder,
)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
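        # If labels are provided but no explicit decoder inputs, build `decoder_input_ids` by
        # shifting the labels one position to the right (standard teacher forcing for seq2seq training).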
if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=return_dict,
**kwargs_decoder,
)
# Compute loss independent from decoder (as some shift the logits inside them)
loss = None
if labels is not None:
warnings.warn(DEPRECATION_WARNING, FutureWarning)
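            # Flatten the logits to (batch_size * sequence_length, vocab_size) and the labels to a
            # 1-D tensor before computing the token-level cross-entropy loss.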
logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
if not return_dict:
if loss is not None:
return (loss,) + decoder_outputs + encoder_outputs
else:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
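    # Illustrative sketch (not part of the original module), assuming the `shift_tokens_right`
    # helper defined in this file: with decoder_start_token_id=101 and pad_token_id=0, labels
    # [[5, 6, 7]] become decoder inputs [[101, 5, 6]], so each target token is predicted from the
    # tokens that precede it.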
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
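        # Delegate input preparation to the wrapped decoder, then repackage its output together with
        # the cached encoder outputs so that `generate()` can reuse them at every decoding step.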
decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
input_dict = {
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_input_ids": decoder_inputs["input_ids"],
"encoder_outputs": encoder_outputs,
"past_key_values": decoder_inputs["past_key_values"],
"use_cache": use_cache,
}
return input_dict
def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError(
"Resizing the embedding layers via the EncoderDecoderModel directly is not supported. "
"Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or model.decoder.resize_token_embeddings(...))"
)
def _reorder_cache(self, past, beam_idx):
# apply decoder cache reordering here
return self.decoder._reorder_cache(past, beam_idx)