metadata | text
---|---
{
"source": "JJS-X/PiOT",
"score": 3
} |
#### File: graph/connexion/checkpassword.py
```python
import re
import sys
import crypt
import cherrypy
def checkpass(username, password):
    """Check a username/password pair against /etc/shadow; returns "True", "False" or "not user"."""
    goodpassword = None
    with open("/etc/shadow", "r") as passwd:
        for line in passwd:
            # Compare against the user name field only, not the whole line,
            # so that e.g. "bob" does not match "bobby".
            if line.split(":")[0] == username:
                goodpassword = line
                break
    if goodpassword is not None:
        table = goodpassword.split(":")
        fields = table[1].split("$")
        # Rebuild the "$id$salt$" prefix and compare the resulting hash.
        if crypt.crypt(password, "$" + fields[1] + "$" + fields[2] + "$") == table[1]:
            return "True"
        else:
            return "False"
    else:
        return "not user"
def setCookie():
cookie = cherrypy.response.cookie
cookie['cookieName'] = 'test'
cookie['cookieName']['path'] = '/'
cookie['cookieName']['max-age'] = 3600
cookie['cookieName']['version'] = 1
def readCookie():
cookie = cherrypy.request.cookie
for name in cookie.keys():
if name == "cookieName":
if cookie[name].value == "test":
return True
return False
# def readCookie():
# cookie = cherrypy.request.cookie
# res = """<html><body>Hi, you sent me %s cookies.<br />
# Here is a list of cookie names/values:<br />""" % len(cookie)
# for name in cookie.keys():
# res += "name: %s, value: %s<br>" % (name, cookie[name].value)
# return res + "</body></html>"
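# --- Added usage sketch (illustrative only, not part of the original file) ---
# One way checkpass(), setCookie() and readCookie() might be wired into a
# CherryPy handler. The class name LoginPage and the routes below are
# hypothetical, and reading /etc/shadow requires sufficient privileges.
#
# class LoginPage:
#     @cherrypy.expose
#     def login(self, username="", password=""):
#         if checkpass(username, password) == "True":
#             setCookie()                        # mark the browser with the test cookie
#             return "logged in"
#         return "login failed"
#
#     @cherrypy.expose
#     def status(self):
#         return "authenticated" if readCookie() else "anonymous"
#
# cherrypy.quickstart(LoginPage(), '/')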
``` |
{
"source": "jjt20/scripts",
"score": 3
} |
#### File: scripts/util/compute_connectivity_files.py
```python
import numpy as np
import os
PRD = os.environ['PRD']
SUBJ_ID = os.environ['SUBJ_ID']
PARCEL = os.environ['PARCEL']
def compute_triangle_areas(vertices, triangles):
"""Calculates the area of triangles making up a surface."""
tri_u = vertices[triangles[:, 1], :] - vertices[triangles[:, 0], :]
tri_v = vertices[triangles[:, 2], :] - vertices[triangles[:, 0], :]
tri_norm = np.cross(tri_u, tri_v)
triangle_areas = np.sqrt(np.sum(tri_norm ** 2, axis=1)) / 2.0
triangle_areas = triangle_areas[:, np.newaxis]
return triangle_areas
def compute_region_areas(triangle_areas, vertex_triangles):
avt = np.array(vertex_triangles)
#NOTE: Slightly overestimates as it counts overlapping border triangles,
# but, not really a problem provided triangle-size << region-size.
regs = map(set, avt)
region_triangles = set.union(*regs)
region_surface_area = triangle_areas[list(region_triangles)].sum()
return region_surface_area
def compute_region_orientation(vertex_normals):
average_orientation = np.zeros((1, 3))
# Average orientation of the region
orient = vertex_normals[:, :]
avg_orient = np.mean(orient, axis=0)
average_orientation = avg_orient / np.sqrt(np.sum(avg_orient ** 2))
region_orientation = average_orientation
return region_orientation
def compute_vertex_triangles(number_of_vertices, number_of_triangles, triangles):
vertex_triangles = [[] for _ in range(number_of_vertices)]
for k in range(number_of_triangles):
vertex_triangles[triangles[k, 0]].append(k)
vertex_triangles[triangles[k, 1]].append(k)
vertex_triangles[triangles[k, 2]].append(k)
return vertex_triangles
def compute_vertex_normals(number_of_vertices, vertex_triangles, triangles,
triangle_angles, triangle_normals, vertices):
"""
Estimates vertex normals, based on triangle normals weighted by the
angle they subtend at each vertex...
"""
vert_norms = np.zeros((number_of_vertices, 3))
bad_normal_count = 0
for k in range(number_of_vertices):
try:
tri_list = list(vertex_triangles[k])
angle_mask = triangles[tri_list, :] == k
angles = triangle_angles[tri_list, :]
angles = angles[angle_mask][:, np.newaxis]
angle_scaling = angles / np.sum(angles, axis=0)
vert_norms[k, :] = np.mean(angle_scaling * triangle_normals[tri_list, :], axis=0)
# Scale by angle subtended.
vert_norms[k, :] = vert_norms[k, :] / np.sqrt(np.sum(vert_norms[k, :] ** 2, axis=0))
# Normalise to unit vectors.
except (ValueError, FloatingPointError):
# If normals are bad, default to position vector
# A nicer solution would be to detect degenerate triangles and ignore their
# contribution to the vertex normal
vert_norms[k, :] = vertices[k] / np.sqrt(vertices[k].dot(vertices[k]))
bad_normal_count += 1
if bad_normal_count:
print(" %d vertices have bad normals" % bad_normal_count)
return vert_norms
def compute_triangle_angles(vertices, number_of_triangles, triangles):
"""
Calculates the inner angles of all the triangles which make up a surface
"""
verts = vertices
# TODO: Should be possible with arrays, ie not nested loops...
# A short profile indicates this function takes 95% of the time to compute normals
# (this was a direct translation of some old matlab code)
angles = np.zeros((number_of_triangles, 3))
for tt in range(number_of_triangles):
triangle = triangles[tt, :]
for ta in range(3):
ang = np.roll(triangle, -ta)
angles[tt, ta] = np.arccos(np.dot(
(verts[ang[1], :] - verts[ang[0], :]) /
np.sqrt(np.sum((verts[ang[1], :] - verts[ang[0], :]) ** 2, axis=0)),
(verts[ang[2], :] - verts[ang[0], :]) /
np.sqrt(np.sum((verts[ang[2], :] - verts[ang[0], :]) ** 2, axis=0))))
return angles
def compute_triangle_normals(triangles, vertices):
"""Calculates triangle normals."""
tri_u = vertices[triangles[:, 1], :] - vertices[triangles[:, 0], :]
tri_v = vertices[triangles[:, 2], :] - vertices[triangles[:, 0], :]
tri_norm = np.cross(tri_u, tri_v)
try:
triangle_normals = tri_norm / np.sqrt(np.sum(tri_norm ** 2, axis=1))[:, np.newaxis]
except FloatingPointError:
#TODO: NaN generation would stop execution, however for normals this case could maybe be
# handled in a better way.
triangle_normals = tri_norm
return triangle_normals
def compute_region_areas_cortex(triangle_areas, vertex_triangles, region_mapping, list_name):
regions = np.unique(region_mapping)
region_surface_area = np.zeros((list_name.shape[0], 1))
avt = np.array(vertex_triangles)
#NOTE: Slightly overestimates as it counts overlapping border triangles,
# but, not really a problem provided triangle-size << region-size.
for k in regions:
regs = map(set, avt[region_mapping == k])
region_triangles = set.union(*regs)
region_surface_area[k] = triangle_areas[list(region_triangles)].sum()
return region_surface_area
def compute_region_orientation_cortex(vertex_normals, region_mapping, list_name):
regions = np.unique(region_mapping)
average_orientation = np.zeros((list_name.shape[0], 3))
#Average orientation of the region
for k in regions:
orient = vertex_normals[region_mapping == k, :]
avg_orient = np.mean(orient, axis=0)
average_orientation[k, :] = avg_orient / np.sqrt(np.sum(avg_orient ** 2))
return average_orientation
def compute_region_center_cortex(vertices, region_mapping, list_name):
regions = np.unique(region_mapping)
region_center= np.zeros((list_name.shape[0], 3))
#Average orientation of the region
for k in regions:
vert = vertices[region_mapping == k, :]
region_center[k, :] = np.mean(vert, axis=0)
return region_center
if __name__ == '__main__':
# Cortex
# import data
verts = np.loadtxt(os.path.join(PRD, SUBJ_ID, 'surface', 'vertices.txt'))
tri = np.loadtxt(os.path.join(PRD, SUBJ_ID, 'surface', 'triangles.txt'))
tri = tri.astype(int)
region_mapping = np.loadtxt(os.path.join(PRD, SUBJ_ID, 'surface', 'region_mapping.txt')).astype(int)
# save connectivity and tract length matrices
weights = np.loadtxt(os.path.join(PRD, 'connectivity', 'weights.csv'))
tract_lengths = np.loadtxt(os.path.join(PRD, 'connectivity', 'tract_lengths.csv'))
weights = weights + weights.transpose() - np.diag(np.diag(weights))
# add the first region
weights = np.vstack([np.zeros((1, weights.shape[0])), weights])
weights = np.hstack([np.zeros((weights.shape[0], 1)), weights])
    tract_lengths = tract_lengths + tract_lengths.transpose()  # the diagonal is null (zero), so no double counting
tract_lengths = np.vstack([np.zeros((1, tract_lengths.shape[0])), tract_lengths])
tract_lengths = np.hstack([np.zeros((tract_lengths.shape[0], 1)), tract_lengths])
np.savetxt(os.path.join(PRD, SUBJ_ID, 'connectivity', 'weights.txt'), weights, fmt='%d')
np.savetxt(os.path.join(PRD, SUBJ_ID, 'connectivity', 'tract_lengths.txt'), tract_lengths, fmt='%.3f')
# name of the centers
list_name = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "r"), delimiter=",", skiprows=1, usecols=(1, ), dtype='str')
# compute centers
centers = compute_region_center_cortex(verts, region_mapping, list_name)
# calculate average orientations
number_of_vertices = int(verts.shape[0])
number_of_triangles = int(tri.shape[0])
vertex_triangles = compute_vertex_triangles(number_of_vertices, number_of_triangles,
tri)
triangle_normals = compute_triangle_normals(tri, verts)
triangle_angles = compute_triangle_angles(verts, number_of_triangles, tri)
vertex_normals = compute_vertex_normals(number_of_vertices, vertex_triangles,
tri, triangle_angles,
triangle_normals, verts)
orientations = compute_region_orientation_cortex(vertex_normals, region_mapping, list_name)
# compute areas
triangle_areas = compute_triangle_areas(verts, tri)
areas = compute_region_areas_cortex(triangle_areas, vertex_triangles, region_mapping, list_name)
# subcorticals
corr_table = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "r"), delimiter=",", skiprows=1, usecols=(0, 5))
for val in ['16', '08', '10', '11', '12', '13', '17', '18', '26', '47', '49',
'50', '51', '52', '53', '54', '58']:
verts = np.loadtxt(os.path.join(PRD, 'surface', 'subcortical',
'aseg_0'+str(val)+'_vert.txt'))
tri = np.loadtxt(os.path.join(PRD, 'surface', 'subcortical',
'aseg_0'+str(val)+'_tri.txt'))
tri = tri.astype(int)
curr_center = np.mean(verts, axis=0)
        indx = int(corr_table[np.nonzero(corr_table[:, 0] == int(val)), 1])
centers[indx, :] = curr_center
# Now calculate average orientations
number_of_vertices = int(verts.shape[0])
number_of_triangles = int(tri.shape[0])
vertex_triangles = compute_vertex_triangles(number_of_vertices, number_of_triangles,
tri)
triangle_normals = compute_triangle_normals(tri, verts)
triangle_angles = compute_triangle_angles(verts, number_of_triangles, tri)
vertex_normals = compute_vertex_normals(number_of_vertices, vertex_triangles,
tri, triangle_angles,
triangle_normals, verts)
average_orientation = compute_region_orientation(vertex_normals)
orientations[indx, :] = average_orientation
triangle_areas = compute_triangle_areas(verts, tri)
region_areas = compute_region_areas(triangle_areas, vertex_triangles)
areas[indx] = region_areas
# save orientations and areas
np.savetxt(os.path.join(PRD, SUBJ_ID, 'connectivity/areas.txt'), areas, fmt='%.2f')
np.savetxt(os.path.join(PRD, SUBJ_ID, 'connectivity/average_orientations.txt'),
orientations, fmt='%.2f %.2f %.2f')
f = open(os.path.join(PRD, SUBJ_ID, 'connectivity/centres.txt'), 'w')
for i, name in enumerate(list_name):
f.write(str(name) +' ')
for j in range(3):
f.write('{:.4f} '.format(centers[i, j]))
f.write('\n')
f.close()
# save cortical
ref_table = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "rb"), delimiter=",", skiprows=1, usecols=(7,))
np.savetxt(os.path.join(PRD, SUBJ_ID, 'connectivity/cortical.txt'), ref_table, fmt='%d')
```
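The geometry helpers above are easiest to sanity-check on a tiny synthetic surface. The sketch below is added for illustration and assumes the functions from `compute_connectivity_files.py` are available in the session; the coordinates are arbitrary. For a single unit right triangle in the z = 0 plane the expected area is 0.5 and the normal points along +z.
```python
import numpy as np

# One unit right triangle in the z = 0 plane (made-up test data).
demo_verts = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]])
demo_tris = np.array([[0, 1, 2]])

print(compute_triangle_areas(demo_verts, demo_tris))      # [[0.5]]
print(compute_triangle_normals(demo_tris, demo_verts))    # [[0. 0. 1.]]
print(compute_vertex_triangles(3, 1, demo_tris))          # [[0], [0], [0]]
```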
#### File: scripts/util/region_mapping.py
```python
import numpy as np
import os
import os.path as op
import sys
rl = sys.argv[1]
PRD = os.environ['PRD']
FS = os.environ['FS']
SUBJ_ID = os.environ['SUBJ_ID']
PARCEL = os.environ['PARCEL']
#TODO: use nibabel read_annot function instead
def read_annot(fname):
"""Read a Freesurfer annotation from a .annot file.
Note : Copied from nibabel
Parameters
----------
fname : str
Path to annotation file
Returns
-------
annot : numpy array, shape=(n_verts)
Annotation id at each vertex
ctab : numpy array, shape=(n_entries, 5)
RGBA + label id colortable array
names : list of str
List of region names as stored in the annot file
"""
if not op.isfile(fname):
dir_name = op.split(fname)[0]
if not op.isdir(dir_name):
raise IOError('Directory for annotation does not exist: %s',
fname)
cands = os.listdir(dir_name)
cands = [c for c in cands if '.annot' in c]
if len(cands) == 0:
raise IOError('No such file %s, no candidate parcellations '
'found in directory' % fname)
else:
raise IOError('No such file %s, candidate parcellations in '
'that directory: %s' % (fname, ', '.join(cands)))
with open(fname, "rb") as fid:
n_verts = np.fromfile(fid, '>i4', 1)[0]
data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
annot = data[data[:, 0], 1]
ctab_exists = np.fromfile(fid, '>i4', 1)[0]
if not ctab_exists:
raise Exception('Color table not found in annotation file')
n_entries = np.fromfile(fid, '>i4', 1)[0]
if n_entries > 0:
length = np.fromfile(fid, '>i4', 1)[0]
orig_tab = np.fromfile(fid, '>c', length)
orig_tab = orig_tab[:-1]
names = list()
            ctab = np.zeros((n_entries, 5), int)
for i in range(n_entries):
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16) +
ctab[i, 3] * (2 ** 24))
else:
ctab_version = -n_entries
if ctab_version != 2:
raise Exception('Color table version not supported')
n_entries = np.fromfile(fid, '>i4', 1)[0]
            ctab = np.zeros((n_entries, 5), int)
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, "|S%d" % length, 1) # Orig table path
entries_to_read = np.fromfile(fid, '>i4', 1)[0]
names = list()
for i in range(entries_to_read):
np.fromfile(fid, '>i4', 1) # Structure
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16))
# convert to more common alpha value
ctab[:, 3] = 255 - ctab[:, 3]
return annot, ctab, names
if PARCEL=='desikan':
L, _, _ = read_annot(os.path.join(FS, SUBJ_ID, 'label', rl + '.aparc.annot'))
elif PARCEL=='destrieux':
L, _, _ = read_annot(os.path.join(FS, SUBJ_ID, 'label', rl + '.aparc.a2009s.annot'))
elif PARCEL=='HCP-MMP':
raise NotImplementedError #TODO volumetric parcellation script
L, _, _ = read_annot(os.path.join('share', rl + '.HCP-MMP1.annot'))
elif PARCEL=='Yeo-7nets':
raise NotImplementedError #TODO volumetric parcellation script
L, _, _ = read_annot(os.path.join('share', rl + '.Yeo_7nets.annot'))
elif PARCEL=='Yeo-17nets':
raise NotImplementedError #TODO volumetric parcellation script
L, _, _ = read_annot(os.path.join('share', rl + '.Yeo_17nets.annot'))
# using the ref table instead of the annot to reorder the region indices as we want for the region mapping
ref_table = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "rb"), delimiter=",", skiprows=1, usecols=(5,6))
vl = np.loadtxt(os.path.join(PRD, 'surface', rl + '_vertices_low.txt')) # vertices low
vh = np.loadtxt(os.path.join(PRD, 'surface', rl + '_vertices_high.txt')) # vertices high
reg_map = []
for vli in vl:
pos = np.argmin(np.sum(np.abs(vh - vli), 1))
if rl == 'lh': # colors are the same for left and right hemispheres
find_tab = np.nonzero(ref_table[:, 1] == L[pos])[0][0]
elif rl=='rh':
find_tab = np.nonzero(ref_table[:, 1] == L[pos])[0][-1]
reg_map.append(ref_table[find_tab, 0])
np.savetxt(os.path.join(PRD, 'surface', rl + '_region_mapping_low_not_corrected.txt'), reg_map)
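# --- Added illustration (not part of the original script) ---
# The loop above assigns each low-resolution vertex the annotation label of
# its nearest high-resolution vertex (L1 distance via np.argmin). A toy
# example with made-up coordinates:
#
#   vh  = np.array([[0., 0., 0.], [10., 0., 0.]])   # high-res vertices
#   vli = np.array([9., 1., 0.])                    # one low-res vertex
#   np.argmin(np.sum(np.abs(vh - vli), 1))          # -> 1 (closest high-res vertex)
#
# That index is then looked up in L (the per-vertex annotation) and mapped
# through ref_table to the region index written to the output file.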
``` |
{
"source": "JJTech0130/pypyjs-pwa",
"score": 2
} |
#### File: modules/ctypes_config_cache/dumpcache.py
```python
import sys, os
from ctypes_configure import dumpcache
def dumpcache2(basename, config):
size = 32 if sys.maxint <= 2**32 else 64
filename = '_%s_%s_.py' % (basename, size)
dumpcache.dumpcache(__file__, filename, config)
#
filename = os.path.join(os.path.dirname(__file__),
'_%s_cache.py' % (basename,))
g = open(filename, 'w')
print >> g, '''\
import sys
_size = 32 if sys.maxint <= 2**32 else 64
# XXX relative import, should be removed together with
# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
_mod = __import__("_%s_%%s_" %% (_size,),
globals(), locals(), ["*"])
globals().update(_mod.__dict__)\
''' % (basename,)
g.close()
``` |
{
"source": "jjtechuy/lumino",
"score": 2
} |
#### File: network/proxies/token.py
```python
import structlog
from eth_utils import is_binary_address, to_checksum_address, to_normalized_address
from raiden.constants import GAS_LIMIT_FOR_TOKEN_CONTRACT_CALL
from raiden.exceptions import RaidenUnrecoverableError, TransactionThrew
from raiden.network.rpc.client import check_address_has_code
from raiden.network.rpc.smartcontract_proxy import ContractProxy
from raiden.network.rpc.transactions import check_transaction_threw
from raiden.utils import pex, safe_gas_limit
from raiden.utils.typing import Address, BlockSpecification, TokenAmount
from raiden_contracts.constants import CONTRACT_HUMAN_STANDARD_TOKEN
from raiden_contracts.contract_manager import ContractManager
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
# Determined by safe_gas_limit(estimateGas(approve)) on 17/01/19 with geth 1.8.20
GAS_REQUIRED_FOR_APPROVE = 58792
class Token:
def __init__(
self,
jsonrpc_client,
token_address,
contract_manager: ContractManager,
):
contract = jsonrpc_client.new_contract(
contract_manager.get_contract_abi(CONTRACT_HUMAN_STANDARD_TOKEN),
to_normalized_address(token_address),
)
proxy = ContractProxy(jsonrpc_client, contract)
if not is_binary_address(token_address):
raise ValueError('token_address must be a valid address')
check_address_has_code(jsonrpc_client, token_address, 'Token')
self.address = token_address
self.client = jsonrpc_client
self.node_address = jsonrpc_client.address
self.proxy = proxy
def allowance(self, owner, spender, block_identifier):
return self.proxy.contract.functions.allowance(
to_checksum_address(owner),
to_checksum_address(spender),
).call(block_identifier=block_identifier)
def approve(self, allowed_address: Address, allowance: TokenAmount):
""" Aprove `allowed_address` to transfer up to `deposit` amount of token.
Note:
For channel deposit please use the channel proxy, since it does
additional validations.
"""
log_details = {
'node': pex(self.node_address),
'contract': pex(self.address),
'allowed_address': pex(allowed_address),
'allowance': allowance,
}
checking_block = self.client.get_checking_block()
error_prefix = 'Call to approve will fail'
gas_limit = self.proxy.estimate_gas(
checking_block,
'approve',
to_checksum_address(allowed_address),
allowance,
)
if gas_limit:
error_prefix = 'Call to approve failed'
log.debug('approve called', **log_details)
transaction_hash = self.proxy.transact(
'approve',
safe_gas_limit(gas_limit),
to_checksum_address(allowed_address),
allowance,
)
self.client.poll(transaction_hash)
receipt_or_none = check_transaction_threw(self.client, transaction_hash)
transaction_executed = gas_limit is not None
if not transaction_executed or receipt_or_none:
if transaction_executed:
block = receipt_or_none['blockNumber']
else:
block = checking_block
self.proxy.jsonrpc_client.check_for_insufficient_eth(
transaction_name='approve',
transaction_executed=transaction_executed,
required_gas=GAS_REQUIRED_FOR_APPROVE,
block_identifier=block,
)
msg = self._check_why_approved_failed(allowance, block)
error_msg = f'{error_prefix}. {msg}'
log.critical(error_msg, **log_details)
raise RaidenUnrecoverableError(error_msg)
log.info('approve successful', **log_details)
def _check_why_approved_failed(
self,
allowance: TokenAmount,
block_identifier: BlockSpecification,
) -> str:
user_balance = self.balance_of(
address=self.client.address,
block_identifier=block_identifier,
)
        # If the balance is zero, either the smart contract doesn't have a
# balanceOf function or the actual balance is zero
if user_balance == 0:
msg = (
"Approve failed. \n"
"Your account balance is 0 (zero), either the smart "
"contract is not a valid ERC20 token or you don't have funds "
"to use for openning a channel. "
)
# The approve call failed, check the user has enough balance
# (assuming the token smart contract may check for the maximum
# allowance, which is not necessarily the case)
elif user_balance < allowance:
msg = (
f'Approve failed. \n'
f'Your account balance is {user_balance}. '
f'The requested allowance is {allowance}. '
f'The smart contract may be rejecting your request due to the '
f'lack of balance.'
)
# If the user has enough balance, warn the user the smart contract
# may not have the approve function.
else:
            msg = (
                f'Approve failed. \n'
                f'Your account balance is {user_balance}. Nevertheless the call to '
                f'approve failed. Please make sure the corresponding smart '
                f'contract is a valid ERC20 token.'
            )
return msg
def balance_of(self, address, block_identifier='latest'):
""" Return the balance of `address`. """
return self.proxy.contract.functions.balanceOf(
to_checksum_address(address),
).call(block_identifier=block_identifier)
def transfer(self, to_address, amount):
log_details = {
'node': pex(self.node_address),
'contract': pex(self.address),
'to_address': pex(to_address),
'amount': amount,
}
log.debug('transfer called', **log_details)
startgas = GAS_LIMIT_FOR_TOKEN_CONTRACT_CALL
transaction_hash = self.proxy.transact(
'transfer',
safe_gas_limit(startgas),
to_checksum_address(to_address),
amount,
)
self.client.poll(transaction_hash)
receipt_or_none = check_transaction_threw(self.client, transaction_hash)
if receipt_or_none:
log.critical('transfer failed', **log_details)
raise TransactionThrew('Transfer', receipt_or_none)
# TODO: check Transfer event (issue: #2598)
log.info('transfer successful', **log_details)
```
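For orientation, the proxy above wraps the standard ERC20 `approve` / `allowance` / `balanceOf` / `transfer` calls. Below is a minimal sketch of the call sequence, assuming `client`, `contract_manager`, `token_address` and the two addresses are constructed elsewhere (they are hypothetical names here); `test_token.py` further below shows the same flow with real fixtures.
```python
token = Token(
    jsonrpc_client=client,
    token_address=token_address,
    contract_manager=contract_manager,
)

token.approve(spender_address, 100)                                  # grant an allowance of 100
current = token.allowance(client.address, spender_address, 'latest')
balance = token.balance_of(client.address)                           # block_identifier defaults to 'latest'
token.transfer(receiver_address, 50)                                 # plain ERC20 transfer
```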
#### File: lumino/raiden/routing.py
```python
from heapq import heappop, heappush
from typing import Any, Dict, List, Tuple
import networkx
import requests
import structlog
from eth_utils import to_canonical_address, to_checksum_address
from raiden.constants import DEFAULT_HTTP_REQUEST_TIMEOUT
from raiden.transfer import channel, views
from raiden.transfer.state import (
CHANNEL_STATE_OPENED,
NODE_NETWORK_REACHABLE,
NODE_NETWORK_UNKNOWN,
ChainState,
NettingChannelState,
RouteState,
)
from raiden.utils import pex, typing
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
def check_channel_constraints(
channel_state: NettingChannelState,
from_address: typing.InitiatorAddress,
partner_address: typing.Address,
amount: int,
network_statuses: Dict[typing.Address, str],
routing_module: str,
) -> bool:
# check channel state
if channel.get_status(channel_state) != CHANNEL_STATE_OPENED:
log.info(
'Channel is not opened, ignoring',
from_address=pex(from_address),
partner_address=pex(partner_address),
routing_source=routing_module,
)
return False
# check channel distributable
distributable = channel.get_distributable(
channel_state.our_state,
channel_state.partner_state,
)
if amount > distributable:
log.info(
            "Channel doesn't have enough funds, ignoring",
from_address=pex(from_address),
partner_address=pex(partner_address),
amount=amount,
distributable=distributable,
routing_source=routing_module,
)
return False
# check channel partner reachability
network_state = network_statuses.get(partner_address, NODE_NETWORK_UNKNOWN)
if network_state != NODE_NETWORK_REACHABLE:
log.info(
'Partner for channel isn\'t reachable, ignoring',
from_address=pex(from_address),
partner_address=pex(partner_address),
status=network_state,
routing_source=routing_module,
)
return False
return True
def get_best_routes(
chain_state: ChainState,
token_network_id: typing.TokenNetworkID,
from_address: typing.InitiatorAddress,
to_address: typing.TargetAddress,
amount: int,
previous_address: typing.Optional[typing.Address],
config: Dict[str, Any],
) -> List[RouteState]:
services_config = config.get('services', None)
if services_config and services_config['pathfinding_service_address'] is not None:
pfs_answer_ok, pfs_routes = get_best_routes_pfs(
chain_state=chain_state,
token_network_id=token_network_id,
from_address=from_address,
to_address=to_address,
amount=amount,
previous_address=previous_address,
config=services_config,
)
if pfs_answer_ok:
return pfs_routes
else:
log.warning(
'Request to Pathfinding Service was not successful, '
'falling back to internal routing.',
)
return get_best_routes_internal(
chain_state=chain_state,
token_network_id=token_network_id,
from_address=from_address,
to_address=to_address,
amount=amount,
previous_address=previous_address,
)
def get_best_routes_internal(
chain_state: ChainState,
token_network_id: typing.TokenNetworkID,
from_address: typing.InitiatorAddress,
to_address: typing.TargetAddress,
amount: int,
previous_address: typing.Optional[typing.Address],
) -> List[RouteState]:
""" Returns a list of channels that can be used to make a transfer.
This will filter out channels that are not open and don't have enough
capacity.
"""
# TODO: Route ranking.
# Rate each route to optimize the fee price/quality of each route and add a
    # rating in the range [0.0, 1.0].
available_routes = list()
token_network = views.get_token_network_by_identifier(
chain_state,
token_network_id,
)
network_statuses = views.get_networkstatuses(chain_state)
neighbors_heap = list()
try:
all_neighbors = networkx.all_neighbors(token_network.network_graph.network, from_address)
except networkx.NetworkXError:
# If `our_address` is not in the graph, no channels opened with the
# address
return list()
for partner_address in all_neighbors:
# don't send the message backwards
if partner_address == previous_address:
continue
channel_state = views.get_channelstate_by_token_network_and_partner(
chain_state,
token_network_id,
partner_address,
)
channel_constraints_fulfilled = check_channel_constraints(
channel_state=channel_state,
from_address=from_address,
partner_address=partner_address,
amount=amount,
network_statuses=network_statuses,
routing_module='Internal Routing',
)
if not channel_constraints_fulfilled:
continue
nonrefundable = amount > channel.get_distributable(
channel_state.partner_state,
channel_state.our_state,
)
try:
length = networkx.shortest_path_length(
token_network.network_graph.network,
partner_address,
to_address,
)
heappush(
neighbors_heap,
(length, nonrefundable, partner_address, channel_state.identifier),
)
except (networkx.NetworkXNoPath, networkx.NodeNotFound):
pass
if not neighbors_heap:
log.warning(
'No routes available',
from_address=pex(from_address),
to_address=pex(to_address),
)
return list()
while neighbors_heap:
*_, partner_address, channel_state_id = heappop(neighbors_heap)
route_state = RouteState(partner_address, channel_state_id)
available_routes.append(route_state)
return available_routes
def get_best_routes_pfs(
chain_state: ChainState,
token_network_id: typing.TokenNetworkID,
from_address: typing.InitiatorAddress,
to_address: typing.TargetAddress,
amount: int,
previous_address: typing.Optional[typing.Address],
config: Dict[str, Any],
) -> Tuple[bool, List[RouteState]]:
pfs_path = '{}/api/v1/{}/paths'.format(
config['pathfinding_service_address'],
to_checksum_address(token_network_id),
)
payload = {
'from': to_checksum_address(from_address),
'to': to_checksum_address(to_address),
'value': amount,
'max_paths': config['pathfinding_max_paths'],
}
# check that the response is successful
try:
response = requests.get(pfs_path, params=payload, timeout=DEFAULT_HTTP_REQUEST_TIMEOUT)
except requests.RequestException:
log.warning(
'Could not connect to Pathfinding Service',
request=pfs_path,
parameters=payload,
exc_info=True,
)
return False, []
# check that the response contains valid json
try:
response_json = response.json()
except ValueError:
log.warning(
'Pathfinding Service returned invalid JSON',
response_text=response.text,
exc_info=True,
)
return False, []
if response.status_code != 200:
log_info = {
'error_code': response.status_code,
}
error = response_json.get('errors')
if error is not None:
log_info['pfs_error'] = error
log.info(
'Pathfinding Service returned error code',
**log_info,
)
return False, []
if response_json.get('result') is None:
log.info(
'Pathfinding Service returned unexpected result',
result=response_json,
)
return False, []
paths = []
network_statuses = views.get_networkstatuses(chain_state)
for path_object in response_json['result']:
path = path_object['path']
# get the second entry, as the first one is the node itself
# also needs to be converted to canonical representation
partner_address = to_canonical_address(path[1])
# don't route back
if partner_address == previous_address:
continue
channel_state = views.get_channelstate_by_token_network_and_partner(
chain_state=chain_state,
token_network_id=token_network_id,
partner_address=partner_address,
)
channel_constraints_fulfilled = check_channel_constraints(
channel_state=channel_state,
from_address=from_address,
partner_address=partner_address,
amount=amount,
network_statuses=network_statuses,
routing_module='Pathfinding Service',
)
if not channel_constraints_fulfilled:
continue
paths.append(RouteState(
node_address=partner_address,
channel_identifier=channel_state.identifier,
))
return True, paths
```
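For reference, `get_best_routes` only queries the Pathfinding Service when the `services` section of the config carries a pathfinding address; otherwise it goes straight to internal routing. A sketch of the two config shapes it distinguishes (the URL and path count are illustrative values, not defaults from the codebase):
```python
# Pathfinding Service is tried first; internal routing is only the fallback.
config_with_pfs = {
    'services': {
        'pathfinding_service_address': 'https://pfs.example.org',  # hypothetical URL
        'pathfinding_max_paths': 3,
    },
}

# No services section at all: get_best_routes_internal() is used directly.
config_internal_only = {}
```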
#### File: integration/contracts/test_token.py
```python
from eth_utils import to_canonical_address, to_checksum_address
from raiden.network.proxies import Token
from raiden.network.rpc.client import JSONRPCClient
from raiden.utils import privatekey_to_address
def test_token(
deploy_client,
token_proxy,
private_keys,
web3,
contract_manager,
):
privkey = private_keys[1]
address = privatekey_to_address(privkey)
address = to_canonical_address(address)
other_client = JSONRPCClient(web3, privkey)
other_token_proxy = Token(
jsonrpc_client=other_client,
token_address=to_canonical_address(token_proxy.proxy.contract.address),
contract_manager=contract_manager,
)
# send some funds from deployer to generated address
transfer_funds = 100
token_proxy.transfer(address, transfer_funds)
assert transfer_funds == token_proxy.balance_of(address)
allow_funds = 100
token_proxy.approve(address, allow_funds)
assert allow_funds == token_proxy.proxy.contract.functions.allowance(
to_checksum_address(deploy_client.address),
to_checksum_address(address),
).call(block_identifier='latest')
other_token_proxy.transfer(deploy_client.address, transfer_funds)
assert token_proxy.balance_of(address) == 0
```
#### File: tests/unit/test_upgrade.py
```python
import random
from datetime import datetime
from pathlib import Path
from unittest.mock import patch
from raiden.storage.serialize import JSONSerializer
from raiden.storage.sqlite import SQLiteStorage
from raiden.tests.utils import factories
from raiden.transfer.state_change import ActionInitChain
from raiden.utils.upgrades import UpgradeManager
def setup_storage(db_path):
storage = SQLiteStorage(str(db_path), JSONSerializer())
storage.write_state_change(
ActionInitChain(
pseudo_random_generator=random.Random(),
block_number=1,
our_address=factories.make_address(),
chain_id=1,
),
datetime.utcnow().isoformat(timespec='milliseconds'),
)
return storage
def test_upgrade_manager_restores_backup(tmp_path):
db_path = tmp_path / Path('v17_log.db')
upgrade_manager = UpgradeManager(db_filename=db_path)
old_db_filename = tmp_path / Path('v16_log.db')
storage = None
with patch('raiden.utils.upgrades.older_db_file') as older_db_file:
older_db_file.return_value = str(old_db_filename)
storage = setup_storage(old_db_filename)
with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=16):
storage.update_version()
upgrade_manager.run()
# Once restored, the state changes written above should be
# in the restored database
storage = SQLiteStorage(str(db_path), JSONSerializer())
state_change_record = storage.get_latest_state_change_by_data_field(
{'_type': 'raiden.transfer.state_change.ActionInitChain'},
)
assert state_change_record.data is not None
assert not old_db_filename.exists()
assert Path(str(old_db_filename).replace('_log.db', '_log.backup')).exists()
```
#### File: raiden/transfer/architecture.py
```python
from copy import deepcopy
from raiden.transfer.queue_identifier import QueueIdentifier
from raiden.utils.typing import (
Address,
BlockExpiration,
BlockNumber,
ChannelID,
List,
MessageID,
Optional,
T_BlockNumber,
T_ChannelID,
TransactionHash,
)
# Quick overview
# --------------
#
# Goals:
# - Reliable failure recovery.
#
# Approach:
# - Use a write-ahead-log for state changes. Under a node restart the
# latest state snapshot can be recovered and the pending state changes
# reapplied.
#
# Requirements:
# - The function call `state_transition(curr_state, state_change)` must be
# deterministic, the recovery depends on the re-execution of the state changes
# from the WAL and must produce the same result.
# - StateChange must be idempotent because the partner node might be recovering
# from a failure and an Event might be produced more than once.
#
# Requirements that are enforced:
# - A state_transition function must not produce a result that must be further
# processed, i.e. the state change must be self contained and the result state
# tree must be serializable to produce a snapshot. To enforce this inputs and
# outputs are separated under different class hierarchies (StateChange and Event).
class State:
""" An isolated state, modified by StateChange messages.
Notes:
- Don't duplicate the same state data in two different States, instead use
identifiers.
- State objects may be nested.
- State classes don't have logic by design.
- Each iteration must operate on fresh copy of the state, treating the old
objects as immutable.
- This class is used as a marker for states.
"""
__slots__ = ()
class StateChange:
""" Declare the transition to be applied in a state object.
StateChanges are incoming events that change this node state (eg. a
blockchain event, a new packet, an error). It is not used for the node to
communicate with the outer world.
Nomenclature convention:
- 'Receive' prefix for protocol messages.
- 'ContractReceive' prefix for smart contract logs.
- 'Action' prefix for other interactions.
Notes:
- These objects don't have logic by design.
- This class is used as a marker for state changes.
"""
__slots__ = ()
def to_dict(self):
return {
attr: value
for attr, value in self.__dict__.items()
if not attr.startswith('_')
}
class Event:
""" Events produced by the execution of a state change.
Nomenclature convention:
- 'Send' prefix for protocol messages.
- 'ContractSend' prefix for smart contract function calls.
- 'Event' for node events.
Notes:
- This class is used as a marker for events.
- These objects don't have logic by design.
- Separate events are preferred because there is a decoupling of what the
upper layer will use the events for.
"""
__slots__ = ()
class SendMessageEvent(Event):
""" Marker used for events which represent off-chain protocol messages tied
to a channel.
Messages are sent only once, delivery is guaranteed by the transport and
not by the state machine
"""
def __init__(
self,
recipient: Address,
channel_identifier: ChannelID,
message_identifier: MessageID,
):
# Note that here and only here channel identifier can also be 0 which stands
# for the identifier of no channel (i.e. the global queue)
if not isinstance(channel_identifier, T_ChannelID):
raise ValueError('channel identifier must be of type T_ChannelIdentifier')
self.recipient = recipient
self.queue_identifier = QueueIdentifier(
recipient=recipient,
channel_identifier=channel_identifier,
)
self.message_identifier = message_identifier
def __eq__(self, other):
return (
isinstance(other, SendMessageEvent) and
self.recipient == other.recipient and
self.queue_identifier == other.queue_identifier and
self.message_identifier == other.message_identifier
)
def __ne__(self, other):
return not self.__eq__(other)
class AuthenticatedSenderStateChange(StateChange):
""" Marker used for state changes for which the sender has been verified. """
def __init__(self, sender):
self.sender = sender
def __eq__(self, other):
return (
isinstance(other, AuthenticatedSenderStateChange) and
self.sender == other.sender
)
def __ne__(self, other):
return not self.__eq__(other)
class BalanceProofStateChange(AuthenticatedSenderStateChange):
""" Marker used for state changes which contain a balance proof. """
def __init__(self, balance_proof):
super().__init__(sender=balance_proof.sender)
self.balance_proof = balance_proof
def __eq__(self, other):
return (
isinstance(other, BalanceProofStateChange) and
super().__eq__(other) and
self.balance_proof == other.balance_proof
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractSendEvent(Event):
""" Marker used for events which represent on-chain transactions. """
pass
class ContractSendExpirableEvent(ContractSendEvent):
""" Marker used for events which represent on-chain transactions which are
time dependent.
"""
def __init__(self, expiration: BlockExpiration):
self.expiration = expiration
def __eq__(self, other):
return (
isinstance(other, ContractSendExpirableEvent) and
self.expiration == other.expiration
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractReceiveStateChange(StateChange):
""" Marker used for state changes which represent on-chain logs. """
def __init__(self, transaction_hash: TransactionHash, block_number: BlockNumber):
if not isinstance(block_number, T_BlockNumber):
raise ValueError('block_number must be of type block_number')
self.transaction_hash = transaction_hash
self.block_number = block_number
def __eq__(self, other):
return (
isinstance(other, ContractReceiveStateChange) and
self.transaction_hash == other.transaction_hash and
self.block_number == other.block_number
)
def __ne__(self, other):
return not self.__eq__(other)
class StateManager:
""" The mutable storage for the application state, this storage can do
state transitions by applying the StateChanges to the current State.
"""
__slots__ = (
'state_transition',
'current_state',
)
def __init__(self, state_transition, current_state):
""" Initialize the state manager.
Args:
state_transition: function that can apply a StateChange message.
current_state: current application state.
"""
if not callable(state_transition):
raise ValueError('state_transition must be a callable')
self.state_transition = state_transition
self.current_state = current_state
def dispatch(self, state_change: StateChange) -> List[Event]:
""" Apply the `state_change` in the current machine and return the
resulting events.
Args:
state_change: An object representation of a state
change.
Return:
A list of events produced by the state transition.
            It's the upper layer's responsibility to decide how to handle
these events.
"""
assert isinstance(state_change, StateChange)
# the state objects must be treated as immutable, so make a copy of the
# current state and pass the copy to the state machine to be modified.
next_state = deepcopy(self.current_state)
# update the current state by applying the change
iteration = self.state_transition(
next_state,
state_change,
)
assert isinstance(iteration, TransitionResult)
self.current_state = iteration.new_state
events = iteration.events
assert isinstance(self.current_state, (State, type(None)))
assert all(isinstance(e, Event) for e in events)
return events
def __eq__(self, other):
return (
isinstance(other, StateManager) and
self.state_transition == other.state_transition and
self.current_state == other.current_state
)
def __ne__(self, other):
return not self.__eq__(other)
class TransitionResult:
""" Representes the result of applying a single state change.
When a task is completed the new_state is set to None, allowing the parent
task to cleanup after the child.
"""
__slots__ = (
'new_state',
'events',
)
def __init__(self, new_state: Optional[State], events: List[Event]):
self.new_state = new_state
self.events = events
def __eq__(self, other):
return (
isinstance(other, TransitionResult) and
self.new_state == other.new_state and
self.events == other.events
)
def __ne__(self, other):
return not self.__eq__(other)
```
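The marker classes and `StateManager` above are easiest to see in a toy state machine. The sketch below is added for illustration; `CounterState`, `Increment` and `CounterIncremented` are hypothetical types, not part of Raiden. It shows the `dispatch` contract: the transition function receives a deep copy of the current state and must return a `TransitionResult` whose events are `Event` instances.
```python
from raiden.transfer.architecture import (
    Event,
    State,
    StateChange,
    StateManager,
    TransitionResult,
)

class CounterState(State):
    def __init__(self, count=0):
        self.count = count

class Increment(StateChange):
    pass

class CounterIncremented(Event):
    pass

def transition(state, state_change):
    # `state` is already the deepcopy made by StateManager.dispatch()
    if isinstance(state_change, Increment):
        state.count += 1
        return TransitionResult(state, [CounterIncremented()])
    return TransitionResult(state, [])

manager = StateManager(transition, CounterState())
events = manager.dispatch(Increment())   # -> [CounterIncremented()]
assert manager.current_state.count == 1
```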
#### File: raiden/transfer/queue_identifier.py
```python
from eth_utils import to_canonical_address, to_checksum_address
from raiden.utils import pex
from raiden.utils.typing import Address, Any, ChannelID, Dict
class QueueIdentifier:
def __init__(
self,
recipient: Address,
channel_identifier: ChannelID,
):
self.recipient = recipient
self.channel_identifier = channel_identifier
def __repr__(self):
return '<QueueIdentifier recipient:{} channel_identifier:{}>'.format(
pex(self.recipient),
self.channel_identifier,
)
def __eq__(self, other):
return (
isinstance(other, QueueIdentifier) and
self.recipient == other.recipient and
self.channel_identifier == other.channel_identifier
)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.recipient, self.channel_identifier))
def to_dict(self) -> Dict[str, Any]:
return {
'recipient': to_checksum_address(self.recipient),
'channel_identifier': self.channel_identifier,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'QueueIdentifier':
restored = cls(
recipient=to_canonical_address(data['recipient']),
channel_identifier=data['channel_identifier'],
)
return restored
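# --- Added round-trip illustration (not part of the original module) ---
# to_dict() and from_dict() are inverses; the zero address below is only an example.
#
#   q = QueueIdentifier(recipient=b'\x00' * 20, channel_identifier=1)
#   q.to_dict()
#   # -> {'recipient': '0x0000000000000000000000000000000000000000',
#   #     'channel_identifier': 1}
#   QueueIdentifier.from_dict(q.to_dict()) == q   # -> True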
``` |
{
"source": "JJtheNOOB/NLP",
"score": 4
} |
#### File: NLP/Latent Semantic Analysis/tokenizing.py
```python
import nltk
from nltk.stem import WordNetLemmatizer
# Set up the lemmatizer, which transforms words to their base (dictionary) form
word_lemmetizer = WordNetLemmatizer()
#Read in all the titles and save them into a list called titles
titles = [line.rstrip() for line in open('C:/Users/lianj/Desktop/Udemy/NLP_machine_learning_examples-master/nlp_class/all_book_titles.txt')]
#Read in stopwords and make them into a set
stopwords = set(w.rstrip() for w in open('C:/Users/lianj/Desktop/Udemy/NLP_machine_learning_examples-master/nlp_class/stopwords.txt'))
#Append additional stop words into the set
stopwords = stopwords.union({
'introduction', 'edition', 'series', 'application',
'approach', 'card', 'access', 'package', 'plus', 'etext',
'brief', 'vol', 'fundamental', 'guide', 'essential', 'printed',
'third', 'second', 'fourth', })
#Now start processing the words
def tokenizer(s):
s = s.lower() #Make everything in lower case
tokens = nltk.tokenize.word_tokenize(s) #split string into words
    tokens = [t for t in tokens if len(t) > 2]  # Remove words of 2 characters or fewer (not very useful)
    tokens = [word_lemmetizer.lemmatize(t) for t in tokens]  # Lemmatize the tokens
    tokens = [t for t in tokens if t not in stopwords]  # Remove all the stop words
    tokens = [t for t in tokens if not any(c.isdigit() for c in t)]  # Drop tokens that contain digits
return tokens
word_index_map = {}
current_index = 0
all_tokens = []
error_count = 0
for title in titles:
try:
title = title.encode('ascii', 'ignore').decode('utf-8') # QA: this will throw exception if bad characters
tokens = tokenizer(title) #process the title
all_tokens.append(tokens) #Append the tokens from the titles into tokens list
for token in tokens:
if token not in word_index_map:
word_index_map[token] = current_index
current_index += 1
except Exception as e:
print(e)
print(title)
error_count += 1
print(word_index_map)
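# --- Added example (illustrative; the exact output depends on stopwords.txt) ---
# tokenizer("Introduction to Machine Learning, 2nd Edition") lowercases the
# string, drops short tokens and punctuation, lemmatizes, removes stop words
# ('introduction' and 'edition' are in the extra set above) and discards
# tokens containing digits, leaving roughly: ['machine', 'learning']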
``` |
{
"source": "jjthomas/metaflow-tvm",
"score": 2
} |
#### File: nnvm/testing/rnntc.py
```python
from .. import symbol as sym
from . utils import create_workload
EMBED_SIZE = 1024
LENGTH = 20
def sru_node(x, c):
x1 = sym.dense(data=x, units=EMBED_SIZE)
x2 = sym.dense(data=x, units=EMBED_SIZE)
f = sym.sigmoid(data=x2)
x3 = sym.dense(data=x, units=EMBED_SIZE)
r = sym.sigmoid(data=x3)
outC = sym.elemwise_add(sym.elemwise_mul(f, c), sym.elemwise_mul(f, x1))
outH = sym.elemwise_add(sym.elemwise_mul(r, outC), sym.elemwise_mul(r, x))
return (outC, outH)
def get_symbol():
data = sym.Variable('data')
data = sym.flatten(data=data)
splits = sym.split(data, indices_or_sections=LENGTH, axis=1)
lastC = splits[0]
hs = []
for i in range(LENGTH):
lastC, h = sru_node(splits[i], lastC)
hs.append(h)
hs.append(lastC)
return sym.concatenate(*hs, axis=1)
def get_workload(batch_size, image_shape=(1, 1, EMBED_SIZE * LENGTH), dtype="float32"):
"""Get benchmark workload for a simple multilayer perceptron
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of claseses
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
net : nnvm.symbol
The computational graph
params : dict of str to NDArray
The parameters.
"""
net = get_symbol()
return create_workload(net, batch_size, image_shape, dtype)
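# --- Added usage note (not part of the original file) ---
# Typical use mirrors the other nnvm.testing workloads: build the symbolic
# graph and its random parameters for a given batch size, e.g.
#
#   net, params = get_workload(batch_size=1)
#
# `net` is the nnvm symbol returned by get_symbol() and `params` holds the
# randomly initialised weights created by create_workload().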
``` |
{
"source": "jjthomas/mflowgen",
"score": 3
} |
#### File: mflowgen/components/Graph.py
```python
from __future__ import print_function
from .Edge import Edge
from .Step import Step
from ..utils import get_top_dir
class Graph( object ):
def __init__( s ):
s._edges_i = {}
s._edges_o = {}
s._steps = {}
#-----------------------------------------------------------------------
# API to help build the graph interactively
#-----------------------------------------------------------------------
# ADKs
def set_adk( s, adk, default=True ):
if default:
s.adk = Step( get_top_dir() + '/adks/' + adk, default=False )
else:
s.adk = Step( adk, default=False )
s.add_step( s.adk )
def get_adk_step( s ):
return s.adk
# Steps
def add_step( s, step ):
key = step.get_name()
assert key not in s._steps.keys(), \
      'add_step -- Duplicate step! ' \
'If this is intentional, first change the step name'
s._steps[ key ] = step
def get_step( s, step_name ):
return s._steps[ step_name ]
def all_steps( s ):
return s._steps.keys()
# Edges -- incoming and outgoing adjacency lists
def get_edges_i( s, step_name ):
try:
return s._edges_i[ step_name ]
except KeyError:
return []
def get_edges_o( s, step_name ):
try:
return s._edges_o[ step_name ]
except KeyError:
return []
# Quality-of-life utility function
def dangling_inputs( s ):
dangling = []
for step_name in s.all_steps():
incoming_edges = s.get_edges_i( step_name )
incoming_edge_f_names = [ e.get_dst()[1] for e in incoming_edges ]
inputs = s.get_step( step_name ).all_inputs()
if inputs:
for x in inputs:
if x not in incoming_edge_f_names:
dangling.append( ( step_name, x ) )
if dangling:
for step_name, f_name in dangling:
msg = 'Dangling input in step "{}": {}'
msg = msg.format( step_name, f_name )
print( msg )
else:
print( 'No dangling inputs in graph' )
#-----------------------------------------------------------------------
# Connect
#-----------------------------------------------------------------------
def connect( s, l_handle, r_handle ):
# Twizzle and figure out which side is the src and which is the dst
l_step_name, l_direction, l_handle_name = l_handle
r_step_name, r_direction, r_handle_name = r_handle
if l_direction == 'inputs':
assert r_direction == 'outputs', \
'connect -- Must connect an input to an output'
src_handle = r_handle
dst_handle = l_handle
elif r_direction == 'inputs':
assert l_direction == 'outputs', \
'connect -- Must connect an input to an output'
src_handle = l_handle
dst_handle = r_handle
else:
assert False, \
'connect -- Must connect an input to an output'
# Create an edge from src to dst
src_step_name, src_direction, src_f = src_handle
dst_step_name, dst_direction, dst_f = dst_handle
if dst_step_name not in s._edges_i.keys():
s._edges_i[ dst_step_name ] = []
if src_step_name not in s._edges_o.keys():
s._edges_o[ src_step_name ] = []
src = ( src_step_name, src_f )
dst = ( dst_step_name, dst_f )
e = Edge( src, dst )
# Add this edge to tracking
s._edges_i[ dst_step_name ].append( e )
s._edges_o[ src_step_name ].append( e )
def connect_by_name( s, src, dst ):
# Get the step (in case the user provided step names instead)
if type( src ) != Step:
src_step = s.get_step( src )
else:
src_step = src
src_step_name = src_step.get_name()
assert src_step_name in s.all_steps(), \
'connect_by_name -- ' \
'Step "{}" not found in graph'.format( src_step_name )
if type( dst ) != Step:
dst_step = s.get_step( dst )
else:
dst_step = dst
dst_step_name = dst_step.get_name()
assert dst_step_name in s.all_steps(), \
'connect_by_name -- ' \
'Step "{}" not found in graph'.format( dst_step_name )
# Find same-name matches between the src output and dst input
src_outputs = src_step.all_outputs()
dst_inputs = dst_step.all_inputs()
overlap = set( src_outputs ).intersection( set( dst_inputs ) )
# For all overlaps, connect src to dst
for name in overlap:
l_handle = src_step.o( name )
r_handle = dst_step.i( name )
s.connect( l_handle, r_handle )
#-----------------------------------------------------------------------
# Parameter system
#-----------------------------------------------------------------------
def update_params( s, params ):
for step_name in s.all_steps():
s.get_step( step_name ).update_params( params )
def expand_params( s ):
for step_name in s.all_steps():
s.get_step( step_name ).expand_params()
#-----------------------------------------------------------------------
# Design-space exploration
#-----------------------------------------------------------------------
# param_space
#
# Spins out new copies of the step across the parameter space.
#
# For example, for a graph like this:
#
# +-----+ +-----------+ +-----------+
# | foo | -> | bar | -> | baz |
# | | | ( p = 1 ) | | |
# +-----+ +-----------+ +-----------+
#
# this call:
#
# s.param_space( 'bar', 'p', [ 1, 2, 3 ] )
#
# will be transformed into a graph like this:
#
# +-----------+ +-----------+
# +-> | bar-p-1 | -> | baz-p-1 |
# | | ( p = 1 ) | | |
# | +-----------+ +-----------+
# +-----+ | +-----------+ +-----------+
# | foo | --> | bar-p-2 | -> | baz-p-2 |
# | | | | ( p = 2 ) | | |
# +-----+ | +-----------+ +-----------+
# | +-----------+ +-----------+
# +-> | bar-p-3 | -> | baz-p-3 |
# | ( p = 3 ) | | |
# +-----------+ +-----------+
#
# Returns a list of (parameterized) steps (i.e., 'bar-p-1', 'bar-p-2',
# and 'bar-p-3').
#
def param_space( s, step, param_name, param_space ):
# Get the step name (in case the user provided a step object instead)
if type( step ) != str:
step_name = step.get_name()
else:
step_name = step
step = s.get_step( step_name )
assert step_name in s.all_steps(), \
'param_space -- ' \
'Step "{}" not found in graph'.format( step_name )
# Remove the step and its incoming edges from the graph
del( s._steps[ step_name ] )
elist_i = s._param_space_helper_remove_incoming_edges( step_name )
# Now spin out new copies of the step across the parameter space
new_steps = []
for p in param_space:
p_step = step.clone()
p_step.set_param( param_name, p )
p_step.set_name( step_name + '-' + param_name + '-' + str(p) )
s.add_step( p_step )
for e in elist_i:
src_step_name, src_f = e.get_src()
dst_step_name, dst_f = e.get_dst()
src_step = s.get_step( src_step_name )
s.connect( src_step.o( src_f ), p_step.i( dst_f ) )
new_steps.append( p_step )
# Get the steps that directly depended on this step
dep_steps = s._param_space_helper_get_dependent_steps( step_name )
# For each dependent step, replicate and connect to the new steps
for dep_step in dep_steps:
s._param_space_helper( step = dep_step,
old_src = step,
new_srcs = new_steps,
param_name = param_name,
param_space = param_space )
return new_steps
def _param_space_helper( s, step, old_src, new_srcs, param_name,
param_space ):
step_name = step.get_name()
# Remove the step and its incoming edges from the graph
del( s._steps[ step_name ] )
elist_i = s._param_space_helper_remove_incoming_edges( step_name )
# Now spin out new copies of the step + attach them to new srcs
new_steps = []
for i, p in enumerate( param_space ):
p_step = step.clone()
p_step.set_name( step_name + '-' + param_name + '-' + str(p) )
s.add_step( p_step )
for e in elist_i:
src_step_name, src_f = e.get_src()
dst_step_name, dst_f = e.get_dst()
if src_step_name == old_src.get_name():
src_step = new_srcs[i]
else:
src_step = s.get_step( src_step_name )
s.connect( src_step.o( src_f ), p_step.i( dst_f ) )
new_steps.append( p_step )
# Get the steps that directly depended on this step
dep_steps = s._param_space_helper_get_dependent_steps( step_name )
# For each dependent step, replicate and connect to the new steps
for dep_step in dep_steps:
s._param_space_helper( step = dep_step,
old_src = step,
new_srcs = new_steps,
param_name = param_name,
param_space = param_space )
return new_steps
def _param_space_helper_remove_incoming_edges( s, step_name ):
try:
elist_i = s._edges_i[ step_name ]
del( s._edges_i[ step_name ] ) # Delete edges in incoming edge list
for e in elist_i: # Also delete these edges in outgoing edge lists
src_step_name, src_f = e.get_src()
src_elist_o = s._edges_o[src_step_name]
del( src_elist_o[ src_elist_o.index( e ) ] )
except KeyError:
elist_i = []
return elist_i
def _param_space_helper_get_dependent_steps( s, step_name ):
dep_steps = set()
try:
elist_o = s._edges_o[ step_name ]
except KeyError:
elist_o = []
for e in elist_o:
dst_step_name, dst_f = e.get_dst()
dep_steps.add( s.get_step( dst_step_name ) )
return dep_steps
#-----------------------------------------------------------------------
# Ninja helpers
#-----------------------------------------------------------------------
def escape_dollars( s ):
for step_name in s.all_steps():
s.get_step( step_name ).escape_dollars()
#-----------------------------------------------------------------------
# Drawing
#-----------------------------------------------------------------------
# plot
#
# Dumps a graphviz dot file
def plot( s, dot_title='', dot_f='graph.dot' ):
# Templates for generating graphviz dot statements
graph_template = \
'''\
digraph {{
label="{title}";
labelloc="t";
fontsize=60;
size="8.5;11";
ratio="fill";
margin=0;
pad=1;
rankdir="TB";
concentrate=true;
splines=polyline;
center=true;
nodesep=1.2;
ranksep=0.8;
{nodes}
{edges}
}}\
'''
node_template = \
'{dot_id} [ fontsize=24, width=2, penwidth=2, shape=Mrecord, ' + \
'label="{{ {i} | \\n{name}\\n\\n | {o} }}", color=black ];'
edge_template = \
'{src_dot_id}:{src_port_id}:s -> {dst_dot_id}:{dst_port_id}:n ' + \
'[ arrowsize=2, penwidth=2 ];'
# Helper function
def dot_format_fix( x ):
return x.replace( '-', '_' ).replace( '.', '_' )
# Loop over all steps and generate a graphviz node declaration
#
# Each step will become a graphviz "record" shape, which has a special
# label syntax that dot interprets to extract the ports.
#
# Basically, a label "{ <in1> in1_text | foobar | <out1> out1_text }"
# turns into a three-section node:
#
# - the input with dot ID "in1"
# - the name "foobar"
# - the output with dot ID "out1"
#
dot_nodes = []
for step_name in s.all_steps():
step = s.get_step( step_name )
port_str = '<{dot_port_id}> {label}'
i_port_strs = []
o_port_strs = []
for _input in sorted( step.all_inputs() ):
dot_port_id = dot_format_fix( 'i_' + _input )
i_port_strs.append( \
port_str.format( dot_port_id=dot_port_id, label=_input ) )
for _output in sorted( step.all_outputs() ):
dot_port_id = dot_format_fix( 'o_' + _output )
o_port_strs.append( \
port_str.format( dot_port_id=dot_port_id, label=_output ) )
node_cfg = {}
node_cfg['dot_id'] = dot_format_fix( step_name )
node_cfg['name'] = '\n' + step_name + '\n\n'
node_cfg['i'] = '{ ' + ' | '.join( i_port_strs ) + ' }'
node_cfg['o'] = '{ ' + ' | '.join( o_port_strs ) + ' }'
dot_nodes.append( node_template.format( **node_cfg ) )
# Loop over all edges and generate graphviz edge commands
#
# A command like "foo -> bar" will draw an edge from foo to bar.
#
dot_edges = []
for elist in s._edges_i.values():
for e in elist:
src_step_name, src_f = e.get_src()
dst_step_name, dst_f = e.get_dst()
e_cfg = {}
e_cfg['src_dot_id'] = dot_format_fix( src_step_name )
e_cfg['src_port_id'] = dot_format_fix( 'o_' + src_f )
e_cfg['dst_dot_id'] = dot_format_fix( dst_step_name )
e_cfg['dst_port_id'] = dot_format_fix( 'i_' + dst_f )
dot_edges.append( edge_template.format( **e_cfg ) )
# Write out the graphviz dot graph file
with open( dot_f, 'w' ) as fd:
graph_cfg = {}
graph_cfg['title'] = dot_title
graph_cfg['nodes'] = '\n'.join( dot_nodes )
graph_cfg['edges'] = '\n'.join( dot_edges )
fd.write( graph_template.format( **graph_cfg ) )
#-----------------------------------------------------------------------
# Graph traversal order
#-----------------------------------------------------------------------
def topological_sort( s ):
order = []
# Make a deep copy of the edges (destructive algorithm)
edges_deep_copy = {}
for step_name, elist in s._edges_i.items():
edges_deep_copy[ step_name ] = list(elist)
edges = edges_deep_copy
# Consider all steps in the graph
steps = set( s.all_steps() )
# Topological sort
while( steps ):
steps_with_deps = set( edges.keys() )
steps_without_deps = steps.difference( steps_with_deps )
order.extend( steps_without_deps )
steps = steps_with_deps
keys_to_delete = []
for step_name, elist in edges.items():
idx_to_delete = []
for i, e in enumerate( elist ):
if e.get_src()[0] in order:
idx_to_delete.append( i )
for i in reversed( idx_to_delete ):
del( elist[i] )
if elist == []:
keys_to_delete.append( step_name )
for k in keys_to_delete:
del( edges[k] )
return order
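# --- Added usage sketch (illustrative only, not from mflowgen) ---
# Rough shape of interactive use, assuming 'rtl' and 'synth' are directories
# containing step templates (hypothetical names; the Step(path) signature is
# inferred from set_adk() above):
#
#   g = Graph()
#   rtl   = Step( 'rtl',   default=False )
#   synth = Step( 'synth', default=False )
#   g.add_step( rtl )
#   g.add_step( synth )
#   g.connect_by_name( rtl, synth )   # joins same-named outputs to inputs
#   g.dangling_inputs()               # sanity check for unconnected inputs
#   print( g.topological_sort() )     # e.g. ['rtl', 'synth']
#   g.plot( dot_title='demo' )        # writes graph.dot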
``` |
{
"source": "jjthrash/Fabricius",
"score": 2
} |
#### File: Fabricius/v0.1/sync.py
```python
import re
from typing import Dict, Tuple, List
from aqt import mw
from aqt.utils import showInfo, showText
from .roam.real import Client, InputError
from .tools.markdown2.lib.markdown2 import markdown
DEBUGWARNING = True
DEBUGVERBOSE = False
# Dict keys for config.json
CONFIG_API_KEY_K = "apiKey"
CONFIG_API_TOKEN_K = "apiToken"
CONFIG_GRAPH_NAME_K = "graphName"
CONFIG_API_URL_K = "roamAPIUrl"
CONFIG_CARD_K = "cards"
CONFIG_CARD_MODEL_K = "model"
CONFIG_CARD_DECK_K = "deck"
CONFIG_CARD_TAGS_K = "tagMap"
def debugInfo(s):
if DEBUGVERBOSE:
showInfo(s)
def debugWarning(s):
if DEBUGWARNING:
showInfo(s)
def showAndThrowErrors(errors: List[str]):
if len(errors) != 0:
errorStr = "; ".join(errors)
showInfo(errorStr)
raise Exception(errorStr)
class Syncer:
def __init__(self):
config = mw.addonManager.getConfig(__name__)
errors = []
for k in [
CONFIG_API_KEY_K,
CONFIG_API_TOKEN_K,
CONFIG_GRAPH_NAME_K,
CONFIG_API_URL_K,
CONFIG_CARD_K,
]:
if not k in config:
errors.append('did not find required key "{}" in config.json'.format(k))
showAndThrowErrors(errors)
for i, cardConfig in enumerate(config[CONFIG_CARD_K]):
for k in [CONFIG_CARD_MODEL_K, CONFIG_CARD_DECK_K, CONFIG_CARD_TAGS_K]:
if not k in cardConfig:
errors.append(
'did not find required key "{}" for card at index {} in config.json'.format(
k, i
)
)
showAndThrowErrors(errors)
self.errorLog = []
self.roamClient = Client(
config[CONFIG_GRAPH_NAME_K],
config[CONFIG_API_KEY_K],
config[CONFIG_API_TOKEN_K],
config[CONFIG_API_URL_K],
)
# idea is to build a single query that will get all relevant blocks?
# or is it possible that there will be too much data for a single api call?
# def buildQuery():
def sync(self):
config = mw.addonManager.getConfig(__name__)
for cardCfg in config[CONFIG_CARD_K]:
modelName = cardCfg[CONFIG_CARD_MODEL_K]
deckName = cardCfg[CONFIG_CARD_DECK_K]
deckID = mw.col.decks.id(deckName, create=True)
mw.col.decks.select(deckID)
model = mw.col.models.byName(modelName)
if not model:
showInfo(
'no such model "{}", please create it before proceeding. Sync stopped.'.format(
modelName
)
)
return
deck = mw.col.decks.get(deckID, default=False)
if not deck:
showInfo(
'no such deck "{}", please create it before proceeding. Sync stopped.'.format(
deck
)
)
return
deck["mid"] = model["id"]
for tag, field in cardCfg[CONFIG_CARD_TAGS_K].items():
# [(uid, text, timestamp)]
matchingBlocks = self.roamClient.queryForTag(tag)
for block in matchingBlocks:
self.createOrUpdateNote(
{field: (block.text, block.uid)}, block.modifiedTime, deckID
)
mw.col.decks.save(deck)
mw.col.save()
if len(self.errorLog) > 0:
showAndThrowErrors(self.errorLog)
def createOrUpdateNote(
self, res: Dict[str, Tuple[str, str]], blockModifiedTime: str, did: int
):
for textField, data in res.items():
text, uid = data
refField = refFieldFromTextField(textField)
queryByRef = "{}:{}".format(refField, uid)
ids = mw.col.find_notes(queryByRef)
if not ids:
debugInfo("card not found for query {}".format(queryByRef))
note = mw.col.newNote()
note[refField] = uid
note[textField] = convertToCloze(text)
mw.col.add_note(note, did)
else:
debugInfo("note found for query {} - {}".format(queryByRef, ids))
if len(ids) > 1:
showText(
'should never happen: more than 1 note found with block ref {}. Please search for the duplicate and delete it. You can use the query "{}". After deleting, run Fabricius Sync again.'.format(
ids,
queryByRef,
)
)
debugInfo("note ids found = {}".format(ids))
# update the card based on date
id = ids[0]
note = mw.col.getNote(id)
# Roam returns in msecs
noteModifiedTime = int(note.mod) * 1000
debugInfo(
"noteModifiedTime {}, blockModifiedTime {}, (noteModifiedTime>blockModifiedTime)? {}".format(
noteModifiedTime,
blockModifiedTime,
(noteModifiedTime > blockModifiedTime),
)
)
# Text is from Roam
# note[textField] is from Anki.
textInAnkiFormat = convertToCloze(text)
if note[textField] == textInAnkiFormat:
debugInfo("skipping this note/block since contents are the same")
continue
if noteModifiedTime > int(blockModifiedTime):
debugInfo(
"note modified later: changing block (({})) in roam with text {}".format(
uid, note[textField]
)
)
try:
self.roamClient.updateBlock(
uid, convertToRoamBlock(note[textField])
)
except InputError as e:
self.logError(e)
else:
debugWarning(
"block modified later: changing note {} in anki with text {}".format(
id, textInAnkiFormat
)
)
# change note
note[textField] = textInAnkiFormat
note.flush()
debugWarning(note.__repr__())
def logError(self, t: str):
self.errorLog.append(t)
def convertToCloze(s: str):
res = re.sub(r"{\s*c(\d*):([^}]*)}", r"{{c\g<1>::\g<2>}}", s)
res = basicMarkdownToHtml(res)
return res
def convertToRoamBlock(s: str):
res = re.sub(r"{{c(\d*)::([^}]*)}}", r"{c\g<1>:\g<2>}", s)
res = basicHtmlToMarkdown(res)
return res
# Markdown <-> HTML conversion using regex is hacky but the best we can do for now.
# 1 hour investigation with html->md and md->html libraries proved unsuccessful with too many edge cases.
# Main issue is that both fns need to be "inverses" otherwise cards will start to get mis-formatted.
# Issue: now there is no easy way to write these literals in flashcard blocks.
def basicHtmlToMarkdown(s: str):
s = s.replace("<b>", "**")
s = s.replace("</b>", "**")
s = s.replace("<i>", "__")
s = s.replace("</i>", "__")
s = s.replace("&nbsp;", " ")
return s
def basicMarkdownToHtml(s: str):
# ungreedy match
res = re.sub(r"\*\*(.*?)\*\*", r"<b>\g<1></b>", s)
# ungreedy match
res = re.sub(r"__(.*?)__", r"<i>\g<1></i>", res)
return res
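# Round-trip sketch (values below are illustrative): the two helpers above
# are intended to be inverses of each other, e.g.
#
# >>> basicMarkdownToHtml("**bold** and __italic__")
# '<b>bold</b> and <i>italic</i>'
# >>> basicHtmlToMarkdown('<b>bold</b> and <i>italic</i>')
# '**bold** and __italic__'
#
# convertToCloze and convertToRoamBlock play the same role for the cloze
# syntax, mapping {c1:text} <-> {{c1::text}}.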
def refFieldFromTextField(s):
return "{}UID".format(s)
``` |
{
"source": "JJToB/honeygrove",
"score": 2
} |
#### File: honeygrove/honeygrove/config.py
```python
from pathlib import PurePath
import pickle
# Utility methods to pickle some config parts
def load_object(path):
with open(str(path), 'rb') as f:
return pickle.load(f)
def save_object(obj, path):
with open(str(path), 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
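# Usage sketch for the two helpers above (the path and payload are
# hypothetical):
#
# >>> save_object({'login': 'login.html'}, '/tmp/html_dictionary.pkl')
# >>> load_object('/tmp/html_dictionary.pkl')
# {'login': 'login.html'}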
# With this we get dot-notation for config subsections
class ConfigSection:
pass
# Honeygrove configuration
class Config:
# General configuration
general = ConfigSection()
general.id = "HG1"
general.description = {"Name": str(general.id), "Location": "Hamburg, Germany", "Description": "Honeygrove instance #1"}
general.address = "0.0.0.0"
general.hostname = "euve256525"
# Default maximum connections per host per service
general.max_connections_per_host = 100
# True = use UTC, False = use System Time
general.use_utc = True
# Set this to False if you do not want to use broker or broker is
# unavailable on your machine. Currently, the management-console
# and the EKStack can not be used without communication via Broker.
general.use_broker = False
# Set this to True if you want the output in a parsable JSON format,
# for example to forward it with Logstash
general.output_json = False
# Set this to False if you do not want to use geoip or no database
# is available on your machine.
general.use_geoip = False
# List of service names that should be enabled at startup
# (defaults to all implemented services if left empty)
general.enabled_services = []
# Logfile and output configuration
logging = ConfigSection()
# Status: Includes INFO-, HEARTBEAT-, RESPONSE- and ERROR-messages
logging.print_status = True
logging.print_alerts = True
# Alerts: Includes LOGIN-, REQUEST-, FILE-, and SYN-messages
logging.log_status = True
logging.log_alerts = True
# Folder configuration
# All folders are relative to `folder.base`, so it is usually sufficient to only change this
folder = ConfigSection()
# Base path for resources and logs
folder.base = PurePath('/var/honeygrove')
# Resource related folders
folder.resources = folder.base / 'resources'
# Folder for emulated filesystem used by all services
folder.filesystem = folder.resources / 'filesystem' / 'unix.xml'
folder.honeytoken_files = folder.resources / 'honeytoken_files'
folder.quarantine = folder.resources / 'quarantine'
folder.tls = folder.resources / 'tls'
if general.use_geoip:
folder.geo_ip = folder.resources / 'geo_ip.db'
# Log folder (currently only a single file)
folder.log = folder.base / 'logs' / 'log.txt'
# Ports without specific service
listen = ConfigSection()
listen.name = "LISTEN"
listen.ports = [r for r in range(1, 5000)]
tcp_scan = ConfigSection()
tcp_scan.name = "TCP Scan Detector"
tcp_scan.ports = [r for r in range(1, 5000)]
# Timeframe in which ACK packets are expected to return
# (to distinguish between port scans and valid connection attempts)
tcp_scan.timeout = 5
# Services which are not bound to a single port
multiple_port_services = [listen.name, tcp_scan.name]
# HTTP service configuration
http = ConfigSection()
http.name = "HTTP"
http.port = 80
http.connections_per_host = general.max_connections_per_host
# Modify to simulate another server
http.response_headers = {'Last-Modified': "Sun, 07 Aug 2019 08:02:22 GMT",
'Cache-Control': "no-store, no-cache, must-revalidate, post-check=0, pre-check=0",
'Pragma': "no-cache",
'Content-Type': "text/html; charset=UTF-8"}
# To add your own HTML file you need to add it at httpSupportedSites
# and add it into the dictionary below with its index. A
# content page is optional. The first site is the login site, the
# second one is the content page. The html login file needs
# to have a login field with the name "log" and a password field
# with the name of "<PASSWORD>"
http.resource_folder = folder.resources / 'http'
http.html_dictionary_path = http.resource_folder / 'html_dictionary.pkl'
http.html_dictionary_content = load_object(http.html_dictionary_path)
def save_html_dictionary(self):
save_object(self.http.html_dictionary_content, self.http.html_dictionary_path)
# HTTPS service configuration
https = ConfigSection()
https.name = "HTTPS"
https.port = 443
https.connections_per_host = general.max_connections_per_host
# TLS configuration
https.tls_key = folder.tls / 'https.key'
https.tls_cert = folder.tls / 'https.crt'
# SSH service configuration
ssh = ConfigSection()
ssh.name = "SSH"
ssh.port = 22
ssh.connections_per_host = general.max_connections_per_host
# must start with "SSH-2.0-"
ssh.banner = b'SSH-2.0-' + general.hostname.encode()
ssh.resource_folder = folder.resources / 'ssh'
ssh.database_path = ssh.resource_folder / 'database.json'
ssh.helptext_folder = ssh.resource_folder / 'helptexts'
ssh.gnuhelp_folder = ssh.resource_folder / 'gnuhelp'
ssh.real_shell = False
ssh.accept_files = True
ssh.accept_keys = False
# Telnet service configuration
telnet = ConfigSection()
telnet.name = "Telnet"
telnet.port = 23
telnet.connections_per_host = general.max_connections_per_host
# Currently not implemented
telnet.real_shell = False
# FTP service configuration
ftp = ConfigSection()
ftp.name = "FTP"
ftp.port = 21
ftp.connections_per_host = general.max_connections_per_host
ftp.accept_files = True
# Email (POP3(S), SMTP(S), IMAP(S)) related configuration
email = ConfigSection()
email.resource_folder = folder.resources / 'email'
email.database_path = email.resource_folder / 'database.py'
# TLS configuration
email.tls_key = folder.tls / 'email.key'
email.tls_cert = folder.tls / 'email.crt'
# SMTP service configuration
smtp = ConfigSection()
smtp.name = "SMTP"
smtp.port = 25
smtp.connections_per_host = general.max_connections_per_host
# CRAM-MD5 and SCRAM-SHA-1 aren't yet implemented! (using them anyway crashes the connection)
smtp.authentication_methods = {"PLAIN": True, "LOGIN": True, "CRAM-MD5": False, "SCRAM-SHA-1": False}
# SMTPS (SMTP + TLS) service configuration
smtps = ConfigSection()
smtps.name = "SMTPS"
smtps.port = 587
smtps.connections_per_host = general.max_connections_per_host
# POP3 service configuration
pop3 = ConfigSection()
pop3.name = "POP3"
pop3.port = 110
pop3.connections_per_host = general.max_connections_per_host
# POP3S (POP3 + TLS) service configuration
pop3s = ConfigSection()
pop3s.name = "POP3S"
pop3s.port = 995
pop3s.connections_per_host = general.max_connections_per_host
# IMAP service configuration
imap = ConfigSection()
imap.name = "IMAP"
imap.port = 143
imap.connections_per_host = general.max_connections_per_host
# CRAM-MD5 and SCRAM-SHA-1 aren't yet implemented! (using them anyway crashes the connection)
imap.authentication_methods = smtp.authentication_methods
# IMAPS (IMAP + TLS) service configuration
imaps = ConfigSection()
imaps.name = "IMAPS"
imaps.port = 993
imaps.connections_per_host = general.max_connections_per_host
# Enable all known services if none are explicitly configured above
if not general.enabled_services:
general.enabled_services = [http.name, https.name, ssh.name, telnet.name, ftp.name, smtp.name,
smtps.name, pop3.name, pop3s.name, imap.name, imaps.name,
tcp_scan.name]
# HoneytokenDB configuration
honeytoken = ConfigSection()
honeytoken.database_file = folder.resources / 'honeytokendb' / 'database.txt'
honeytoken.generating = {"SSH": ["SSH", "FTP", "HTTP"], "HTTP": ["HTTP", "SSH"], "FTP": ["FTP"]}
honeytoken.probabilities = {"SSH": 0.5, "FTP": 0.1, "HTTP": 0.9, "Telnet": 0.8}
# True: password acceptance via hash, False: random acceptance
honeytoken.accept_via_hash = True
honeytoken.hash_seed = '__honeygrove__'
# username length limits
honeytoken.username_min = 6
honeytoken.username_max = 24
# password length limits
honeytoken.password_min = 6
honeytoken.password_max = 24
# Optional: Broker configuration
if (general.use_broker):
broker = ConfigSection()
# Optional: IP/port to listen on (e.g. for connections from the management console)
broker.listen = False
broker.listen_ip = '127.0.0.1'
broker.listen_port = 8888
# Optional: IP/port to peer to at startup (e.g. for connection to the CIM)
broker.peer = False
broker.peer_ip = '127.0.0.1'
broker.peer_port = 34445
# Optional: SSL Authentication
broker.ssl_ca_file = None # Path to CA file
broker.ssl_ca_path = None # Path to directory with CA files
broker.ssl_certificate = None # Own certificate
broker.ssl_key_file = None # Own key
```
#### File: honeygrove/core/ServiceController.py
```python
from honeygrove import log
from honeygrove.config import Config
from honeygrove.services import ServiceBaseModel
# from honeygrove.tests.testresources import serviceControllerTestPkg # Actually used
import threading
from twisted.internet import reactor
class ServiceController():
def __init__(self):
"""
Instantiates all subclasses of ServiceBaseModel and keeps track of them in a dict.
"""
threading.Thread(target=reactor.run, args=(False,)).start()
self.serviceList = []
for service in ServiceBaseModel.ServiceBaseModel.__subclasses__():
self.serviceList.append(service())
self.serviceDict = dict([(service._name, service) for service in self.serviceList])
self.listen = self.serviceDict[Config.listen.name]
self.runningServicesDict = dict([])
def startService(self, name):
"""
Starts the given service and adds it to threadDict
:param name: Name of the service (str)
"""
service = self.serviceDict[name]
address = service._address
if service._port:
address += ":{}".format(service._port)
log.info("{}: Starting on {}".format(name, address))
if name not in self.runningServicesDict:
if name not in Config.multiple_port_services:
self.listen.stopOnPort(service._port)
service.startService()
self.runningServicesDict[name] = service
return True
else:
return False
def stopService(self, name):
"""
Stops the given service and removes it from threadDict
:param name: Name of the service (str)
"""
log.info("Stop Service: " + name)
if name in self.runningServicesDict:
self.serviceDict[name].stopService()
self.runningServicesDict.pop(name)
if name not in Config.multiple_port_services:
self.listen.startOnPort(self.serviceDict[name]._port)
return True
else:
return False
```
#### File: honeygrove/services/ServiceBaseModel.py
```python
from honeygrove import log
from honeygrove.config import Config
from abc import ABC, abstractmethod
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.protocols.policies import WrappingFactory
class ServiceBaseModel(ABC):
def __init__(self):
"""
Initializes some needed service parts.
Only add variables here if they are needed in all services.
"""
self._fService = Factory()
self._fService.clients = dict([])
# Only these variables should be changeable
self._name = None
self._address = Config.general.address
self._port = None
self._stop = True
# XXX: Not used currently?
self._status = None
self._limiter = None
self._transport = None
def startService(self):
"""
Starts the specific service
"""
self._stop = False
self._transport = reactor.listenTCP(self._port, self._limiter, interface=self._address)
def stopService(self):
"""
Stops the specific service
:return:
"""
self._stop = True
self._transport.stopListening()
def changePort(self, port):
"""
Changes the port. If this is not possible, e.g. because the port is
already in use, the service will be terminated.
:param port: int
"""
self.stopService()
self._port = port
self.startService()
class Limiter(WrappingFactory):
# name = Name of the service
# max_conns = Maximum number of connections per host
def __init__(self, service, name, max_conns):
super(Limiter, self).__init__(service)
self._maxConnectionsPerPeer = max_conns
self._name = name
def startFactory(self):
self.peerConnections = {}
def buildProtocol(self, addr):
peerHost = addr.host
connectionCount = self.peerConnections.get(peerHost, 0)
if connectionCount >= self._maxConnectionsPerPeer:
log.limit_reached(self._name, peerHost)
return None
self.peerConnections[peerHost] = connectionCount + 1
return WrappingFactory.buildProtocol(self, addr)
# p = "protocol"?
def unregisterProtocol(self, p):
peerHost = p.getPeer().host
self.peerConnections[peerHost] -= 1
if self.peerConnections[peerHost] == 0:
del self.peerConnections[peerHost]
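# Usage sketch (the service name and port below are illustrative, not
# taken from a concrete service): a service wraps its protocol factory in
# a Limiter before listening, so no single peer can hold more than
# max_conns simultaneous connections:
#
# limited = Limiter(my_factory, "ExampleService", Config.general.max_connections_per_host)
# reactor.listenTCP(2222, limited, interface=Config.general.address)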
```
#### File: honeygrove/tests/FilesystemParser_Test.py
```python
import unittest
from honeygrove.core.FilesystemParser import FilesystemParser
from honeygrove.tests.testresources import __path__ as resources
from honeygrove.tests.testresources import testconfig as config
class FilesystemParserUnixTest(unittest.TestCase):
def setUp(self):
FilesystemParser.honeytoken_directory = config.tokendir
self.fp = FilesystemParser(resources._path[0] + '/test_unix.xml')
def test_get_absolute_path(self):
self.assertEqual("/home/root", self.fp.get_absolute_path("../../bin/../home/root"))
self.assertEqual("/bin", self.fp.get_absolute_path("/../home/../bin"))
def test_tree_contains(self):
self.assertTrue(self.fp.tree_contains("id_rsa.pub"))
self.assertFalse(self.fp.tree_contains("michgibtsnicht"))
def test_add_honeytoken_files(self):
self.assertTrue(self.fp.tree_contains("id_rsa"))
self.assertTrue(self.fp.tree_contains("id_rsa.pub"))
self.assertTrue(self.fp.tree_contains("suspicious_data.txt"))
def test_get_element(self):
self.assertEqual(self.fp.get_element([]).attrib['name'], "/")
def test_get_current_path(self):
self.fp.cd("/home/root")
self.assertEqual(self.fp.get_current_path(), "/home/root")
self.fp.cd("..")
self.assertEqual(self.fp.get_current_path(), "/home")
self.fp.cd("/")
self.assertEqual(self.fp.get_current_path(), "/")
def test_get_formatted_path(self):
self.fp.cd("/home/root")
self.assertEqual(self.fp.get_formatted_path(), "~")
self.fp.cd("/home")
self.assertFalse(self.fp.get_formatted_path() == "~")
def test_mkdir(self):
self.fp.mkdir("new_folder_01")
self.assertTrue(self.fp.tree_contains("new_folder_01"))
self.assertEqual(self.fp.ls().count("new_folder_01"), 1) # pruefen, dass nicht mehrfach erzeugt
self.fp.mkdir("~/new_folder_02")
self.assertTrue(self.fp.tree_contains("new_folder_02"))
self.fp.mkdir("../new_folder_03")
self.assertTrue(self.fp.tree_contains("new_folder_03"))
response = self.fp.mkdir("~/new_folder_02")
self.assertEqual(response, "mkdir: cannot create directory 'new_folder_02': File exists")
def test_touch(self):
self.fp.mkdir("new_file_01")
self.assertTrue(self.fp.tree_contains("new_file_01"))
self.assertEqual(self.fp.ls().count("new_file_01"), 1) # pruefen, dass nicht mehrfach erzeugt
self.fp.mkdir("~/new_file_02")
self.assertTrue(self.fp.tree_contains("new_file_02"))
self.fp.mkdir("../new_file_03")
self.assertTrue(self.fp.tree_contains("new_file_03"))
def test_ls(self):
self.assertEqual(self.fp.ls("/var"), "log\nmail\nspool\ntmp\n")
self.assertEqual(self.fp.ls("/var/log"), "")
self.fp.cd("~")
self.assertEqual(self.fp.ls(".ssh"), "id_rsa\nid_rsa.pub\n")
def test_change_dir(self):
path = self.fp.get_current_path() # remember the old path
self.fp.cd("./..")
self.assertEqual(self.fp.get_current_path().split("/")[-1],
path.split("/")[-2]) # neuer Pfad = alter Pfad ohne letzten /
self.fp.cd("~")
path = self.fp.get_current_path() # remember the old path
self.fp.cd("../.")
self.assertEqual(self.fp.get_current_path().split("/")[-1],
path.split("/")[-2]) # neuer Pfad = alter Pfad ohne letzten /
self.fp.cd("/")
self.assertEqual(self.fp.get_current_path(), "/")
self.fp.cd("~")
self.assertEqual(self.fp.get_formatted_path(), "~")
self.fp.cd("../..")
self.fp.cd("../../..")
self.assertEqual(self.fp.get_current_path(), "/")
path = "mich/gibtsnicht"
self.assertEqual(self.fp.cd(path), path + ": No such file or directory")
path = "~~"
self.assertEqual(self.fp.cd(path), path + ": No such file or directory")
def test_get_absoulte_path(self):
self.fp.cd("/home/root")
self.assertEqual(self.fp.get_absolute_path("~"), "/home/root")
self.assertEqual(self.fp.get_absolute_path("./."), "/home/root")
self.assertEqual(self.fp.get_absolute_path("./"), "/home/root")
self.assertEqual(self.fp.get_absolute_path("."), "/home/root")
self.assertEqual(self.fp.get_absolute_path("/"), "/")
self.assertEqual(self.fp.get_absolute_path("/home"), "/home")
self.assertEqual(self.fp.get_absolute_path("/home/../bin"), "/bin")
self.assertEqual(self.fp.get_absolute_path(""), "")
self.fp.cd("/")
self.assertEqual(self.fp.get_absolute_path("C:\\Benutzer"), "/C:\\Benutzer")
self.assertEqual(self.fp.get_absolute_path("/#wasistdaßfür1Verzeichnis,_vong_Name_her?\\\n"),
"/#wasistdaßfür1Verzeichnis,_vong_Name_her?\\\n")
self.assertEqual(self.fp.get_absolute_path("/PfadDarfMitSlashEnden/"), "/PfadDarfMitSlashEnden")
def test_valid_dir(self):
self.assertTrue(self.fp.valid_directory("/home/root"))
self.assertTrue(self.fp.valid_directory("/home/root/"))
self.assertTrue(self.fp.valid_directory("/"))
self.assertTrue(self.fp.valid_directory("~"))
self.assertTrue(self.fp.valid_directory(".."))
self.assertTrue(self.fp.valid_directory("./.."))
self.assertTrue(self.fp.valid_directory("../.."))
self.assertTrue(self.fp.valid_directory("."))
self.assertTrue(self.fp.valid_directory("./."))
self.assertTrue(self.fp.valid_directory("../."))
self.assertFalse(self.fp.valid_directory("..."))
def test_valid_file(self):
self.assertTrue(self.fp.valid_file("~/.ssh/id_rsa"))
self.assertTrue(self.fp.valid_file("~/.ssh/id_rsa.pub"))
self.assertFalse(self.fp.valid_file("michgibtsnicht!1!"))
def test_delete(self):
self.fp.cd("/")
self.fp.mkdir("testdir")
self.fp.cd("testdir")
self.fp.cd("..")
self.assertTrue("testdir" in self.fp.ls())
self.assertEqual(self.fp.ls().count("testdir"), 1)
self.fp.delete("testdir")
self.assertFalse("testdir" in self.fp.ls())
self.fp.touch("testfile")
self.assertTrue("testfile" in self.fp.ls())
self.assertEqual(self.fp.ls().count("testfile"), 1)
response = self.fp.delete(".")
self.assertEqual(response, "rm: refusing to remove '.' or '..' directory: skipping '.'")
response = self.fp.delete("..")
self.assertEqual(response, "rm: refusing to remove '.' or '..' directory: skipping '..'")
def test_rename(self):
self.fp.cd("/")
self.fp.touch("old_name")
self.fp.rename("old_name", "new_name")
self.assertFalse("old_name" in self.fp.ls())
self.assertTrue("new_name" in self.fp.ls())
self.assertEqual(self.fp.ls().count("new_name"), 1)
def test_move(self):
self.fp.cd("/")
self.fp.mkdir("testdir")
self.fp.touch("testfile")
response = self.fp.move("testfile", "testdir")
self.assertEqual(response, "Not possible")
self.fp.mkdir("testdir/testrecursive")
self.fp.move("testdir", "/bin/testdir")
self.assertFalse("testdir" in self.fp.ls())
self.assertTrue("testdir" in self.fp.ls("/bin"))
self.assertEqual(self.fp.ls("/bin").count("testdir"), 1)
self.assertTrue("testrecursive" in self.fp.ls("/bin/testdir"))
self.assertEqual(self.fp.ls("/bin/testdir").count("testrecursive"), 1)
def test_cat(self):
self.fp.cd("~")
self.assertTrue("-----BEGIN RSA PRIVATE KEY-----" in self.fp.cat(".ssh/id_rsa"))
self.assertFalse(self.fp.cat("~/suspicious_data.txt") == "")
class FilesystemParserWindowsTest(unittest.TestCase):
def setUp(self):
FilesystemParser.honeytoken_directory = config.tokendir
self.fp = FilesystemParser(resources._path[0] + '/test_dir_sys.xml')
def test_get_absolute_path(self):
self.assertEqual(self.fp.get_absolute_path("~"), "/Benutzer/TestUser")
def test_tree_contains(self):
self.assertTrue(self.fp.tree_contains("scan_01.jpg"))
self.assertTrue(self.fp.tree_contains("Firefox"))
self.assertTrue(self.fp.tree_contains("id_rsa"))
self.assertFalse(self.fp.tree_contains("michgibtsnicht"))
def test_add_honeytoken_files(self):
print(self.fp.ls())
self.assertTrue(self.fp.tree_contains("id_rsa"))
self.assertTrue(self.fp.tree_contains("suspicious_data.txt"))
def test_get_element(self):
self.assertEqual(self.fp.get_element([]).attrib['name'], "C:")
def test_get_current_path(self):
self.fp.cd("\Programme\Firefox")
self.assertEqual(self.fp.get_current_path(), "/Programme/Firefox")
self.fp.cd("..")
self.assertEqual(self.fp.get_current_path(), "/Programme")
self.fp.cd("\\")
self.assertEqual(self.fp.get_current_path(), "/")
def test_mkdir(self):
self.fp.mkdir("new_folder_01")
self.assertTrue(self.fp.tree_contains("new_folder_01"))
self.assertEqual(self.fp.ls().count("new_folder_01"), 1) # pruefen, dass nicht mehrfach erzeugt
self.fp.mkdir("~/new_folder_02")
self.assertTrue(self.fp.tree_contains("new_folder_02"))
self.fp.mkdir("../new_folder_03")
self.assertTrue(self.fp.tree_contains("new_folder_03"))
response = self.fp.mkdir("~/new_folder_02")
self.assertEqual(response, "mkdir: cannot create directory 'new_folder_02': File exists")
def test_touch(self):
self.fp.mkdir("new_file_01")
self.assertTrue(self.fp.tree_contains("new_file_01"))
self.assertEqual(self.fp.ls().count("new_file_01"), 1) # pruefen, dass nicht mehrfach erzeugt
self.fp.mkdir("~/new_file_02")
self.assertTrue(self.fp.tree_contains("new_file_02"))
self.fp.mkdir("../new_file_03")
self.assertTrue(self.fp.tree_contains("new_file_03"))
def test_ls(self):
self.assertEqual(self.fp.ls("\Benutzer\TestUser\Musik"), "asdf.mp3\n")
self.assertEqual(self.fp.ls("~/Downloads/"), "")
def test_change_dir(self):
path = self.fp.get_current_path() # remember the old path
self.fp.cd("./..")
self.assertEqual(self.fp.get_current_path().split("/")[-1],
path.split("/")[-2]) # neuer Pfad = alter Pfad ohne letzten /
self.fp.cd("~")
path = self.fp.get_current_path() # remember the old path
self.fp.cd("../.")
self.assertEqual(self.fp.get_current_path().split("/")[-1],
path.split("/")[-2]) # neuer Pfad = alter Pfad ohne letzten /
self.fp.cd("/")
self.assertEqual(self.fp.get_current_path(), "/")
self.fp.cd("~")
self.assertEqual(self.fp.get_formatted_path(), "C:\\Benutzer\\TestUser")
self.fp.cd("../..")
self.fp.cd("../../..")
self.assertEqual(self.fp.get_current_path(), "/")
path = "mich/gibtsnicht"
self.assertEqual(self.fp.cd(path), path + ": No such file or directory")
path = "~~"
self.assertEqual(self.fp.cd(path), path + ": No such file or directory")
def test_valid_dir(self):
self.fp.cd("~")
self.assertTrue(self.fp.valid_directory("..\..\Programme\Firefox"))
self.assertTrue(self.fp.valid_directory("/"))
self.assertTrue(self.fp.valid_directory("~"))
self.assertTrue(self.fp.valid_directory(".."))
self.assertTrue(self.fp.valid_directory("./.."))
self.assertTrue(self.fp.valid_directory("../.."))
self.assertTrue(self.fp.valid_directory("."))
self.assertTrue(self.fp.valid_directory("./."))
self.assertTrue(self.fp.valid_directory("../."))
self.assertFalse(self.fp.valid_directory("..."))
def test_valid_file(self):
self.assertFalse(self.fp.valid_file("michgibtsnicht!1!"))
def test_delete(self):
self.fp.cd("/")
self.fp.mkdir("testdir")
self.fp.cd("testdir")
self.fp.cd("..")
self.assertTrue("testdir" in self.fp.ls())
self.assertEqual(self.fp.ls().count("testdir"), 1)
self.fp.delete("testdir")
self.assertFalse("testdir" in self.fp.ls())
self.fp.touch("testfile")
self.assertTrue("testfile" in self.fp.ls())
self.assertEqual(self.fp.ls().count("testfile"), 1)
response = self.fp.delete(".")
self.assertEqual(response, "rm: refusing to remove '.' or '..' directory: skipping '.'")
response = self.fp.delete("..")
self.assertEqual(response, "rm: refusing to remove '.' or '..' directory: skipping '..'")
def test_rename(self):
self.fp.cd("/")
self.fp.touch("old_name")
self.fp.rename("old_name", "new_name")
self.assertFalse("old_name" in self.fp.ls())
self.assertTrue("new_name" in self.fp.ls())
self.assertEqual(self.fp.ls().count("new_name"), 1)
def test_move(self):
self.fp.cd("/")
self.fp.mkdir("testdir")
self.fp.touch("testfile")
response = self.fp.move("testfile", "testdir")
self.assertEqual(response, "Not possible")
# self.fp.mkdir("testdir/testrecursive")
# self.fp.move("testdir", "/bin/testdir")
# self.assertFalse("testdir" in self.fp.ls())
# self.assertTrue("testdir" in self.fp.ls("/bin"))
# self.assertEqual(self.fp.ls("/bin").count("testdir"), 1)
# self.assertTrue("testrecursive" in self.fp.ls("/bin/testdir"))
# self.assertEqual(self.fp.ls("/bin/testdir").count("testrecursive"), 1)
def test_cat(self):
# self.assertTrue("-----BEGIN RSA PRIVATE KEY-----" in self.fp.cat("~/.ssh/id_rsa"))
# self.assertTrue("ssh-rsa " in self.fp.cat("~/.ssh/id_rsa.pub"))
self.assertFalse(self.fp.cat("~/suspicious_data.txt") == "")
```
#### File: honeygrove/tests/HoneyAdapter_Test.py
```python
from honeygrove.tests.testresources import testconfig as config
from honeygrove.tests.testresources.honeyadaptertest import incoming_messages as messages, outgoing_messages as answers
from honeygrove.tests.testresources.honeyadaptertest.DummyAdapter import BrokerEndpoint, DummyAdapter as HoneyAdapter
import mock
import os
from os.path import join, isfile
import unittest
class HoneyAdapter_Test(unittest.TestCase):
def setUp(self):
self.adapter = HoneyAdapter()
self.patch = mock.patch.object(BrokerEndpoint, 'sendMessageToTopic')
self.patched = self.patch.start()
def test_ping(self):
msg = messages.msg_ping
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_ping)
def test_getAllServices(self):
msg = messages.msg_get_all_services_id
self.adapter.handle_messages([[msg]])
self.assertTrue(self.patched.call_count == 1)
msg = messages.msg_get_all_services_all
self.adapter.handle_messages([[msg]])
self.assertTrue(self.patched.call_count == 2)
def test_startServices(self):
msg = messages.msg_start_services
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_started_services)
def test_stopServices(self):
msg = messages.msg_stop_services
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_stopped_services)
def test_getSettings(self):
msg = messages.msg_get_settings
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_get_settings)
def test_set_filesys(self):
msg = messages.msg_set_filesys
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_set_filesys)
msg = messages.msg_set_invalid_1_filesys
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_set_invalid_filesys)
msg = messages.msg_set_invalid_2_filesys
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_set_invalid_filesys)
def test_get_filesys(self):
msg = messages.msg_get_filesys
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_get_filesys)
def test_getTokenFiles(self):
addTokenFile("suspicious_data.txt", "a:b:c:d")
addTokenFile("mytoken", "sometoken")
msg = messages.msg_get_token_files
self.adapter.handle_messages([[msg]])
self.assertTrue(self.patched.call_count == 1)
removenTokenFile("suspicious_data.txt")
removenTokenFile("mytoken")
def test_AddTokenFile(self):
filepath = config.tokendir_adapter + '/new_token.txt'
# Case 1: Non Existent File
msg = messages.msg_add_token_file
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_add_token_file)
with open(filepath) as f:
s = f.read()
self.assertEqual(s, "File as String")
# Case 2: Existent File
msg = messages.msg_add_token_file_existent
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_add_token_file)
with open(filepath) as f:
s = f.read()
self.assertEqual(s, "New File as String")
removenTokenFile("new_token.txt")
def test_RemoveTokenFile(self):
# Case 1: Specific Files
addTokenFile("tf1", "Content")
addTokenFile("tf2", "Content")
msg = messages.msg_rem_token_files
self.adapter.handle_messages([[msg]])
self.patched.assert_called_with('answer', answers.answ_remove_token_file)
# Make sure they are deleted
self.assertFalse(isfile(join(config.tokendir_adapter, "tf1")))
self.assertFalse(isfile(join(config.tokendir_adapter, "tf2")))
# Case 2: All Files
addTokenFile("tf3", "Content")
msg = messages.msg_rem_all_token_files
self.adapter.handle_messages([[msg]])
# Make sure everything is deleted
self.assertFalse(os.listdir(config.tokendir_adapter))
# directory stays trackable
addTokenFile("afile", " ")
def test_setSettings(self):
# CASE 1: Service Not Running
old_tokenprob = config.honeytokendbProbabilities['LISTEN']
msg = messages.msg_set_settings
self.adapter.handle_messages([[msg]])
# Correct Answer Was Sent
self.patched.assert_called_with('answer', answers.answ_set_settings)
# New Token Probability Was Set
self.assertTrue(config.honeytokendbProbabilities['LISTEN'] != old_tokenprob)
# New Port Was Set
service = self.adapter.controller.serviceDict['LISTEN']
new_ports = service._port
self.assertEqual(new_ports, [9, 8, 7])
# CASE 2: Service Running
old_tokenprob = config.honeytokendbProbabilities['TESTSERVICEB']
msg = messages.msg_set_settings_run
self.adapter.handle_messages([[msg]])
# Correct Answer Was Sent (Included Port)
self.patched.assert_called_with('answer', answers.answ_set_settings_run)
# New Token Probability Was Set
self.assertTrue(config.honeytokendbProbabilities['TESTSERVICEB'] != old_tokenprob)
def addTokenFile(name, content):
path = join(config.tokendir_adapter, name)
with open(path, 'w+') as file:
file.write(content)
def removenTokenFile(name):
os.remove(join(config.tokendir_adapter, name))
```
#### File: honeygrove/tests/HoneytokenDB_Test.py
```python
import unittest
from honeygrove.core.HoneytokenDB import HoneytokenDataBase
import twisted.cred.credentials as credentials
from twisted.cred.error import UnauthorizedLogin
class HoneyTokenDBTest(unittest.TestCase):
databasefile = 'testresources/testdatabase.txt'
servicename = 'MyServiceName'
sep = ':'
def addCredentials(self, credstring):
with open(self.databasefile, 'w') as file:
file.write(credstring)
def clearCredentials(self):
with open(self.databasefile, 'w') as file:
file.seek(0)
def setUp(self):
HoneytokenDataBase.filepath = self.databasefile
self.db = HoneytokenDataBase(self.servicename)
def test_validCreds(self):
username = 'usermcuserface'
pw = '<PASSWORD>'
c = credentials.UsernamePassword(username, pw)
# Write them to Database
credstring = self.servicename + self.sep + username + self.sep + pw + self.sep
self.addCredentials(credstring)
# Make sure you got UserName back ==> creds are valid
actual = self.db.requestAvatarId(c).result
self.assertEqual(username, actual)
# Delete creds from file
self.clearCredentials()
def test_inValidCreds(self):
c = credentials.UsernamePassword('id<PASSWORD>', '<PASSWORD>')
actual = self.db.requestAvatarId(c).result.value
self.assertTrue(isinstance(actual, UnauthorizedLogin))
```
#### File: honeygrove/tests/ListenService_Test.py
```python
from honeygrove.config import Config
from honeygrove.core.ServiceController import ServiceController
from honeygrove.services.ListenService import ListenService
import twisted.internet.reactor
import unittest
class ListenServiceTest(unittest.TestCase):
listen = None
Controller = None
@classmethod
def setUpClass(cls):
Config.listenServicePorts = [9991, 9992]
def setUp(self):
ListenServiceTest.listen = ListenService()
ListenServiceTest.Controller = ServiceController()
ListenServiceTest.Controller.listen = ListenServiceTest.listen
def tearDown(self):
ListenServiceTest.listen.stopService()
twisted.internet.reactor.callFromThread(twisted.internet.reactor.stop)
def testInit(self):
"""
Test if all ports are initialized
"""
self.assertEqual(ListenServiceTest.listen._port, [9991, 9992])
self.assertEqual(ListenServiceTest.listen._stop, True)
self.assertEqual(ListenServiceTest.listen._transport, dict([]))
def testStart(self):
"""
Tests if the service is active after start
"""
self.assertRaises(KeyError, lambda: ListenServiceTest.listen._transport[9991])
self.assertRaises(KeyError, lambda: ListenServiceTest.listen._transport[9992])
ListenServiceTest.listen.startService()
self.assertNotEqual(ListenServiceTest.listen._transport[9991], None)
self.assertNotEqual(ListenServiceTest.listen._transport[9992], None)
def testStopOnPort(self):
"""
Tests if a specific service can start on a port used by ListenService
"""
ListenServiceTest.listen.startService()
self.assertNotEqual(ListenServiceTest.listen._transport[9991], None)
self.assertNotEqual(ListenServiceTest.listen._transport[9992], None)
ListenServiceTest.Controller.startService("serviceControllerTestService")
self.assertRaises(KeyError, lambda: ListenServiceTest.listen._transport[9991])
def testStartOnPort(self):
"""
Test if the service will start automatically after a service stops on the port
"""
ListenServiceTest.Controller.startService("serviceControllerTestService")
ListenServiceTest.listen.startService()
ListenServiceTest.listen.stopOnPort(9991)
self.assertNotEqual(ListenServiceTest.listen._transport[9992], None)
ListenServiceTest.Controller.stopService("serviceControllerTestService")
self.assertNotEqual(ListenServiceTest.listen._transport[9991], None)
``` |
{
"source": "jjtoledo/Treinamento-Data-Science",
"score": 4
} |
#### File: jjtoledo/Treinamento-Data-Science/factorial.py
```python
def factorial(n):
result = 1
while n > 1:
result = result * n
n = n - 1
return result
# print factorial(4)
# >>> 24
# print factorial(5)
# >>> 120
# print factorial(6)
# >>> 720
```
#### File: jjtoledo/Treinamento-Data-Science/print_all_links.py
```python
def get_next_target(page):
start_link = page.find('<a href=')
# Insert your code below here
if (start_link == -1):
return None, 0
else:
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def print_all_links(page):
while True:
url, endpos = get_next_target(page)
if url:
print url
page = page[endpos:]
else:
break
page = '<a href="www.testes.com" fiopajidoa jiopafdopafho <a href="www.jfioafp.com" fdsaf'
print_all_links(page)
```
#### File: jjtoledo/Treinamento-Data-Science/sum_procedure.py
```python
def sum(a, b):
a = a + b
# correct
def sum1(a, b):
a = a + b
return a
print sum1(1, 2)
``` |
{
"source": "jjtolton/pyxfn",
"score": 3
} |
#### File: pyxfn/xfn/xfn.py
```python
import builtins
import itertools
import random
from collections import deque
from functools import reduce
__author__ = "<NAME>"
class Reduced:
def __init__(self, x):
self.value = x
class ThrowReduced(Exception):
pass
class nil:
"""The absence of a value. Used to differentiate where 'None' might be
considered a valid value. Try not to use this as a return value
or pass it around, otherwise it loses effectiveness. Primarily
used in the kwargs."""
def is_reduced(x):
return isinstance(x, Reduced)
def reduced(x):
return Reduced(x)
def comp(*fns):
def composed(x):
return xreduce(lambda a, b: b(a), reversed(fns), x)
return composed
def undreduced(x):
if is_reduced(x):
return x.value
return x
def ensure_reduced(x):
if is_reduced(x):
return x
else:
return reduced(x)
def xreduce(f, coll, init):
# reduce with a cc style escape for "reduced" values
try:
def ccshim(a, b):
res = f(a, b)
if is_reduced(res):
xreduce.res = undreduced(res)
raise ThrowReduced
else:
return res
return reduce(ccshim, coll, init)
except ThrowReduced:
return xreduce.res
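# Example: wrapping a value in ``reduced`` makes ``xreduce`` stop early and
# return that value (the numbers below are purely illustrative):
#
# >>> xreduce(lambda acc, x: reduced(acc) if x > 3 else acc + x, range(10), 0)
# 6 # 0+1+2+3; iteration stops as soon as x == 4 is seen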
def transduce(xfn, fn_start, fn_end, init, coll):
def arrity_shim(*args):
if len(args) == 2:
return fn_start(*args)
elif len(args) == 1:
return fn_end(*args)
else:
raise Exception("This shouldn't have happened. "
"Please open a ticket. Thanks. --J")
f = xfn(arrity_shim)
res = xreduce(f, coll, init)
return f(res)
def eduction(*xfn, multi=False):
*xfns, initial = xfn
xs = iter(initial)
def eductor(*args):
def eductor0():
return None
def eductor1(acc):
return acc
def eductor2(acc, o):
eductor.__next.appendleft(o)
return acc
mapper = {0: eductor0, 1: eductor1, 2: eductor2}
return mapper[len(args)](*args)
eductor.__next = deque()
xfns = comp(*xfns)(eductor)
completed = False
while not completed:
try:
while len(eductor.__next) == 0 and not completed:
if multi is True:
x = xfns(*[None, *next(xs)])
else:
x = xfns(None, next(xs))
if is_reduced(x):
xfns(None)
completed = True
while eductor.__next:
yield eductor.__next.pop()
except StopIteration:
xfns(None)
completed = True
while eductor.__next:
yield eductor.__next.pop()
def xmap(f):
def _map(rf):
def map0():
return rf()
def map1(a):
return rf(a)
def map2(a, b):
return rf(a, f(b))
def mapn(a, b, *c):
return rf(a, f(b, *c))
mapper = {0: map0,
1: map1,
2: map2}
def __map(*args):
return mapper.get(len(args), mapn)(*args)
return __map
return _map
def map(*args):
if len(args) >= 2:
return builtins.map(*args)
else:
return xmap(*args)
def xfilter(pred):
def _filter(rf):
def filter1(res):
return rf(res)
def filter2(res, input):
if pred(input):
return rf(res, input)
else:
return res
mapper = {0: rf,
1: filter1,
2: filter2}
def __filter(*args):
return mapper[len(args)](*args)
return __filter
return _filter
def filter(*args):
if len(args) >= 2:
return builtins.filter(*args)
else:
return xfilter(*args)
def preserving_reduced(rf):
def _preserving_reduced(a, b):
res = rf(a, b)
if is_reduced(res):
return reduced(res)
return res
return _preserving_reduced
def cat(rf):
rf1 = preserving_reduced(rf)
def cat2(res, input):
return xreduce(rf1, input, res)
mapper = {0: rf, 1: rf, 2: cat2}
def _cat(*args):
return mapper[len(args)](*args)
return _cat
def drop(n):
def _drop(rf):
def __drop(*args):
return __drop.mapper[len(args)](*args)
def drop2(res, input):
n = __drop.n
__drop.n -= 1
if n > 0:
return res
else:
return rf(res, input)
mapper = {0: rf, 1: rf, 2: drop2}
__drop.mapper = mapper
__drop.n = n
return __drop
return _drop
def drop_while(pred):
def _drop_while(rf):
def __drop_while(*args):
return __drop_while.mapper[len(args)](*args)
def drop2(res, input):
should_drop = __drop_while.should_drop
if should_drop and pred(input):
return res
else:
__drop_while.should_drop = False
return rf(res, input)
__drop_while.should_drop = True
__drop_while.mapper = {0: rf, 1: rf, 2: drop2}
return __drop_while
return _drop_while
def interpose(sep):
def _interpose(rf):
def __interpose(*args):
return __interpose.mapper[len(args)](*args)
def interpose2(res, input):
if __interpose.started is True:
sepr = rf(res, sep)
if is_reduced(sep):
return sepr
else:
return rf(sepr, input)
else:
__interpose.started = True
return rf(res, input)
__interpose.mapper = {0: rf, 1: rf, 2: interpose2}
__interpose.started = False
return __interpose
return _interpose
def remove(pred):
return xfilter(lambda x: not pred(x))
def distinct(rf):
def _distinct(*args):
return _distinct.mapper[len(args)](*args)
def distinct2(res, input):
if input in _distinct.seen:
return res
else:
_distinct.seen.add(input)
return rf(res, input)
_distinct.mapper = {0: rf, 1: rf, 2: distinct2}
_distinct.seen = set()
return _distinct
def partition_all(n):
def _partition_all(rf):
def __partition_all(*args):
return __partition_all.mapper[len(args)](*args)
def partition_all1(res):
if len(__partition_all.a) != 0:
v = [x for x in __partition_all.a]
__partition_all.a.clear()
res = undreduced(rf(res, v))
return rf(res)
def partition_all2(res, input):
__partition_all.a.append(input)
if len(__partition_all.a) == n:
v = [x for x in __partition_all.a]
__partition_all.a.clear()
return rf(res, v)
else:
return res
__partition_all.mapper = {0: rf, 1: partition_all1, 2: partition_all2}
__partition_all.a = []
return __partition_all
return _partition_all
def take_while(pred):
def _take_while(rf):
def __take_while(*args):
return __take_while.mapper[len(args)](*args)
def take_while2(res, input):
if pred(input):
return rf(res, input)
else:
return reduced(res)
__take_while.mapper = {0: rf, 1: rf, 2: take_while2}
return __take_while
return _take_while
def take_nth(n):
def _take_nth(rf):
def __take_nth(*args):
return __take_nth.mapper[len(args)](*args)
def take_nth2(res, input):
__take_nth.ia += 1
i = __take_nth.ia
if i % n == 0:
return rf(res, input)
else:
return res
__take_nth.mapper = {0: rf, 1: rf, 2: take_nth2}
__take_nth.ia = -1
return __take_nth
return _take_nth
def partition_by(f):
def _partition_by(rf):
def __partition_by(*args):
return __partition_by.mapper[len(args)](*args)
def partition_by1(res):
if len(__partition_by.a) != 0:
v = []
while __partition_by.a:
v.append(__partition_by.a.pop())
__partition_by.a.clear()
res = undreduced(rf(res, v))
return rf(res)
def partition_by2(res, input):
pval = __partition_by.pa.pop()
val = f(input)
__partition_by.pa.append(val)
if pval is nil or val == pval:
__partition_by.a.appendleft(input)
return res
else:
v = []
while __partition_by.a:
v.append(__partition_by.a.pop())
res = rf(res, v)
if not is_reduced(res):
__partition_by.a.appendleft(input)
return res
__partition_by.pa = [nil]
__partition_by.a = deque()
__partition_by.mapper = {0: rf, 1: partition_by1, 2: partition_by2}
return __partition_by
return _partition_by
class Halt:
def __init__(self, value):
self.value = value
def halt_when(pred, retf=nil):
if retf is nil:
retf = None
def _halt_when(rf):
def __halt_when(*args):
return __halt_when.mapper[len(args)](*args)
def halt_when1(res):
if isinstance(res, Halt):
return res.value
return rf(res)
def halt_when2(res, input):
if pred(input):
if retf:
res = retf(rf(res), input)
else:
res = input
return reduced(Halt(res))
else:
return rf(res, input)
__halt_when.mapper = {0: rf, 1: halt_when1, 2: halt_when2}
return __halt_when
return _halt_when
def map_indexed(f):
def _map_indexed(rf):
def __map_indexed(*args):
return __map_indexed.mapper[len(args)](*args)
def map_indexed2(res, input):
__map_indexed.i += 1
return rf(res, f(__map_indexed.i, input))
__map_indexed.i = -1
__map_indexed.mapper = {0: rf, 1: rf, 2: map_indexed2}
return __map_indexed
return _map_indexed
def keep_indexed(f):
def _keep_indexed(rf):
def __keep_indexed(*args):
return __keep_indexed.mapper[len(args)](*args)
def keep_indexed2(res, input):
__keep_indexed.i += 1
i = __keep_indexed.i
v = f(i, input)
if v is None:
return res
else:
return rf(res, v)
__keep_indexed.i = -1
__keep_indexed.mapper = {0: rf, 1: rf, 2: keep_indexed2}
return __keep_indexed
return _keep_indexed
def take(n):
def _take(rf):
def __take(*args):
return __take.mapper[len(args)](*args)
def take2(res, input):
n = __take.na
__take.na -= 1
nn = __take.na
if n > 0:
res = rf(res, input)
else:
res = res
if nn <= 0:
return ensure_reduced(res)
return res
__take.na = n
__take.mapper = {0: rf, 1: rf, 2: take2}
return __take
return _take
def mapcat(f):
return comp(map(f), cat)
def random_sample(prob):
return filter(lambda _: random.random() < prob)
if __name__ == '__main__':
def inc(x):
return x + 1
def identity(x):
return x
print(transduce(comp(map(inc),
map(inc)),
lambda a, b: a + b,
identity,
0,
range(10)))
print(transduce(comp(cat, map(inc)),
lambda a, b: [*a, b],
identity,
[],
[[1, 2], [3, 4]]))
print(list(eduction(cat,
map(inc),
drop_while(lambda x: x < 5),
drop(10),
take(20),
filter(lambda x: x % 2 == 0),
[range(4), range(4), range(20)])))
print(list(eduction(take_while(lambda x: x < 5),
interpose("-"),
distinct,
partition_all(2), range(10))))
print(list(eduction(take_while(lambda x: x < 3), range(10))))
print(list(eduction(take_nth(3), partition_all(2), interpose("-"),
list(range(10)))))
print(transduce(comp(take_nth(3),
partition_all(2),
interpose("-"),
map(lambda x: [x] if isinstance(x, str) else x),
cat,
xmap(str)),
lambda a, b: (a.append(b), a)[1],
lambda res: ''.join(res),
[],
range(20)))
print(list(eduction(partition_by(lambda x: x < 4), range(10))))
print(transduce(halt_when(lambda x: x == 10,
lambda res, input: (
reduce(lambda a, b: a + b, res, input))),
lambda x, y: (x.append(y), x)[1],
identity,
[],
range(20)))
print(list(eduction(map_indexed(lambda a, b: [a, b]), range(10))))
print(list(eduction(keep_indexed(lambda i, x: i + x if i < 10 else None),
range(20))))
print(tuple(eduction(take(10), range(10))))
print(transduce(
comp(random_sample(0.01),
mapcat(lambda x: [x] * x),
take(1000)),
lambda res, x: (res.append(x), res)[-1], lambda res: res, [],
range(10000)))
# reduced stress test #
# while True:
# sum(transduce(
# comp(random_sample(0.01),
# mapcat(lambda x: [x] * x),
# take(1000)),
# lambda res, x: (res.append(x), res)[-1], lambda res: res, [],
# range(10000)))
``` |
{
"source": "jjuch/covid19_model",
"score": 2
} |
#### File: covid19_model/data_20_07_11/analysis_parts.py
```python
import numpy as np
import datetime
import matplotlib.pyplot as plt
from scipy import optimize
from pyswarms.single.global_best import GlobalBestPSO
from pyswarms.utils.plotters import plot_cost_history
from analysis import load_and_process_data
from fracDiffDelay import FOPFDD
def calculate_error_PSO(x, ref_data, Ts, Tmax):
K_list = x[:, 0]
tau_list = x[:, 1]
alpha_list = x[:, 2]
L_list = x[:, 3]
res = []
for i in range(len(L_list)):
K, tau, alpha, L = K_list[i], tau_list[i], alpha_list[i], L_list[i]
model = FOPFDD(K, tau, alpha, L)
t, y = model.step_response(Ts, Tmax, verbose=False)
weight = np.ones(int(Tmax))
weight = [1/t_el for t_el in t]
kwad_difference_per_sample = [(r - y_el)**2 * w for r, y_el, w in zip(ref_data, y, weight)]
# print(K, ', ', tau, ', ', alpha, ', ', L, ' : ', sum(kwad_difference_per_sample))
res.append(sum(kwad_difference_per_sample))
return np.array(res)
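# Sketch of the expected particle layout (values are illustrative): each row
# of x is one particle [K, tau, alpha, L], so a two-particle swarm would pass
#
# x = np.array([[1.2, 26.0, 0.80, 95.0],
# [1.1, 30.0, 0.82, 90.0]])
#
# and receive back an array with one weighted squared-error cost per particle.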
if __name__ == "__main__":
file_name = 'cum_cases_flanders.csv'
t_fl, data_fl = load_and_process_data(file_name, plot=False)
# Important dates National Security Board
start_date = datetime.date(2020, 1, 24) # https://ec.europa.eu/info/live-work-travel-eu/health/coronavirus-response/timeline-eu-action_en
dates = [datetime.date(2020, 3, 18), datetime.date(2020, 5, 18)]
dates.insert(0, start_date)
dates_converted = [(d - start_date).days for d in dates]
print(dict(zip(dates, dates_converted)))
dates_converted.append(len(t_fl))
t_cut = [t_fl[dates_converted[i]:dates_converted[i + 1]] for i in range(len(dates_converted[1:]))]
data_cut = [data_fl[dates_converted[i]:dates_converted[i + 1]] for i in range(len(dates_converted[1:]))]
# plt.figure()
# plt.plot(t_fl, data_fl, linewidth=4, label='original')
# for part in range(len(t_cut)):
# plt.plot(t_cut[part], data_cut[part], label='part {}'.format(part + 1))
# plt.legend()
# plt.show()
####### Part 1
K = 1.2
tau = 26
alpha = 0.8
L = 95
fopfdd1 = FOPFDD(K, tau, alpha, L)
t_1, y_1 = fopfdd1.step_response(1, t_cut[0][-1], verbose=True)
if False:
plt.figure()
plt.plot(t_cut[0], data_cut[0], label='data')
plt.plot(t_1, y_1, label='model')
plt.legend()
plt.xlabel('Time [days]')
plt.ylabel('Cumulative cases')
plt.title('Flanders')
plt.show()
################ FOPFDD model - PSO
# # Create bounds
# K_min, K_max = 1, 1.5
# tau_min, tau_max = 1, 100
# alpha_min, alpha_max = 0.75, 0.85
# L_min, L_max = 50, 150
# bounds = (np.array([K_min, tau_min, alpha_min, L_min]), np.array([K_max, tau_max, alpha_max, L_max]))
# # Initialize swarm
# options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}
# kwargs = {"ref_data": data_cut[0] , "Ts": 1 , "Tmax": t_cut[0][-1]}
# optimizer = GlobalBestPSO(n_particles=10, dimensions=4, options=options, bounds=bounds)
# cost, pos = optimizer.optimize(calculate_error_PSO, iters=50, **kwargs)
# plot_cost_history(cost_history=optimizer.cost_history)
# plt.show()
# pos = np.array([1.2, 38, 0.81572044, 90.25755211])
# fopfdd1_opt = FOPFDD(*pos.tolist())
# t1_opt, data1_opt = fopfdd1_opt.step_response(1, t_cut[0][-1], verbose=True)
# plt.figure()
# plt.plot(t_cut[0], data_cut[0], label='data')
# plt.plot(t1_opt, data1_opt, label='model')
# plt.legend()
# plt.xlabel('Time [days]')
# plt.ylabel('Cumulative cases')
# plt.title('Flanders')
# plt.show()
####### Part 2
K = 6
tau = 38
alpha = 0.974
L = 45
fopfdd2 = FOPFDD(K, tau, alpha, L)
t_2, y_2 = fopfdd2.step_response(1, 200, verbose=True)
if True:
plt.figure()
plt.plot(t_cut[1], data_cut[1], label='data')
plt.plot(t_2, y_2, label='model')
plt.legend()
plt.xlabel('Time [days]')
plt.ylabel('Cumulative cases')
plt.title('Flanders')
plt.show()
``` |
{
"source": "jjuch/simupy",
"score": 2
} |
#### File: simupy/systems/symbolic.py
```python
import sympy as sp
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.mechanics.functions import find_dynamicsymbols
from simupy.utils.symbolic import (lambdify_with_vector_args, grad,
DEFAULT_LAMBDIFY_MODULES)
from simupy.array import Array
from simupy.systems import DynamicalSystem as DynamicalSystemBase
DEFAULT_CODE_GENERATOR = lambdify_with_vector_args
DEFAULT_CODE_GENERATOR_ARGS = {
'modules': DEFAULT_LAMBDIFY_MODULES
}
empty_array = lambda: []
class DynamicalSystem(DynamicalSystemBase):
def __init__(self, state_equation=None, state=None, input_=None,
output_equation=None, constants_values={}, dt=0,
initial_condition=None, code_generator=None,
code_generator_args={}):
"""
DynamicalSystem constructor, used to create systems from symbolic
expressions.
Parameters
----------
state_equation : array_like of sympy Expressions, optional
Vector valued expression for the derivative of the state.
state : array_like of sympy symbols, optional
Vector of symbols representing the components of the state, in the
desired order, matching state_equation.
input_ : array_like of sympy symbols, optional
Vector of symbols representing the components of the input, in the
desired order. state_equation may depend on the system input. If
the system has no state, the output_equation may depend on the
system input.
output_equation : array_like of sympy Expressions
Vector valued expression for the output of the system.
constants_values : dict
Dictionary of constants substitutions.
dt : float
Sampling rate of system. Use 0 for continuous time systems.
initial_condition : array_like of numerical values, optional
Array or Matrix used as the initial condition of the system.
Defaults to zeros of the same dimension as the state.
code_generator : callable, optional
Function to be used as code generator.
code_generator_args : dict, optional
Dictionary of keyword args to pass to the code generator.
By default, the code generator uses a wrapper for ``sympy.lambdify``.
You can change it by passing the system initialization arguments
``code_generator`` (the function) and additional keyword arguments to
the generator in a dictionary ``code_generator_args``. You can change
the defaults for future systems by changing the module values. See the
readme or docs for an example.
"""
self.constants_values = constants_values
self.state = state
self.input = input_
self.code_generator = code_generator or DEFAULT_CODE_GENERATOR
code_gen_args_to_set = DEFAULT_CODE_GENERATOR_ARGS.copy()
code_gen_args_to_set.update(code_generator_args)
self.code_generator_args = code_gen_args_to_set
self.state_equation = state_equation
self.output_equation = output_equation
self.initial_condition = initial_condition
self.dt = dt
self.validate()
@property
def state(self):
return self._state
@state.setter
def state(self, state):
if state is None: # or other checks?
state = empty_array()
if isinstance(state, sp.Expr):
state = Array([state])
self.dim_state = len(state)
self._state = state
@property
def input(self):
return self._inputs
@input.setter
def input(self, input_):
if input_ is None: # or other checks?
input_ = empty_array()
if isinstance(input_, sp.Expr): # check it's a single dynamicsymbol?
input_ = Array([input_])
self.dim_input = len(input_)
self._inputs = input_
@property
def state_equation(self):
return self._state_equation
@state_equation.setter
def state_equation(self, state_equation):
if state_equation is None: # or other checks?
state_equation = empty_array()
else:
assert len(state_equation) == len(self.state)
assert find_dynamicsymbols(state_equation) <= (
set(self.state) | set(self.input)
)
assert state_equation.atoms(sp.Symbol) <= (
set(self.constants_values.keys())
| set([dynamicsymbols._t])
)
self._state_equation = state_equation
self.update_state_equation_function()
self.state_jacobian_equation = grad(self.state_equation, self.state)
self.update_state_jacobian_function()
self.input_jacobian_equation = grad(self.state_equation, self.input)
self.update_input_jacobian_function()
@property
def output_equation(self):
return self._output_equation
@output_equation.setter
def output_equation(self, output_equation):
if isinstance(output_equation, sp.Expr):
output_equation = Array([output_equation])
if output_equation is None and self.dim_state == 0:
output_equation = empty_array()
else:
if output_equation is None:
output_equation = self.state
assert output_equation.atoms(sp.Symbol) <= (
set(self.constants_values.keys())
| set([dynamicsymbols._t])
)
if self.dim_state:
                # TODO: augment state to allow inputs in the output equation if two
                # systems (one stateless and one stateful) are placed in parallel.
                assert find_dynamicsymbols(output_equation) <= set(self.state)  # or set(self.input)
else:
assert find_dynamicsymbols(output_equation) <= set(self.input)
self.dim_output = len(output_equation)
self._output_equation = output_equation
self.update_output_equation_function()
def update_state_equation_function(self):
if not self.dim_state or self.state_equation == empty_array():
return
self.state_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state) +
sp.flatten(self.input),
self.state_equation.subs(self.constants_values),
**self.code_generator_args
)
def update_state_jacobian_function(self):
if not self.dim_state or self.state_equation == empty_array():
return
self.state_jacobian_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state) +
sp.flatten(self.input),
self.state_jacobian_equation.subs(self.constants_values),
**self.code_generator_args
)
def update_input_jacobian_function(self):
# TODO: state-less systems should have an input/output jacobian
if not self.dim_state or self.state_equation == empty_array():
return
self.input_jacobian_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state) +
sp.flatten(self.input),
self.input_jacobian_equation.subs(self.constants_values),
**self.code_generator_args
)
def update_output_equation_function(self):
if not self.dim_output or self.output_equation == empty_array():
return
if self.dim_state:
self.output_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state),
self.output_equation.subs(self.constants_values),
**self.code_generator_args
)
else:
self.output_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.input),
self.output_equation.subs(self.constants_values),
**self.code_generator_args
)
def prepare_to_integrate(self):
self.update_output_equation_function()
self.update_state_equation_function()
def copy(self):
copy = self.__class__(
state_equation=self.state_equation,
state=self.state,
input_=self.input,
output_equation=self.output_equation,
constants_values=self.constants_values,
dt=self.dt
)
copy.output_equation_function = self.output_equation_function
copy.state_equation_function = self.state_equation_function
return copy
def equilibrium_points(self, input_=None):
return sp.solve(self.state_equation, self.state, dict=True)
class MemorylessSystem(DynamicalSystem):
"""
A system with no state.
    With no input, it can represent a signal (a function of time only). For example,
a stochastic signal could interpolate points and use prepare_to_integrate
to re-seed the data.
"""
def __init__(self, input_=None, output_equation=None, **kwargs):
"""
DynamicalSystem constructor
Parameters
----------
input_ : array_like of sympy symbols
Vector of symbols representing the components of the input, in the
desired order. The output may depend on the system input.
output_equation : array_like of sympy Expressions
Vector valued expression for the output of the system.
"""
super().__init__(
input_=input_, output_equation=output_equation, **kwargs)
@property
def state(self):
return self._state
@state.setter
def state(self, state):
if state is None: # or other checks?
state = empty_array()
else:
raise ValueError("Memoryless system should not have state or " +
"state_equation")
self.dim_state = len(state)
self._state = state
``` |
{
"source": "jjude/euler",
"score": 4
} |
#### File: euler/003/soln.py
```python
import sys
sys.setrecursionlimit(10000)
def findLargestPrimeFactorOf(number, currentPrime = 2):
if number <= 1:
return currentPrime
if number % currentPrime == 0:
        return findLargestPrimeFactorOf(number // currentPrime, currentPrime)
else:
return findLargestPrimeFactorOf(number, currentPrime + 1)
print(findLargestPrimeFactorOf(600851475143))
# without recursion
p = 2
n = 600851475143
while n > 1:
if n % p == 0:
        n = n // p
else:
p = p + 1
print(p)
``` |
{
"source": "jjuiddong/KIST-MachineLearning-Tutorial-2016",
"score": 3
} |
#### File: KIST-MachineLearning-Tutorial-2016/KIST_MachineLearning/clustering_kmeans.py
```python
import sklearn.decomposition
import sklearn.preprocessing
import sklearn.cluster
import numpy as np
import pandas as pd
import elice_utils
import kmeans_utils
def main():
champs_df = pd.read_pickle('champ_df.pd')
# 1
champ_pca_array = run_PCA(champs_df, 2)
champ_classes = run_kmeans(champ_pca_array, 5, [0, 30, 60, 90, 120])
# 4
print(elice_utils.plot_champions(champs_df, champ_pca_array, champ_classes))
# 5
print(get_champs_by_cluster(champs_df, champ_classes, 3))
def run_kmeans(champ_pca_array, num_clusters, initial_centroid_indices):
# Implement K-Means algorithm using sklearn.cluster.KMeans.
# 2
classifier = sklearn.cluster.KMeans(n_clusters = num_clusters,
init = np.array(champ_pca_array[initial_centroid_indices]), n_init=1)
classifier.fit(champ_pca_array)
# 3
return classifier.labels_
def run_PCA(champs_df, num_components):
return kmeans_utils.run_PCA(champs_df, num_components)
def get_champs_by_cluster(champs_df, champ_classes, cluster_idx):
return champs_df.index[champ_classes == cluster_idx].values
if __name__ == '__main__':
main()
```
#### File: KIST-MachineLearning-Tutorial-2016/KIST_MachineLearning/clustering_lol.py
```python
import sklearn.decomposition
import sklearn.preprocessing
import numpy as np
import pandas as pd
import elice_utils
import scipy.spatial.distance
import operator
def main():
# 1
champs_df = pd.read_pickle('champ_df.pd')
champ_pca_array = run_PCA(champs_df, 2)
# 5
elice_utils.plot_champions(champs_df, champ_pca_array)
print(get_closest_champions(champs_df, champ_pca_array, "Ashe", 10))
def run_PCA(champs_df, num_components):
# 2
# Normalize Attributes
scaler = sklearn.preprocessing.MinMaxScaler()
for champs_dim in champs_df:
champs_df[champs_dim] = scaler.fit_transform(np.array(champs_df[champs_dim]).astype('float64'))
# 3
# Run PCA
pca = sklearn.decomposition.PCA(n_components=num_components)
pca.fit(champs_df)
# 4
champ_pca_array = pca.transform(champs_df)
return champ_pca_array
def get_closest_champions(champs_df, champ_pca_array, champ_name, num_champions):
# Get the champion index
champ_list = champs_df.index.tolist()
try:
champ_idx = champ_list.index(champ_name)
except:
return "%s is not in the champion list" % champ_name
# Get the euclidean distance
# Use scipy.spatial.distance.euclidean(A, B)
distance_from_current_champ = {}
for i in range(0, len(champ_list)):
distance_from_current_champ[champ_list[i]] = \
scipy.spatial.distance.euclidean(
champ_pca_array[champ_idx],
champ_pca_array[i]
)
# Sort dictionary according to the value
sorted_champs = sorted(distance_from_current_champ.items(), key=operator.itemgetter(1))
# Return top 10 champs except current one
if num_champions > len(champ_list) - 1:
return "num_champions is too large"
else:
return sorted_champs[1:1 + num_champions]
if __name__ == '__main__':
main()
```
#### File: KIST-MachineLearning-Tutorial-2016/KIST_MachineLearning/clustering_pandas.py
```python
import numpy as np
import pandas as pd
def main():
do_exercise()
def do_exercise():
# 1
# 2
aapl_bars = pd.read_csv("./AAPL.csv")
# 3
date_index = aapl_bars.pop('Date')
aapl_bars.index = pd.to_datetime(date_index)
# 4
twoseries_dict = {'Open': aapl_bars.Open, 'Close': aapl_bars.Close, 'Volume': aapl_bars.Volume}
df = pd.DataFrame(twoseries_dict)
#5
print(df[:]['2003-04' : '1989'])
return df
if __name__ == "__main__":
main()
```
#### File: KIST-MachineLearning-Tutorial-2016/KIST_MachineLearning/perceptron_feedforwardpropagation.py
```python
from sklearn.neural_network import MLPClassifier
import numpy as np
#import elice_utils
def main():
# 1
X, Y = read_data('case_2.txt') # try to use different datasets
clf = train_MLP_classifier(X, Y)
report_clf_stats(clf, X, Y)
#elice_utils.visualize(clf, X, Y)
def train_MLP_classifier(X, Y):
# 2
clf = MLPClassifier(hidden_layer_sizes=(1000,1000,1000,1000,1000,1000)) # try changing the number of hidden layers
clf.fit(X, Y)
return clf
def report_clf_stats(clf, X, Y):
# 1. measure accuracy
hit = 0
miss = 0
for x, y in zip(X, Y):
if clf.predict([x])[0] == y:
hit += 1
else:
miss += 1
print("Accuracy: %.1lf%%" % float(100 * hit / (hit + miss)))
def read_data(filename):
X = []
Y = []
with open(filename) as fp:
N, M = fp.readline().split()
N = int(N)
M = int(M)
for i in range(N):
line = fp.readline().split()
for j in range(M):
X.append([i, j])
Y.append(int(line[j]))
X = np.array(X)
Y = np.array(Y)
return (X, Y)
if __name__ == "__main__":
main()
``` |
{
"source": "jjulian91/NLP_QA_Python",
"score": 3
} |
#### File: mainframe/do_magic/dataQuery.py
```python
import mysql.connector
from mysql.connector import Error
import do_magic.answerFinder as answer
#todo refactor the search -- do non %like% search first.. if no result then do %like%.
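# One possible shape for that refactor (sketch only, nothing below calls it):
#   exact = search_player_dB_noLike(word)
#   return exact if exact else search_player_dB(word)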
def dbQuery(select_statement):
try:
connection = mysql.connector.connect(host='localhost',
database='nba_facts',
user='root',
password='<PASSWORD>',
auth_plugin='mysql_native_password')
if connection.is_connected():
db_Info = connection.get_server_info()
# print("Connected to MySQL Server version ", db_Info)
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
# print("Your connected to database: ", record)
query = connection.cursor()
query.execute(select_statement)
records = query.fetchall()
return records
except Error as e:
print("Error while connecting to MySQL", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# print("MySQL connection is closed")
def dbInsert(statement):
try:
connection = mysql.connector.connect(host='localhost',
database='nba_facts',
user='root',
password='<PASSWORD>',
auth_plugin='mysql_native_password')
if connection.is_connected():
db_Info = connection.get_server_info()
# print("Connected to MySQL Server version ", db_Info)
cursor = connection.cursor()
cursor.execute("select database();")
cursor.execute(statement)
connection.commit()
except Error as e:
print("Error while connecting to MySQL", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# print("MySQL connection is closed")
def search_phrase_DB(word):
return dbQuery("select * from phrase join lookup_table as LU on phrase.FK=LU.PK where Phrase"
" like " + "'%" + word + "%'")
def search_EXACT_phrase(word):
return dbQuery("select * from phrase join lookup_table as LU on phrase.FK=LU.PK where LOWER(Phrase) = LOWER(" + "'" + word + "')")
def search_player_dB(word):
return dbQuery("select * from player_data where LOWER(name) LIKE LOWER ('%" + word + "%')")
def search_stats_DB(word):
return dbQuery("select * from stats where LOWER(name) LIKE LOWER ('%" + word + "%')")
def search_player_dB_noLike(word):
return dbQuery("select * from player_data where LOWER(name) = LOWER ('" + word + "')")
def search_stats_DB_noLike(word):
return dbQuery("select * from stats where LOWER(name) = LOWER ('" + word + "')")
def search_stats_DB_exact_match(word): #duplicate for playerDB and Phrase DB
return dbQuery("select * from stats where name = "+ word + "")
# the following functions require word being the RESOLVED column NAME from the stats table.
def search_stats_max_DB(columnName, searchYear):
return dbQuery(
"SELECT * FROM stats WHERE Year = "+ searchYear + " ORDER BY "+columnName+" DESC LIMIT 1")
def search_stats_max_no_year_DB(columnName):
return dbQuery(
"SELECT * FROM stats WHERE "+ columnName +" = ( SELECT MAX("+columnName+") FROM stats) ORDER BY "+columnName
+" DESC LIMIT 1")
def search_stats_min_DB(columnName, searchYear):
return dbQuery(
"SELECT * FROM stats WHERE "+ columnName +" IS NOT NULL) AND Year = "+ searchYear + " ORDER BY "
+ columnName+" ASC LIMIT 1")
def search_stats_min_no_year_DB(columnName):
return dbQuery(
"SELECT * FROM stats WHERE "+ columnName +" = ( SELECT MIN("+columnName+") FROM stats WHERE "+ columnName +
" IS NOT NULL) ORDER BY "+columnName+ " ASC LIMIT 1")
```
#### File: mainframe/do_magic/voila.py
```python
import nltk
from nltk.corpus import stopwords
from spellchecker import SpellChecker
from nltk.tag import StanfordPOSTagger
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.chunk import conlltags2tree
from nltk.tree import Tree
counter = y = 0
check = SpellChecker()
def tag_Sentence(tokenized):
import os
jarpath = "C:/Program Files/Java/jdk-11.0.2/bin/java.exe"
java_path = jarpath
os.environ['JAVAHOME'] = java_path
dirname = os.path.dirname(__file__)
jar = os.path.join(dirname, '../../stanford-postagger-full-2018-10-16/stanford-postagger-3.9.2.jar')
model = os.path.join(dirname, '../../stanford-postagger-full-2018-10-16/models/english-left3words-distsim.tagger')
stanfordPOS = StanfordPOSTagger(model, jar, encoding='utf-8')
postaggedwords = stanfordPOS.tag(tokenized)
return postaggedwords
# Process text
def process_text(txt_file):
token_text = word_tokenize(txt_file)
return token_text
# NLTK POS and NER taggers
def nltk_tagger(token_text):
tagged = nltk.pos_tag(token_text)
ne_tagged = nltk.ne_chunk(tagged)
return (ne_tagged)
# Tag tokens with standard NLP BIO tags
def bio_tagger(ne_tagged):
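    # e.g. [("LeBron", "PERSON"), ("James", "PERSON"), ("scored", "O")]
    #  ->  [("LeBron", "B-PERSON"), ("James", "I-PERSON"), ("scored", "O")]
    # (the tokens above are illustrative)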
bio_tagged = []
prev_tag = "O"
for token, tag in ne_tagged:
if tag == "O": #O
bio_tagged.append((token, tag))
prev_tag = tag
continue
if tag != "O" and prev_tag == "O": # Begin NE
bio_tagged.append((token, "B-"+tag))
prev_tag = tag
elif prev_tag != "O" and prev_tag == tag: # Inside NE
bio_tagged.append((token, "I-"+tag))
prev_tag = tag
elif prev_tag != "O" and prev_tag != tag: # Adjacent NE
bio_tagged.append((token, "B-"+tag))
prev_tag = tag
return bio_tagged
# Create tree
def stanford_tree(bio_tagged):
tokens, ne_tags = zip(*bio_tagged)
pos_tags = [pos for token, pos in pos_tag(tokens)]
conlltags = [(token, pos, ne) for token, pos, ne in zip(tokens, pos_tags, ne_tags)]
ne_tree = conlltags2tree(conlltags)
return ne_tree
# Parse named entities from tree
def structure_ne(ne_tree):
ne = []
for subtree in ne_tree:
if type(subtree) == Tree: # If subtree is a noun chunk, i.e. NE != "O"
ne_label = subtree.label()
ne_string = " ".join([token for token, pos in subtree.leaves()])
ne.append((ne_string, ne_label))
return ne
def spell_check(tokenized):
spelledWords = check.correction(tokenized)
if spelledWords != tokenized:
print(f'did you mean, {spelledWords}?')
inputval = input('[y/N]: ')
if inputval == 'y':
return spelledWords
else:
# lets go another route and find some other matches
candidates = check.candidates(tokenized)
if len(candidates) > 1:
for i, candidate in enumerate(candidates):
print(f'[{i}] [{candidate}]')
# if yes to any of these then we return this and run a db query on this
print(f'[{len(candidates)}] [None]')
val = int(input('here are some more options, please choose one: '))
if val >= len(candidates):
return
to_list = list(candidates)
return to_list[val]
else:
return tokenized
def get_most_recent(statresults): #finish implementing
max = statresults[0]
for entry in statresults:
if entry[2] > max[2]:
max = entry
return max
def get_basewords(tokenized):
getBase = WordNetLemmatizer()
baseWords = []
for word in tokenized:
baseWords.append(getBase.lemmatize(word))
return baseWords
# we can create/delete stop words as we please to better suit our domain.
def get_stopwords(tokenized):
stop_words = set(stopwords.words('english')) - {"where", "over"}
stop_words.add("go")
_stopwords = [words for words in tokenized if not words in stop_words]
return _stopwords
def runstat():
global counter, y
counter += 1
value = input('Was this helpful? [y/N]: ')
if value == 'y':
y += 1
return print(f'percentage accurate: {float(y / counter)}')
def singlequoteSQLfix(val):
index = val.find("'")
return val[:index] + "''" + val[index + 1:] if index != -1 else val
def addToList(resultsList, results):
for result in results:
resultsList.append(result)
``` |
{
"source": "jjulien/azure-query",
"score": 3
} |
#### File: cli/commands/__init__.py
```python
import argparse
class AQCLIException(Exception):
pass
class AQCommand:
description = "This command has no specific description"
def __init__(self, name, login):
if not name:
raise AQCLIException("You must pass a name when creating an application")
self.name = name
self.login = login
self.parser = argparse.ArgumentParser()
subparsers = self.parser.add_subparsers(help=self.description)
self.sub_parser = subparsers.add_parser(name)
self.sub_parser.add_argument("--beta", action="store_true", default=False)
self.parse_args()
# This method should be used if your application requires any arg parsing
def parse_args(self):
pass
def display_extra_help(self):
pass
def display_help(self):
self.parser.print_help()
self.display_extra_help()
def run(self):
raise AQCLIException(f"No run method was found for application {self.name}")
class ODataAPICommand(AQCommand):
query_params = {}
# OData Query Options Described Here
# https://docs.microsoft.com/en-us/graph/query-parameters#odata-system-query-options
odata_query_options = ['count', 'expand', 'filter', 'format', 'orderby', 'search', 'select', 'skip', 'top']
def __init__(self, name, login):
super().__init__(name, login)
self.add_odata_query_params()
def parse_args(self):
self.sub_parser.add_argument("--count", help="Retrieves the total count of matching resources.\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#count-parameter")
self.sub_parser.add_argument("--expand", help="Retrieves related resources.\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#expand-parameter")
self.sub_parser.add_argument("--filter", help="Filters results (rows).\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#filter-parameter")
self.sub_parser.add_argument("--format", help="Returns the results in the specified media format.\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#format-parameter")
self.sub_parser.add_argument("--orderby", help="Orders results.\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#orderby-parameter")
self.sub_parser.add_argument("--search", help="Returns results based on search criteria. Currently supported "
"on messages and person collections."
"https://docs.microsoft.com/en-us/graph/query-parameters#search-parameter")
self.sub_parser.add_argument("--select", help="Filters properties (columns).\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#select-parameter")
self.sub_parser.add_argument("--skip", help="Indexes into a result set. Also used by some APIs to implement "
"paging and can be used together with $top to manually page "
"results.\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#skip-parameter")
self.sub_parser.add_argument("--top", help="Sets the page size of results.\nDetails: "
"https://docs.microsoft.com/en-us/graph/query-parameters#top-parameter")
def add_odata_query_params(self):
parsed = vars(self.parser.parse_args())
for option in self.odata_query_options:
if option in parsed.keys():
# The v1 endpoint still requires some options to be prefixed with $, so prepending this to be safe
self.query_params[f'${option}'] = parsed[option]
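        # For example, invoking a subcommand with "--filter contains(displayName,'a') --top 5"
        # leaves query_params holding '$filter' and '$top' with those values, while the
        # remaining OData options are carried along with value None (values illustrative).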
``` |
{
"source": "jjulik/keezer-pi",
"score": 3
} |
#### File: keezer-pi/client/keezer_client.py
```python
import os
import time
import json
import logging
import requests
import traceback
import time
import ConfigParser
import io
import threading
import RPi.GPIO as GPIO
# The primary sensor will be used for all sensor readings and determines
# whether we need to run the freezer.
# The secondary sensor will be used as a backup if the primary fails
# until the primary comes online again.
config_file = os.environ.get('KEEZER_CLIENT_SETTINGS')
if config_file is None:
config_file = 'keezer_client.cfg'
with open(config_file, 'r') as f:
config_text = f.read()
default_config = { 'secondary_temp': None, 'url': None, 'api_token': None, 'temperature': 40.0, 'deviation': 2.0, 'min_runtime': 60, 'cooldown': 300, 'fridge_name': 'fridgey' }
config = ConfigParser.SafeConfigParser(default_config, allow_no_value=True)
config.readfp(io.BytesIO(config_text))
primary_sensor_name = config.get('sensor', 'primary_temp')
secondary_sensor_name = config.get('sensor', 'secondary_temp')
relay_gpio_pin = config.getint('sensor', 'relay_pin')
server_url = config.get('server', 'url')
api_token = config.get('server', 'api_token')
desired_temperature = config.getfloat('fridge', 'temperature')
deviation = config.getfloat('fridge', 'deviation')
min_runtime = config.getint('fridge', 'min_runtime')
cooldown = config.getint('fridge', 'cooldown')
fridge_name = config.get('fridge', 'fridge_name')
max_temperature = desired_temperature + deviation
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(filename='keezer_client.log',level=logging.ERROR,format=FORMAT)
logger = logging.getLogger('keezer_client')
def post_async(url, headers, data):
try:
requests.post(url, headers=headers, data=json.dumps(data))
except Exception:
# don't log these errors
# it could fill up the log fast if the server goes down
return
def post_exception(formatted_exception):
if server_url is None or api_token is None:
return
url = server_url + 'api/error'
headers = { 'Authorization': api_token, 'Content-Type': 'application/json' }
data = { 'time': time.time(), 'error': formatted_exception }
# fire and forget
postErrorThread = threading.Thread(target=post_async, args=(url,headers,data))
postErrorThread.start()
def post_reading(reading):
if server_url is None or api_token is None:
return
url = server_url + 'api/reading'
headers = { 'Authorization': api_token, 'Content-Type': 'application/json' }
data = { 'time': reading.time, 'reading': reading.reading, 'sensorDescription': reading.sensor_name }
postReadingThread = threading.Thread(target=post_async, args=(url,headers,data))
postReadingThread.start()
class Reading():
def __init__(self, sensor_name, reading, reading_time):
self.sensor_name = sensor_name
self.reading = reading
self.time = reading_time
# Inherit from dictionary for easy JSON serialization.
class Sensor(dict):
def __init__(self, name):
self.file_name = '/sys/bus/w1/devices/' + name + '/w1_slave'
dict.__init__(self, name=name, file_name=self.file_name)
self.name = name
def get_reading(self):
try:
f = open(self.file_name, 'r')
lines = f.readlines()
reading_time = time.time()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
raw_temp = float(lines[1][equals_pos+2:])
#murica
temp_f = (raw_temp / 1000.0) * 9.0 / 5.0 + 32.0
return Reading(self.name, temp_f, reading_time)
except Exception:
logger.exception('Error reading sensor value for {0}'.format(self.name))
post_exception(traceback.format_exc())
return None
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
primary_sensor = Sensor(primary_sensor_name)
secondary_sensor = None
if secondary_sensor_name is not None:
secondary_sensor = Sensor(secondary_sensor_name)
try:
# setup gpio pin for relay
# we are using BCM numbering
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay_gpio_pin, GPIO.OUT)
GPIO.output(relay_gpio_pin, False)
# keep track of when the fridge was last turned on or off
# we don't want to be turning it on/off frequently
fridge_turned_on = 0
fridge_turned_off = 0
fridge_enabled = False
while True:
primary_reading = primary_sensor.get_reading()
secondary_reading = None
if secondary_sensor is not None:
secondary_reading = secondary_sensor.get_reading()
if primary_reading is not None:
post_reading(primary_reading)
if secondary_reading is not None:
post_reading(secondary_reading)
reading_to_use = primary_reading
if reading_to_use is None:
reading_to_use = secondary_reading
if reading_to_use is None and fridge_enabled:
GPIO.output(relay_gpio_pin, False)
fridge_enabled = False
fridge_turned_off = time.time()
power_reading = Reading(fridge_name, 0, fridge_turned_off)
post_reading(power_reading)
if reading_to_use is not None:
if fridge_enabled:
run_time = time.time() - fridge_turned_on
if reading_to_use.reading < desired_temperature and run_time >= min_runtime:
GPIO.output(relay_gpio_pin, False)
fridge_enabled = False
fridge_turned_off = time.time()
power_reading = Reading(fridge_name, 0, fridge_turned_off)
post_reading(power_reading)
else:
time_since_last_ran = time.time() - fridge_turned_off
if reading_to_use.reading > max_temperature and time_since_last_ran >= cooldown:
GPIO.output(relay_gpio_pin, True)
fridge_enabled = True
fridge_turned_on = time.time()
power_reading = Reading(fridge_name, 1, fridge_turned_on)
post_reading(power_reading)
time.sleep(1)
except KeyboardInterrupt:
GPIO.cleanup()
exit(1)
except Exception:
GPIO.cleanup()
logger.exception('Fatal error in main loop')
post_exception(traceback.format_exc())
``` |
{
"source": "jj-umn/metaquantome",
"score": 2
} |
#### File: metaquantome/modules/run_viz.py
```python
import os
import subprocess
import json
from metaquantome.util.utils import BASE_DIR
from metaquantome.classes.SampleGroups import SampleGroups
def run_viz(plottype, img, infile, strip=None,
mode=None, meancol=None, nterms='5', target_rank=None, barcol=6, # barplot, stacked_bar
textannot=None, fc_name=None, fc_corr_p=None, flip_fc=False, gosplit=False, # volcano
sinfo=None, filter_to_sig=False, alpha='0.05', # heatmap
calculate_sep=False, # pca
whichway=None, name=None, id=None, target_onto=None, # ft_dist
width='5', height='5', tabfile=None, feature_cluster_size=2, sample_cluster_size=2):
"""
Wrapper script for the command-line R visualizations
The documentation for each of the arguments is in cli.py
:return: None
"""
r_script_path = os.path.join(BASE_DIR, 'modules', 'viz.R')
cmd = ['Rscript', '--vanilla', r_script_path, plottype, img, infile]
if plottype == "bar":
cmd += [mode, meancol, nterms, width, height, target_rank, target_onto, barcol, tabfile]
elif plottype == "volcano":
cmd += [str(textannot), fc_name, fc_corr_p, flip_fc, gosplit, width, height, tabfile]
elif plottype == "heatmap":
samp_grps = SampleGroups(sinfo)
all_intcols_str = ','.join(samp_grps.all_intcols)
json_dump = json.dumps(samp_grps.sample_names)
cmd += [all_intcols_str, json_dump, filter_to_sig, alpha, width, height, strip, feature_cluster_size, sample_cluster_size, fc_corr_p]
elif plottype == "pca":
samp_grps = SampleGroups(sinfo)
all_intcols_str = ','.join(samp_grps.all_intcols)
json_dump = json.dumps(samp_grps.sample_names)
cmd += [all_intcols_str, json_dump, calculate_sep, width, height, strip]
elif plottype == "ft_dist":
cmd += [whichway, name, id, meancol, nterms, width, height,
target_rank, target_onto, barcol, tabfile]
if plottype == "stacked_bar":
samp_grps = SampleGroups(sinfo)
all_intcols_str = ','.join(samp_grps.all_intcols)
json_dump = json.dumps(samp_grps.sample_names)
cmd += [all_intcols_str, json_dump, nterms, target_rank, width, height, tabfile]
else:
ValueError("Wrong plot type. Must be bar, volcano, heatmap, ft_dist, stacked_bar, or pca.")
# ensure that all elements are strings (even booleans, etc)
cmd_string = [str(elem) for elem in cmd]
# run the visualizations, suppressing any output to stdout
with open(os.devnull, 'w') as fnull:
subprocess.run(cmd_string, stdout=fnull, check=True)
```
#### File: metaquantome/util/expand_io.py
```python
import pandas as pd
from metaquantome.util.check_args import function_check, tax_check
from metaquantome.util.utils import MISSING_VALUES
def read_and_join_files(mode, pep_colname_int, pep_colname_func, pep_colname_tax, samp_grps, int_file, tax_file=None,
func_file=None, func_colname=None, tax_colname=None):
"""
Reads in intensity, function, and/or taxonomy files, and joins all on the peptide column.
:param pep_colname_func: name of the peptide column in the function file
:param pep_colname_tax: name of the peptide column in the taxonomy file
:param pep_colname_int: name of the peptide column in the intensity file
:param mode: analysis mode - either 'f', 't', or 'ft'
:param samp_grps: SampleGroups() object
:param int_file: path to intensity file
:param tax_file: path to taxonomy file. required for 't' and 'ft' modes
:param func_file: path to function file. required for 'f' and 'ft' modes
:param func_colname: column name of functional annotation in function file
:param tax_colname: column name of taxonomic annotation in taxonomy file
:return: joined dataframe; missing intensities as 0.
"""
# intensity
int = read_intensity_table(int_file, samp_grps, pep_colname_int)
# start df list
dfs = [int]
if mode == 't' or mode == 'ft':
tax_check(tax_file, tax_colname)
tax = read_taxonomy_table(tax_file, pep_colname_tax, tax_colname)
dfs.append(tax)
if mode == 'f' or mode == 'ft':
function_check(func_file, func_colname)
func = read_function_table(func_file, pep_colname_func, func_colname)
dfs.append(func)
# join all
dfs_joined = join_on_peptide(dfs)
dfs_joined.index.name = 'peptide'
return dfs_joined
def read_intensity_table(file, samp_grps, pep_colname_int):
"""
read the file containing peptide intensities to a pandas dataframe.
:param file: path to intensity file. must be tab-separated
:param samp_grps: SampleGroups object
:param pep_colname_int: name of peptide column in intensity table
:return: intensity table; missing values as 0
"""
# read in data
df = pd.read_table(file, sep="\t", index_col=pep_colname_int,
dtype=samp_grps.dict_numeric_cols,
na_values=MISSING_VALUES,
low_memory=False)
# only intcols (in case table has extra cols)
int_df = df.loc[:, samp_grps.all_intcols]
# drop rows where all intensities are NA
int_df.dropna(axis=0, how="all", inplace=True)
# change remaining missing intensities to 0, for arithmetic (changed back to NA for export)
values = {x: 0 for x in samp_grps.all_intcols}
int_df.fillna(values, inplace=True)
return int_df
def read_taxonomy_table(file, pep_colname_tax, tax_colname):
"""
read taxonomy table, such as Unipept output.
Peptides with no annotation are dropped.
:param file: path to taxonomy file
:param pep_colname_tax: string, peptide sequence column name
:param tax_colname: string, taxonomy identifier column name
:return: a pandas dataframe where index is peptide sequence and the single column is the associated ncbi taxid
"""
# always read as character
df = pd.read_table(file, sep="\t", index_col=pep_colname_tax,
na_values=MISSING_VALUES, dtype={tax_colname: object})
# take only specified column
df_tax = df.loc[:, [tax_colname]]
# drop nas
df_tax.dropna(inplace=True, axis=0)
return df_tax
def read_function_table(file, pep_colname_func, func_colname):
"""
read functional annotation table to Pandas dataframe. Peptides
with no annotation are dropped.
:param file: path to tab-separated function file
:param pep_colname_func: name of peptide column in function column
:param func_colname: name of functional annotation column in function table
:return: pandas dataframe where index is peptide sequence and single column is associated functional annotation.
"""
df = pd.read_table(file, sep="\t", index_col=pep_colname_func,
na_values=MISSING_VALUES)
df_new = df[[func_colname]].copy()
# drop nas
df_new.dropna(inplace=True, axis=0)
return df_new
def join_on_peptide(dfs):
"""
Inner join a list of dataframes on the index.
:param dfs: list of pandas dataframes
:return: joined dataframe.
"""
# join inner means that only peptides present in all dfs will be kept
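    # e.g. if the intensity frame has peptides {A, B, C} and the taxonomy frame
    # has {B, C, D}, only rows B and C survive the join (illustrative example).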
df_all = dfs.pop(0)
while len(dfs) > 0:
df_other = dfs.pop(0)
df_all = df_all.join(df_other, how="inner")
return df_all
def read_nopep_table(file, mode, samp_grps, func_colname=None, tax_colname=None):
"""
Read in a pre-joined table (rather than 3 separate tables)
:param file: file with intensity and functional or taxonomic terms
:param mode: f, tax, or ft
:param samp_grps: SampleGroups() object
:param func_colname: name of column with functional terms
:param tax_colname: name of column with taxonomic annotations
:return: dataframe, missing values as 0
"""
newdict = samp_grps.dict_numeric_cols.copy()
newdict[func_colname] = object
newdict[tax_colname] = object
df = pd.read_table(file, sep="\t",
dtype=newdict,
na_values=MISSING_VALUES,
low_memory=False)
# change remaining missing intensities to 0, for arithmetic (changed back to NA for export)
values = {x: 0 for x in samp_grps.all_intcols}
df.fillna(values, inplace=True)
# drop rows where function is missing (for mode 'f'), taxonomy is missing (mode 't'),
# or both function and taxonomy are missing (mode 'ft')
sub = list()
if mode == 'f':
sub = [func_colname]
elif mode == 't':
sub = [tax_colname]
elif mode == 'ft':
sub = [func_colname, tax_colname]
df.dropna(how='all', subset=sub, inplace=True)
return df
def write_out_general(df, outfile, cols):
"""
Write a pandas dataframe as a tab-separated file.
Keeps header, does not write index; missing
values are represented as NA
:param df: dataframe
:param outfile: path to output file
:param cols: columns to be written, in desired order
:return: None
"""
df.to_csv(outfile,
columns=cols,
sep="\t",
header=True,
index=False,
na_rep="NA")
def define_outfile_cols_expand(samp_grps, ontology, mode):
"""
define columns for writing the expand output file
:param samp_grps: SampleGroups object
:param ontology: functional ontology. only required for 'f' or 'ft' modes
:param mode: f, t, or ft
:return: a list of relevant columns in the correct order
"""
int_cols = []
int_cols += samp_grps.mean_names + samp_grps.all_intcols
node_cols = []
if ontology != "cog":
node_cols += samp_grps.n_peptide_names_flat
# ft doesn't have samp_children
if mode != 'ft':
node_cols += samp_grps.samp_children_names_flat
quant_cols = int_cols + node_cols
if mode == 'f':
if ontology == 'go':
cols = ['id', 'name', 'namespace'] + quant_cols
elif ontology == 'cog':
cols = ['id', 'description'] + quant_cols
elif ontology == 'ec':
cols = ['id', 'description'] + quant_cols
else:
raise ValueError("Invalid ontology. Expected one of: %s" % ['go', 'cog', 'ec'])
elif mode == 't':
cols = ['id', 'taxon_name', 'rank'] + quant_cols
elif mode == 'ft':
cols = ['go_id', 'name', 'namespace', 'tax_id', 'taxon_name', 'rank'] + quant_cols
else:
raise ValueError("Invalid mode. Expected one of: %s" % ['f', 't', 'ft'])
return cols
```
#### File: tests/travis/testAnnotationHierarchy.py
```python
import unittest
import pandas as pd
from metaquantome.classes.AnnotationNode import AnnotationNode
from metaquantome.classes.AnnotationHierarchy import AnnotationHierarchy
from metaquantome.databases.NCBITaxonomyDb import NCBITaxonomyDb
from metaquantome.databases.GeneOntologyDb import GeneOntologyDb
from metaquantome.databases.EnzymeDb import EnzymeDb
from metaquantome.util.utils import TEST_DIR
class TestAnnotationHierarchyNcbi(unittest.TestCase):
def _create_sapiens_db(self):
db = NCBITaxonomyDb(TEST_DIR)
sample_set = {9604, 9605, 9606} # hominidae (family), homo (genus), homo sapiens (species)
ah = AnnotationHierarchy(db, sample_set, 'samp1')
return ah, sample_set
def testInit(self):
ah, sample_set = self._create_sapiens_db()
self.assertIsInstance(ah.db, NCBITaxonomyDb)
self.assertSetEqual(ah.sample_set, sample_set)
self.assertDictEqual(ah.nodes, dict())
def testUpdateNode(self):
ah, sample_set = self._create_sapiens_db()
# one sample child
testid = 9605
intensity = 200
ah._add_node_with_ancestors(testid, intensity)
testid2 = 9606
ah._add_node_with_ancestors(testid2, intensity)
ah._define_sample_children()
updated_node = ah.nodes[testid]
self.assertIsInstance(updated_node, AnnotationNode)
self.assertEqual(updated_node.intensity, intensity*2)
self.assertEqual(updated_node.n_sample_children, 1)
def testAggregateNodes(self):
ah, sample_set = self._create_sapiens_db()
testids = [9604, 9605, 9606]
test_intensities = [500, 200, 300]
for i in range(0, 3):
ah._add_node_with_ancestors(testids[i], test_intensities[i])
self.assertEqual(ah.nodes[9604].intensity, 1000)
class TestAnnotationHierarchyGO(unittest.TestCase):
def _create_go_db(self):
db = GeneOntologyDb(TEST_DIR)
sample_set = {'GO:0008150', # biological process
'GO:0008283', # cell proliferation (child of BP)
'GO:0033687', # osteoblast proliferation (child of cell pro)
'GO:0036093', # germ cell proliferation (child of cell pro)
'GO:0022414', # reproductive process (child of BP)
'GO:1903046', # meiotic cell cycle process (child of rep pro)
'GO:0051026'} # chiasma assembly, child of meiotic
ah = AnnotationHierarchy(db, sample_set, 'samp1')
return ah, sample_set
def testInit(self):
ah, sample_set = self._create_go_db()
self.assertIsInstance(ah.db, GeneOntologyDb)
self.assertSetEqual(ah.sample_set, sample_set)
self.assertDictEqual(ah.nodes, dict())
def testUpdateNode(self):
ah, sample_set = self._create_go_db()
# one sample child
testid = 'GO:0051026'
intensity = 100
ah._add_node_with_ancestors(testid, intensity)
updated_node = ah.nodes[testid]
self.assertIsInstance(updated_node, AnnotationNode)
self.assertEqual(updated_node.intensity, intensity)
ah._define_sample_children()
self.assertEqual(updated_node.n_sample_children, 0)
def testAggregateNodes(self):
ah, sample_set = self._create_go_db()
testids = ['GO:0008150', # biological process
'GO:0008283', # cell proliferation (child of BP)
'GO:0033687', # osteoblast proliferation (child of cell pro)
'GO:0036093', # germ cell proliferation (child of cell pro and rep pro)
'GO:0022414', # reproductive process (child of BP)
'GO:1903046', # meiotic cell cycle process (child of rep pro)
'GO:0051026'] # chiasma assembly, child of meiotic
test_intensities = [0, 0, 0, 100, 50, 200, 300]
for i in range(0, len(test_intensities)):
ah._add_node_with_ancestors(testids[i], test_intensities[i])
self.assertEqual(ah.nodes['GO:0022414'].intensity, 650)
class TestAnnotationHierarchyEc(unittest.TestCase):
def _create_ec_db(self):
db = EnzymeDb(TEST_DIR)
sample_set = {'1.1.4.-',
'1.1.4.1',
'1.1.4.2',
'6.5.-.-',
'6.-.-.-'}
ah = AnnotationHierarchy(db, sample_set, 'samp1')
return ah, sample_set
def testInit(self):
ah, sample_set = self._create_ec_db()
self.assertIsInstance(ah.db, EnzymeDb)
self.assertSetEqual(ah.sample_set, sample_set)
self.assertDictEqual(ah.nodes, dict())
def testUpdateNode(self):
ah, sample_set = self._create_ec_db()
# one sample child
testids = ['1.1.4.-', '1.1.4.1', '1.1.4.2']
intensity = 100
for i in testids:
ah._add_node_with_ancestors(i, intensity)
updated_node = ah.nodes[testids[0]]
print(ah.nodes['1.1.-.-'].intensity)
ah._define_sample_children()
self.assertIsInstance(updated_node, AnnotationNode)
self.assertEqual(updated_node.intensity, intensity*3)
self.assertEqual(updated_node.n_sample_children, 2)
def testAggregateNodes(self):
ah, sample_set = self._create_ec_db()
testids = ['1.1.4.-',
'1.1.4.1',
'1.1.4.2',
'6.5.-.-']
test_intensities = [500, 200, 300, 0]
for i in range(0, 3):
ah._add_node_with_ancestors(testids[i], test_intensities[i])
self.assertEqual(ah.nodes['1.1.4.-'].intensity, 1000)
def testToDataframe(self):
ah, sample_set = self._create_ec_db()
test_set = ['1.1.4.-',
'1.1.4.1',
'1.1.4.2',
'1.1.4.-',
'1.1.4.1',
'1.1.4.2',
'6.5.-.-',
'6.5.-.-',
'6.-.-.-',
'6.-.-.-']
# set to one, so it's equal to number of peptides
test_intensity = 1
for id in test_set:
ah._add_node_with_ancestors(id, test_intensity)
# expanded sample set is all nodes
ah._define_sample_children()
# the sample set is as below:
# sample_set = {'1.1.4.-',
# '1.1.4.1',
# '1.1.4.2',
# '6.5.-.-',
# '6.-.-.-'}
# ah.get_informative_nodes(0, 0)
# expected
exp_df = pd.DataFrame({'samp1': [6, 6, 6, 2, 2, 4, 2],
'samp1_n_peptide': [6, 6, 6, 2, 2, 4, 2],
'samp1_n_samp_children': [1, 1, 2, 0, 0, 1, 0]},
index= ['1.-.-.-',
'1.1.-.-',
'1.1.4.-',
'1.1.4.1',
'1.1.4.2',
'6.-.-.-',
'6.5.-.-']).sort_index(axis=0).sort_index(axis=1)
df = ah.to_dataframe().sort_index(axis=0).sort_index(axis=1)
self.assertTrue(df.equals(exp_df))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/travis/testExpand.py
```python
import unittest
import numpy as np
import pandas as pd
from metaquantome.databases import GeneOntologyDb as godb
import metaquantome.modules.expand as expand
from metaquantome.classes.SampleGroups import SampleGroups
from metaquantome.util.testutils import testfile
from metaquantome.util.utils import TEST_DIR
class TestExpandUtils(unittest.TestCase):
# test dataframe
# 9604 has no peptides observed in either sample, but has enough from the other peptides
# 9606 has one peptide observed in one sample and two in the other
# 9605 has two peptides in each sample, but has 1 child and is a leaf
# 9599 only has one peptide
peptide = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
lca = [9604, 9604, 9605, 9605, 9606, 9606, 9599]
samp1 = [1, 1, 1, 1, 1, 0, 2]
samp2 = [0, 1, 1, 1, 1, 1, 2]
samp3 = [1, 1, 0, 1, 1, 0, 2]
samp_grps = SampleGroups('{"grp1": ["samp1", "samp2", "samp3"]}')
test_df = pd.DataFrame({'lca': lca,
'samp1': samp1,
'samp2': samp2,
'samp3': samp3},
index=[peptide])
def testCalcMeans(self):
calk = expand.calc_means(self.test_df, self.samp_grps)['grp1_mean']
expected_means = pd.Series(np.log2([2/3, 1, 2/3, 1, 1, 1/3, 2]),
index=[self.peptide],
name='grp1_mean')
self.assertTrue(calk.equals(expected_means))
df = pd.DataFrame({'s1_1': [4, 4],
's1_2': [2, 2],
's2_1': [5, 10],
's2_2': [7, 16]})
samps = SampleGroups('{"s1": ["s1_1", "s1_2"], "s2": ["s2_1", "s2_2"]}')
means = expand.calc_means(df, samps)
self.assertTrue(means['s1_mean'].equals(pd.Series({0: np.log2(3.0), 1: np.log2(3.0)}, name="s1_mean")))
class TestFunctionalAnalysisExpand(unittest.TestCase):
db = godb.GeneOntologyDb(TEST_DIR, slim_down=True)
def testSingleInt(self):
func=testfile('simple_func.tab')
int=testfile('simple_int.tab')
go_df = expand.expand('f', sinfo='{"s1": ["int"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, func_file=func,
func_colname='go', ontology='go')
self.assertEqual(go_df.loc["GO:0022610"]['int'], np.log2(200))
self.assertEqual(go_df.loc["GO:0008152"]['int'], np.log2(100))
def testMultipleInt(self):
func = testfile('multiple_func.tab')
int = testfile('multiple_int.tab')
go_df = expand.expand('f', sinfo='{"s1": ["int1", "int2", "int3"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, func_file=func,
func_colname='go', ontology='go')
self.assertEqual(go_df.loc['GO:0008152']['int1'], np.log2(10))
self.assertEqual(go_df.loc['GO:0022610']['int2'], np.log2(30))
# missing values (zeros, nans, NA's, etc) are turned into NaN's
self.assertTrue(np.isnan(go_df.loc['GO:0000003']['int3']))
return go_df
def testNopep(self):
nopep=testfile('nopep.tab')
go_df = expand.expand('f', sinfo='{"s1": ["int1", "int2", "int3"]}', int_file=None, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, func_colname='go',
ontology='go', nopep=True, nopep_file=nopep).sort_index(axis=1)
self.assertEqual(go_df.loc['GO:0008152']['int1'], np.log2(10))
self.assertEqual(go_df.loc['GO:0022610']['int2'], np.log2(30))
# missing values (zeros, nans, NA's, etc) are turned into NaN's
self.assertTrue(np.isnan(go_df.loc['GO:0000003']['int3']))
# now, test that the results are the same as obtained through the peptide method
df = self.testMultipleInt().sort_index(axis=1)
self.assertTrue(df.equals(go_df))
def testSlimDown(self):
func=testfile('func_eggnog.tab')
int=testfile('int_eggnog.tab')
outfile=testfile('eggnog_out.tab')
sinfo='{"NS": ["int737NS", "int852NS", "int867NS"], "WS": ["int737WS", "int852WS", "int867WS"]}'
go_df = expand.expand('f', sinfo=sinfo, int_file=int, pep_colname_int='peptide', pep_colname_func='peptide',
pep_colname_tax='peptide', data_dir=TEST_DIR, func_file=func, func_colname='go', ontology='go',
slim_down=True, outfile=outfile)
# test that all go terms are in slim
# load slim
returned_gos = set(go_df['id'])
# potential of unknown, so just drop that
returned_gos.discard('unknown')
self.assertTrue(returned_gos.issubset(self.db.goslim.keys()))
def testCog(self):
func=testfile('multiple_func.tab')
int=testfile('multiple_int.tab')
cog_df = expand.expand('f', sinfo='{"s1": ["int1", "int2", "int3"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', func_file=func, func_colname='cog',
ontology='cog')
self.assertEqual(cog_df.loc["C"]['s1_mean'], np.log2((10+20+70)/3))
self.assertEqual(cog_df.loc["N"]['int2'], np.log2(30))
def testSimpleEc(self):
func=testfile('simple_ec.tab')
int=testfile('simple_int.tab')
ec_df = expand.expand('f', sinfo='{"s1": ["int"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, func_file=func,
func_colname='ec', ontology='ec')
self.assertEqual(ec_df.loc["3.4.11.-"]['int'], np.log2(100))
self.assertEqual(ec_df.loc["3.4.-.-"]['int'], np.log2(300))
def testMultipleEc(self):
func=testfile('multiple_func.tab')
int=testfile('multiple_int.tab')
ec_df = expand.expand('f', sinfo='{"s1": ["int1", "int2", "int3"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, func_file=func,
func_colname='ec', ontology='ec')
self.assertEqual(ec_df.loc['3.4.-.-']['int1'], np.log2(50))
self.assertEqual(ec_df.loc['1.2.-.-']['int2'], np.log2(50))
# missing values (zeros, nans, NA's, etc) are turned into NaN's
self.assertTrue(np.isnan(ec_df.loc['1.2.-.-']['int3']))
class TestTaxonomyAnalysisExpand(unittest.TestCase):
def testSingleBasic(self):
tax = testfile('simple_tax.tab')
int = testfile('simple_int.tab')
tax_df = expand.expand('t', sinfo='{"s1": ["int"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, tax_file=tax,
tax_colname='lca')
self.assertEqual(tax_df.query("taxon_name == 'Helicobacter pylori'")['int'].values, np.log2(100))
def testWrite(self):
tax = testfile('simple_tax.tab')
int = testfile('simple_int.tab')
out = testfile('taxonomy_write_simple.tab')
df = expand.expand(mode='t', sinfo='{"samp1": ["int"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, outfile=out, tax_file=tax,
tax_colname='lca')
written = pd.read_table(out)
self.assertAlmostEqual(written.query("taxon_name == 'Clostridioides difficile'")['samp1_mean'].values[0], np.log2(200))
def testMultCols(self):
tax=testfile('multiple_tax.tab')
int=testfile('multiple_int.tab')
tax_df = expand.expand('t', sinfo='{"s1": ["int1", "int2", "int3"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, tax_file=tax,
tax_colname='lca')
self.assertEqual(tax_df.query("rank == 'phylum' and taxon_name == 'Proteobacteria'")['int3'].values[0], np.log2(70))
def testNopep(self):
nopep=testfile('nopep.tab')
tax_df = expand.expand('t', sinfo='{"s1": ["int1", "int2", "int3"]}', int_file=None, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, tax_colname='lca',
nopep=True, nopep_file=nopep)
self.assertEqual(tax_df.query("rank == 'phylum' and taxon_name == 'Proteobacteria'")['int3'].values[0],
np.log2(70))
def testParentIntensityHigher(self):
"""
make sure that parents always have higher intensity than children
"""
tax=testfile('test_root_sum_uni.tab')
int=testfile('test_root_sum_int.tab')
tax_df = expand.expand('t', sinfo='{"A": ["int"]}', int_file=int, pep_colname_int='peptide',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, tax_file=tax,
tax_colname='taxon_id')
# filter to phylum and below
tax_df_filt = tax_df[(tax_df["rank"] != 'no rank') & (tax_df["rank"] != 'superkingdom')]
# firmicutes phylum should be highest
ints = tax_df_filt['int']
self.assertEqual(ints.max(), ints[1239])
# strep genus intensity should be greater than or equal to that of strep species
self.assertGreaterEqual(ints[1301], ints[1302])
self.assertGreaterEqual(ints[1301], ints[1305])
class TestFunctionTaxonomyAnalysis(unittest.TestCase):
def testDifferentNames(self):
tax = testfile('ft_tax.tab')
func = testfile('ft_func.tab')
int = testfile('ft_int.tab')
ft = expand.expand('ft', sinfo='{"A": ["int"]}', int_file=int, pep_colname_int='Sequence',
pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR, tax_file=tax,
tax_colname='lca', func_file=func, func_colname="go")
self.assertIn("A_mean", list(ft))
if __name__=='__main__':
unittest.main()
``` |
{
"source": "jjunhyub/2021_timetraveler",
"score": 2
} |
#### File: configs/photorealistic_model_nas_11111110101100001100011011110100/train_decoder.py
```python
import sys
sys.path.insert(0, '/mnt/home/xiaoxiang/haozhe/style_nas_2/models/')
import numpy as np
import torch
import os
import cv2
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from torch.utils.data.sampler import RandomSampler
import torch.autograd as autograd
from tqdm import tqdm
from matplotlib import pyplot as plt
from dataset import TransferDataset
from torch.utils.serialization import load_lua
import argparse
from models_photorealistic_nas.VGG_with_decoder import encoder, decoder
abs_dir = os.path.abspath(os.path.dirname(__file__))
def load_nets():
encoder_param = load_lua('/mnt/home/xiaoxiang/haozhe/style_nas_2/models/models_photorealistic_nas/vgg_normalised_conv5_1.t7')
net_e = encoder(encoder_param)
net_d = decoder()
return net_e, net_d
def get_gram_matrix(f):
n, c, h, w = f.size(0), f.size(1), f.size(2), f.size(3)
f = f.view(n, c, -1)
gram = f.bmm(f.transpose(1, 2)) / (h * w)
return gram
def get_loss(encoder, decoder, content, d0_control, d1_control, d2_control, d3_control, d4_control, d5_control):
fc = encoder(content)
content_new = decoder(*fc, d0_control, d1_control, d2_control, d3_control, d4_control, d5_control)
fc_new = encoder(content_new)
mse_loss = nn.MSELoss()
loss_r = mse_loss(content_new, content)
loss_p_list = []
for i in range(5):
loss_p_list.append(mse_loss(fc_new[i], fc[i]))
loss_p = sum(loss_p_list) / len(loss_p_list)
loss = 0.5 * loss_r + 0.5 * loss_p
return loss
def get_dataloader(content_root):
transferset = TransferDataset(content_root)
loader = DataLoader(transferset, 16, True, num_workers=16, drop_last=True)
return loader
def adjust_learning_rate(optimizer, epoch):
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * (0.95 ** epoch)
def show_results(content, style, no_train, train):
plt.subplot(221)
plt.imshow(content)
plt.title('content')
plt.subplot(222)
plt.imshow(style)
plt.title('style')
plt.subplot(223)
plt.imshow(no_train)
plt.title('close_form')
plt.subplot(224)
plt.imshow(train)
plt.title('close_form + train_decoder')
plt.show()
def train_single_epoch(args, epoch, encoder, decoder, loader, optimizer, alpha_train=0):
for i, content_batch in enumerate(loader):
content_batch.requires_grad = False
d0_control = args.d_control[:5]
d1_control = args.d_control[5: 8]
d2_control = args.d_control[9: 16]
d3_control = args.d_control[16: 23]
d4_control = args.d_control[23: 28]
d5_control = args.d_control[28: 32]
d0_control = [int(i) for i in d0_control]
d1_control = [int(i) for i in d1_control]
d2_control = [int(i) for i in d2_control]
d3_control = [int(i) for i in d3_control]
d4_control = [int(i) for i in d4_control]
d5_control = [int(i) for i in d5_control]
if args.gpu is not None:
content_batch = content_batch.cuda()
loss = get_loss(encoder, decoder, content_batch, d0_control, d1_control, d2_control, d3_control, d4_control, d5_control)
if i % 1000 == 0:
print('epoch: %d | batch: %d | loss: %.4f' % (epoch, i, loss.cpu().data))
optimizer.zero_grad()
loss.backward(retain_graph=False)
optimizer.step()
def train(args, content_root, encoder, decoder):
MAX_EPOCH = args.max_epoch
for param in encoder.parameters():
param.requires_grad = False
decoder.train(), encoder.eval()
loader = get_dataloader(content_root)
optimizer = optim.Adam(decoder.parameters(), lr=1e-4, betas=(0.5, 0.9))
for i in range(MAX_EPOCH):
train_single_epoch(args, i, encoder, decoder, loader, optimizer)
state_dict = decoder.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].cpu()
torch.save(state_dict, '{:s}/decoder_epoch_{:d}.pth.tar'.format(args.save_dir, i + 1))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu', default=0)
parser.add_argument('-s', '--save_dir', default=os.path.join(abs_dir, 'trained_models_nas'))
parser.add_argument('-d', '--d_control')
parser.add_argument('-me', '--max_epoch', default=5, type=int)
args = parser.parse_args()
if not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
if args.gpu is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
net_e, net_d = load_nets()
if args.gpu is not None:
net_e.cuda()
net_d.cuda()
train(args, '/mnt/home/xiaoxiang/haozhe/VOCdevkit/VOC2012/JPEGImages', net_e, net_d)
```
#### File: configs/photorealistic_model_nas_11111110101100001100011011110100/validation.py
```python
import sys
sys.path.insert(0, '/mnt/home/xiaoxiang/haozhe/style_nas_2/models/')
import numpy as np
import os
import cv2
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from dataset import TransferDataset
from torch.utils.serialization import load_lua
import argparse
from torchvision import utils
from torchvision import transforms
from models_photorealistic_nas.VGG_with_decoder import encoder, decoder0, decoder1, decoder2, decoder3, decoder4, decoder5
from wct import transform
abs_dir = os.path.abspath(os.path.dirname(__file__))
def load_net():
encoder_param = load_lua('/mnt/home/xiaoxiang/haozhe/style_nas_2/models/models_photorealistic_nas/vgg_normalised_conv5_1.t7')
net_e = encoder(encoder_param)
net_d0 = decoder0()
net_d0.load_state_dict(torch.load(os.path.join(abs_dir, 'trained_models_nas/decoder_epoch_2.pth.tar')))
net_d1 = decoder1()
net_d1.load_state_dict(torch.load(os.path.join(abs_dir, 'trained_models_nas/decoder_epoch_2.pth.tar')))
net_d2 = decoder2()
net_d2.load_state_dict(torch.load(os.path.join(abs_dir, 'trained_models_nas/decoder_epoch_2.pth.tar')))
net_d3 = decoder3()
net_d3.load_state_dict(torch.load(os.path.join(abs_dir, 'trained_models_nas/decoder_epoch_2.pth.tar')))
net_d4 = decoder4()
net_d4.load_state_dict(torch.load(os.path.join(abs_dir, 'trained_models_nas/decoder_epoch_2.pth.tar')))
net_d5 = decoder5()
net_d5.load_state_dict(torch.load(os.path.join(abs_dir, 'trained_models_nas/decoder_epoch_2.pth.tar')))
return net_e, net_d0, net_d1, net_d2, net_d3, net_d4, net_d5
def get_test_list(root_dir):
test_list = os.listdir(root_dir)
test_list = [os.path.join(root_dir, i) for i in test_list]
return test_list
def get_a_image(path):
img = cv2.imread(path, cv2.IMREAD_COLOR)
return img
def resize_save(content, style, out):
if content.shape[0] < content.shape[1]:
out_h = 512
out_w = np.int32(512.0 * content.shape[1] / content.shape[0])
else:
out_w = 512
out_h = np.int32(512.0 * content.shape[0] / content.shape[1])
content = cv2.resize(content, (out_w, out_h), cv2.INTER_AREA)
style = cv2.resize(style, (out_w, out_h), cv2.INTER_AREA)
out = cv2.resize(out, (out_w, out_h), cv2.INTER_AREA)
return content, style, out
def resize_imgs(content, style):
c_h = 384
c_w = 768
s_h = 384
s_w = 768
# c_h = content.shape[0]
# c_w = content.shape[1]
# s_h = style.shape[0]
# s_w = style.shape[1]
# c_ratio = np.float32(c_h) / c_w
# s_ratio = np.float32(s_h) / s_w
# if (c_ratio / s_ratio > 4.0) or (s_ratio / c_ratio > 4.0):
# c_h_out = 512
# c_w_out = 512
# s_h_out = 512
# s_w_out = 512
# elif c_ratio < 1:
# c_h = 512
# c_w = np.int32(c_h / c_ratio)
# s_h = c_h
# s_w = c_w
# elif c_ratio >= 1:
# c_w = 512
# c_h = np.int32(c_w * c_ratio)
# s_h = c_h
# s_w = c_w
content = cv2.resize(content, (c_w, c_h), cv2.INTER_AREA)
style = cv2.resize(style, (s_w, s_h), cv2.INTER_AREA)
return content, style
def handmake_mse(result, target):
return torch.mean((result - target) ** 2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu', default=0)
args = parser.parse_args()
net_e, _, _, _, _, _, _ = load_net()
if args.gpu is not None:
net_e.cuda(), net_e.eval()
validation_list = get_test_list(os.path.join(abs_dir, 'result_val_nas'))
benchmark_list = get_test_list('/mnt/home/xiaoxiang/haozhe/style_nas_2/benchmark')
validation_list = [i for i in validation_list if '.jpg' in i]
benchmark_list = [i for i in benchmark_list if '.jpg' in i]
validation_list.sort()
benchmark_list.sort()
mse_loss = nn.MSELoss()
loss_r_list = []
loss_p_list = []
for k in range(len(validation_list[:])):
validation_path = validation_list[k]
benchmark_path = benchmark_list[k]
print('----- validate pair %d -------' % (k))
validation_img = get_a_image(validation_path)
benchmark_img = get_a_image(benchmark_path)
validation_img, benchmark_img = resize_imgs(validation_img, benchmark_img)
validation_img = transforms.ToTensor()(validation_img)
benchmark_img = transforms.ToTensor()(benchmark_img)
validation_img = validation_img.unsqueeze(0)
benchmark_img = benchmark_img.unsqueeze(0)
if args.gpu is not None:
validation_img = validation_img.cuda()
benchmark_img = benchmark_img.cuda()
validation_f = list(net_e(validation_img))
benchmark_f = list(net_e(benchmark_img))
loss_r = handmake_mse(validation_img, benchmark_img).cpu().data.numpy()
# accumulate the per-layer feature losses in a separate list so the outer loss_p_list is not overwritten
per_layer_losses = []
for i in range(5):
per_layer_losses.append(handmake_mse(validation_f[i], benchmark_f[i]).cpu().data.numpy())
loss_p = sum(per_layer_losses) / len(per_layer_losses)
loss_r_list.append(loss_r)
loss_p_list.append(loss_p)
overall_loss_r = '%.4f' % (sum(loss_r_list) / len(loss_r_list))
overall_loss_p = '%.4f' % (sum(loss_p_list) / len(loss_p_list))
with open(os.path.join(abs_dir, 'result.txt'), 'w') as f:
f.write('%s %s' % (overall_loss_r, overall_loss_p))
```
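As a quick sanity check on `handmake_mse`, the hand-rolled mean-squared error matches `nn.MSELoss` with its default mean reduction; this is a standalone sketch, independent of the encoder features used above:

```python
import torch
import torch.nn as nn

def handmake_mse(result, target):
    return torch.mean((result - target) ** 2)

a = torch.randn(1, 3, 8, 8)
b = torch.randn(1, 3, 8, 8)
# True: both reduce the squared error with a mean over all elements.
print(torch.allclose(handmake_mse(a, b), nn.MSELoss()(a, b)))
```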
#### File: jjunhyub/2021_timetraveler/convert_torch.py
```python
from __future__ import print_function
import sys
sys.path.append('/home/junhyub/documents/StyleNAS/')
import os
import math
import torch
import argparse
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.legacy.nn as lnn
import torch.nn.functional as F
from functools import reduce
from torch.autograd import Variable
from torch.utils.serialization import load_lua
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
# result is Variables list [Variable1, Variable2, ...]
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
# result is a Variable
return reduce(self.lambda_func,self.forward_prepare(input))
def copy_param(m,n):
if m.weight is not None: n.weight.data.copy_(m.weight)
if m.bias is not None: n.bias.data.copy_(m.bias)
if hasattr(n,'running_mean'): n.running_mean.copy_(m.running_mean)
if hasattr(n,'running_var'): n.running_var.copy_(m.running_var)
def add_submodule(seq, *args):
for n in args:
seq.add_module(str(len(seq._modules)),n)
def lua_recursive_model(module,seq):
for m in module.modules:
name = type(m).__name__
real = m
if name == 'TorchObject':
name = m._typename.replace('cudnn.','')
m = m._obj
if name == 'SpatialConvolution' or name == 'nn.SpatialConvolutionMM':
if not hasattr(m,'groups') or m.groups is None: m.groups=1
n = nn.Conv2d(m.nInputPlane,m.nOutputPlane,(m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),1,m.groups,bias=(m.bias is not None))
copy_param(m,n)
add_submodule(seq,n)
elif name == 'SpatialBatchNormalization':
n = nn.BatchNorm2d(m.running_mean.size(0), m.eps, m.momentum, m.affine)
copy_param(m,n)
add_submodule(seq,n)
elif name == 'VolumetricBatchNormalization':
n = nn.BatchNorm3d(m.running_mean.size(0), m.eps, m.momentum, m.affine)
copy_param(m, n)
add_submodule(seq, n)
elif name == 'ReLU':
n = nn.ReLU()
add_submodule(seq,n)
elif name == 'Sigmoid':
n = nn.Sigmoid()
add_submodule(seq,n)
elif name == 'SpatialMaxPooling':
n = nn.MaxPool2d((m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),ceil_mode=m.ceil_mode)
add_submodule(seq,n)
elif name == 'SpatialAveragePooling':
n = nn.AvgPool2d((m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),ceil_mode=m.ceil_mode)
add_submodule(seq,n)
elif name == 'SpatialUpSamplingNearest':
n = nn.UpsamplingNearest2d(scale_factor=m.scale_factor)
add_submodule(seq,n)
elif name == 'View':
n = Lambda(lambda x: x.view(x.size(0),-1))
add_submodule(seq,n)
elif name == 'Reshape':
n = Lambda(lambda x: x.view(x.size(0),-1))
add_submodule(seq,n)
elif name == 'Linear':
# Linear in pytorch only accept 2D input
n1 = Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x )
n2 = nn.Linear(m.weight.size(1),m.weight.size(0),bias=(m.bias is not None))
copy_param(m,n2)
n = nn.Sequential(n1,n2)
add_submodule(seq,n)
elif name == 'Dropout':
m.inplace = False
n = nn.Dropout(m.p)
add_submodule(seq,n)
elif name == 'SoftMax':
n = nn.Softmax()
add_submodule(seq,n)
elif name == 'Identity':
n = Lambda(lambda x: x) # do nothing
add_submodule(seq,n)
elif name == 'SpatialFullConvolution':
n = nn.ConvTranspose2d(m.nInputPlane,m.nOutputPlane,(m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),(m.adjW,m.adjH))
copy_param(m,n)
add_submodule(seq,n)
elif name == 'VolumetricFullConvolution':
n = nn.ConvTranspose3d(m.nInputPlane,m.nOutputPlane,(m.kT,m.kW,m.kH),(m.dT,m.dW,m.dH),(m.padT,m.padW,m.padH),(m.adjT,m.adjW,m.adjH),m.groups)
copy_param(m,n)
add_submodule(seq, n)
elif name == 'SpatialReplicationPadding':
n = nn.ReplicationPad2d((m.pad_l,m.pad_r,m.pad_t,m.pad_b))
add_submodule(seq,n)
elif name == 'SpatialReflectionPadding':
n = nn.ReflectionPad2d((m.pad_l,m.pad_r,m.pad_t,m.pad_b))
add_submodule(seq,n)
elif name == 'Copy':
n = Lambda(lambda x: x) # do nothing
add_submodule(seq,n)
elif name == 'Narrow':
n = Lambda(lambda x,a=(m.dimension,m.index,m.length): x.narrow(*a))
add_submodule(seq,n)
elif name == 'SpatialCrossMapLRN':
lrn = lnn.SpatialCrossMapLRN(m.size,m.alpha,m.beta,m.k)
n = Lambda(lambda x,lrn=lrn: Variable(lrn.forward(x.data)))
add_submodule(seq,n)
elif name == 'Sequential':
n = nn.Sequential()
lua_recursive_model(m,n)
add_submodule(seq,n)
elif name == 'ConcatTable': # output is list
n = LambdaMap(lambda x: x)
lua_recursive_model(m,n)
add_submodule(seq,n)
elif name == 'CAddTable': # input is list
n = LambdaReduce(lambda x,y: x+y)
add_submodule(seq,n)
elif name == 'Concat':
dim = m.dimension
n = LambdaReduce(lambda x,y,dim=dim: torch.cat((x,y),dim))
lua_recursive_model(m,n)
add_submodule(seq,n)
elif name == 'TorchObject':
print('Not Implement',name,real._typename)
else:
print('Not Implement',name)
def lua_recursive_source(module):
s = []
for m in module.modules:
name = type(m).__name__
real = m
if name == 'TorchObject':
name = m._typename.replace('cudnn.','')
m = m._obj
if name == 'SpatialConvolution' or name == 'nn.SpatialConvolutionMM':
if not hasattr(m,'groups') or m.groups is None: m.groups=1
s += ['nn.Conv2d({},{},{},{},{},{},{},bias={}),#Conv2d'.format(m.nInputPlane,
m.nOutputPlane,(m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),1,m.groups,m.bias is not None)]
elif name == 'SpatialBatchNormalization':
s += ['nn.BatchNorm2d({},{},{},{}),#BatchNorm2d'.format(m.running_mean.size(0), m.eps, m.momentum, m.affine)]
elif name == 'VolumetricBatchNormalization':
s += ['nn.BatchNorm3d({},{},{},{}),#BatchNorm3d'.format(m.running_mean.size(0), m.eps, m.momentum, m.affine)]
elif name == 'ReLU':
s += ['nn.ReLU()']
elif name == 'Sigmoid':
s += ['nn.Sigmoid()']
elif name == 'SpatialMaxPooling':
s += ['nn.MaxPool2d({},{},{},ceil_mode={}),#MaxPool2d'.format((m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),m.ceil_mode)]
elif name == 'SpatialAveragePooling':
s += ['nn.AvgPool2d({},{},{},ceil_mode={}),#AvgPool2d'.format((m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),m.ceil_mode)]
elif name == 'SpatialUpSamplingNearest':
s += ['nn.UpsamplingNearest2d(scale_factor={})'.format(m.scale_factor)]
elif name == 'View':
s += ['Lambda(lambda x: x.view(x.size(0),-1)), # View']
elif name == 'Reshape':
s += ['Lambda(lambda x: x.view(x.size(0),-1)), # Reshape']
elif name == 'Linear':
s1 = 'Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x )'
s2 = 'nn.Linear({},{},bias={})'.format(m.weight.size(1),m.weight.size(0),(m.bias is not None))
s += ['nn.Sequential({},{}),#Linear'.format(s1,s2)]
elif name == 'Dropout':
s += ['nn.Dropout({})'.format(m.p)]
elif name == 'SoftMax':
s += ['nn.Softmax()']
elif name == 'Identity':
s += ['Lambda(lambda x: x), # Identity']
elif name == 'SpatialFullConvolution':
s += ['nn.ConvTranspose2d({},{},{},{},{},{})'.format(m.nInputPlane,
m.nOutputPlane,(m.kW,m.kH),(m.dW,m.dH),(m.padW,m.padH),(m.adjW,m.adjH))]
elif name == 'VolumetricFullConvolution':
s += ['nn.ConvTranspose3d({},{},{},{},{},{},{})'.format(m.nInputPlane,
m.nOutputPlane,(m.kT,m.kW,m.kH),(m.dT,m.dW,m.dH),(m.padT,m.padW,m.padH),(m.adjT,m.adjW,m.adjH),m.groups)]
elif name == 'SpatialReplicationPadding':
s += ['nn.ReplicationPad2d({})'.format((m.pad_l,m.pad_r,m.pad_t,m.pad_b))]
elif name == 'SpatialReflectionPadding':
s += ['nn.ReflectionPad2d({})'.format((m.pad_l,m.pad_r,m.pad_t,m.pad_b))]
elif name == 'Copy':
s += ['Lambda(lambda x: x), # Copy']
elif name == 'Narrow':
s += ['Lambda(lambda x,a={}: x.narrow(*a))'.format((m.dimension,m.index,m.length))]
elif name == 'SpatialCrossMapLRN':
lrn = 'lnn.SpatialCrossMapLRN(*{})'.format((m.size,m.alpha,m.beta,m.k))
s += ['Lambda(lambda x,lrn={}: Variable(lrn.forward(x.data)))'.format(lrn)]
elif name == 'Sequential':
s += ['nn.Sequential( # Sequential']
s += lua_recursive_source(m)
s += [')']
elif name == 'ConcatTable':
s += ['LambdaMap(lambda x: x, # ConcatTable']
s += lua_recursive_source(m)
s += [')']
elif name == 'CAddTable':
s += ['LambdaReduce(lambda x,y: x+y), # CAddTable']
elif name == 'Concat':
dim = m.dimension
s += ['LambdaReduce(lambda x,y,dim={}: torch.cat((x,y),dim), # Concat'.format(m.dimension)]
s += lua_recursive_source(m)
s += [')']
else:
s += '# ' + name + ' Not Implement,\n'
s = map(lambda x: '\t{}'.format(x),s)
return s
def simplify_source(s):
s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d',')'),s)
s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d',')'),s)
s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d',')'),s)
s = map(lambda x: x.replace(',bias=True),#Conv2d',')'),s)
s = map(lambda x: x.replace('),#Conv2d',')'),s)
s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d',')'),s)
s = map(lambda x: x.replace('),#BatchNorm2d',')'),s)
s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d',')'),s)
s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d',')'),s)
s = map(lambda x: x.replace('),#MaxPool2d',')'),s)
s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d',')'),s)
s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d',')'),s)
s = map(lambda x: x.replace(',bias=True)),#Linear',')), # Linear'),s)
s = map(lambda x: x.replace(')),#Linear',')), # Linear'),s)
s = map(lambda x: '{},\n'.format(x),s)
s = map(lambda x: x[1:],s)
s = reduce(lambda x,y: x+y, s)
return s
def torch_to_pytorch(t7_filename,outputname=None):
model = load_lua(t7_filename,unknown_classes=True)
if type(model).__name__=='hashable_uniq_dict': model=model.model
model.gradInput = None
slist = lua_recursive_source(lnn.Sequential().add(model))
s = simplify_source(slist)
header = '''
import torch
import torch.nn as nn
import torch.legacy.nn as lnn
from functools import reduce
from torch.autograd import Variable
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
'''
varname = t7_filename.replace('.t7','').replace('.','_').replace('-','_')
s = '{}\n\n{} = {}'.format(header,varname,s[:-2])
if outputname is None: outputname=varname
with open(outputname+'.py', "w") as pyfile:
pyfile.write(s)
n = nn.Sequential()
lua_recursive_model(model,n)
torch.save(n.state_dict(),outputname+'.pth')
parser = argparse.ArgumentParser(description='Convert torch t7 model to pytorch')
parser.add_argument('--model','-m', type=str, required=True,
help='torch model file in t7 format')
parser.add_argument('--output', '-o', type=str, default=None,
help='output file name prefix, xxx.py xxx.pth')
args = parser.parse_args()
torch_to_pytorch(args.model,args.output)
```
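The converter expresses Torch layers that have no direct `nn.Module` equivalent (View, Reshape, Copy, Narrow, ...) as `Lambda` wrappers. A minimal standalone sketch of that wrapper, using only `torch` and `torch.nn`:

```python
import torch
import torch.nn as nn

class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        output = [module(input) for module in self._modules.values()]
        return output if output else input

class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))

# A Torch 'View' layer becomes a Lambda that flattens all but the batch dim.
flatten = Lambda(lambda x: x.view(x.size(0), -1))
print(flatten(torch.randn(2, 3, 4, 4)).shape)  # torch.Size([2, 48])
```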
#### File: models/models_photorealistic_nas/wct.py
```python
import torch
def whiten_and_color(cF,sF):
cFSize = cF.size()
c_mean = torch.mean(cF,1) # c x (h x w)
c_mean = c_mean.unsqueeze(1).expand_as(cF)
cF = cF - c_mean
contentConv = torch.mm(cF,cF.t()).div(cFSize[1]-1) + torch.eye(cFSize[0]).double().cuda()
c_u,c_e,c_v = torch.svd(contentConv,some=False)
k_c = cFSize[0]
for i in range(cFSize[0]):
if c_e[i] < 0.00001:
k_c = i
break
sFSize = sF.size()
s_mean = torch.mean(sF,1)
sF = sF - s_mean.unsqueeze(1).expand_as(sF)
styleConv = torch.mm(sF,sF.t()).div(sFSize[1]-1)
s_u,s_e,s_v = torch.svd(styleConv,some=False)
k_s = sFSize[0]
for i in range(sFSize[0]):
if s_e[i] < 0.00001:
k_s = i
break
c_d = (c_e[0:k_c]).pow(-0.5)
step1 = torch.mm(c_v[:,0:k_c],torch.diag(c_d))
step2 = torch.mm(step1,(c_v[:,0:k_c].t()))
whiten_cF = torch.mm(step2,cF)
s_d = (s_e[0:k_s]).pow(0.5)
targetFeature = torch.mm(torch.mm(torch.mm(s_v[:,0:k_s],torch.diag(s_d)),(s_v[:,0:k_s].t())),whiten_cF)
targetFeature = targetFeature + s_mean.unsqueeze(1).expand_as(targetFeature)
return targetFeature
def transform(cF,sF,alpha):
cF = cF.double()
sF = sF.double()
if len(cF.size()) == 4:
cF = cF[0]
if len(sF.size()) == 4:
sF = sF[0]
C,W,H = cF.size(0),cF.size(1),cF.size(2)
_,W1,H1 = sF.size(0),sF.size(1),sF.size(2)
cFView = cF.view(C,-1)
sFView = sF.view(C,-1)
targetFeature = whiten_and_color(cFView,sFView)
targetFeature = targetFeature.view_as(cF)
csF = alpha * targetFeature + (1.0 - alpha) * cF
csF = csF.float().unsqueeze(0)
return csF
```
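The whitening step above maps the centred content features to unit covariance before the style covariance is imposed. A small self-contained check of that property (a sketch using `torch.linalg.eigh` rather than the SVD used in `whiten_and_color`):

```python
import torch

torch.manual_seed(0)
C, N = 8, 400                            # channels x spatial positions, as in cFView
cF = torch.randn(C, N, dtype=torch.float64)

c_mean = cF.mean(dim=1, keepdim=True)
cF_centred = cF - c_mean
cov = cF_centred @ cF_centred.t() / (N - 1)

e_vals, e_vecs = torch.linalg.eigh(cov)  # cov = V diag(e) V^T
whitener = e_vecs @ torch.diag(e_vals.pow(-0.5)) @ e_vecs.t()
whitened = whitener @ cF_centred

# After whitening, the channel covariance is (numerically) the identity.
print(torch.allclose(whitened @ whitened.t() / (N - 1),
                     torch.eye(C, dtype=torch.float64), atol=1e-8))  # True
```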
#### File: 2021_timetraveler/PhotoNAS/dataset.py
```python
import numpy as np
import os
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from matplotlib import pyplot as plt
class TransferDataset(Dataset):
def __init__(self, content_dir):
super(TransferDataset, self).__init__()
self.content_dir = content_dir
self.content_name_list = self.get_name_list(self.content_dir)
self.transforms = self.transform()
def get_name_list(self, name):
name_list = os.listdir(name)
name_list = [os.path.join(name, i) for i in name_list]
np.random.shuffle(name_list)
return name_list
def transform(self):
data_transform = transforms.Compose([
# transforms.RandomRotation(15),
# transforms.RandomResizedCrop(size=512, scale=(0.5, 1.0)),
# transforms.RandomHorizontalFlip(),
transforms.Resize((512, 512)),
transforms.ToTensor()
])
return data_transform
def __len__(self):
a = len(self.content_name_list)
return a
def __getitem__(self, item):
img = Image.open(self.content_name_list[item]).convert('RGB')
img_out = self.transforms(img)
return img_out
```
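A minimal usage sketch for `TransferDataset`, assuming the file above is importable as `dataset` and that `./content_images` (a hypothetical path) holds a few training images:

```python
from torch.utils.data import DataLoader
from dataset import TransferDataset

dataset = TransferDataset('./content_images')   # hypothetical image folder
loader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)
for batch in loader:
    print(batch.shape)   # torch.Size([2, 3, 512, 512])
    break
```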
#### File: 2021_timetraveler/PhotoNAS/test_decoder.py
```python
import sys
sys.path.insert(0, '/gpfs/share/home/1601210097/projects/style_transfer_aaai/stylenas/')
import numpy as np
import os
import cv2
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from dataset import TransferDataset
from torch.utils.serialization import load_lua
import argparse
from torchvision import utils
from torchvision import transforms
from models_photorealistic_nas.VGG_with_decoder import encoder, decoder0, decoder1, decoder2, decoder3, decoder4, decoder5
def load_net():
encoder_param = load_lua('../../models_anti_multi_level_pyramid_stage_decoder_in/vgg_normalised_conv5_1.t7')
net_e = encoder(encoder_param)
net_d0 = decoder0()
net_d0.load_state_dict(torch.load('./trained_models_anti_multi_level/decoder_epoch_5.pth.tar'))
net_d1 = decoder1()
net_d1.load_state_dict(torch.load('./trained_models_anti_multi_level/decoder_epoch_5.pth.tar'))
net_d2 = decoder2()
net_d2.load_state_dict(torch.load('./trained_models_anti_multi_level/decoder_epoch_5.pth.tar'))
net_d3 = decoder3()
net_d3.load_state_dict(torch.load('./trained_models_anti_multi_level/decoder_epoch_5.pth.tar'))
net_d4 = decoder4()
net_d4.load_state_dict(torch.load('./trained_models_anti_multi_level/decoder_epoch_5.pth.tar'))
net_d5 = decoder5()
net_d5.load_state_dict(torch.load('./trained_models_anti_multi_level/decoder_epoch_5.pth.tar'))
return net_e, net_d0, net_d1, net_d2, net_d3, net_d4, net_d5
def get_test_list(root_dir):
test_list = os.listdir(root_dir)
test_list = [os.path.join(root_dir, i) for i in test_list]
return test_list
def get_a_image(path):
img = cv2.imread(path, cv2.IMREAD_COLOR)
return img
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu', default=0)
parser.add_argument('-s', '--save_dir', default='./test_results')
parser.add_argument('-c', '--content', default='/gpfs/share/home/1601210097/projects/style_transfer/content_images')
args = parser.parse_args()
if not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
net_e, net_d0, net_d1, net_d2, net_d3, net_d4, net_d5 = load_net()
if args.gpu is not None:
net_e.cuda(), net_e.eval()
net_d0.cuda(), net_d0.eval()
net_d1.cuda(), net_d1.eval()
net_d2.cuda(), net_d2.eval()
net_d3.cuda(), net_d3.eval()
net_d4.cuda(), net_d4.eval()
net_d5.cuda(), net_d5.eval()
test_img_list = get_test_list(args.content)
for i, img_path in enumerate(test_img_list):
print('----- testing img %d -------' % i)
img_save_in = get_a_image(img_path)
img_save_in = cv2.resize(img_save_in, (512, 512), cv2.INTER_AREA)
img = transforms.ToTensor()(img_save_in)
img = img.unsqueeze(0)
if args.gpu is not None:
img = img.cuda()
features = list(net_e(img))
features[0] = net_d0(*features)
features[0] = net_d1(*features)
features[0] = net_d2(*features)
features[0] = net_d3(*features)
features[0] = net_d4(*features)
features[0] = net_d5(*features)
out = features[0].cpu().data.float()
utils.save_image(out, os.path.join(args.save_dir, 'comp_%d.jpg' % i))
out = cv2.imread(os.path.join(args.save_dir, 'comp_%d.jpg' % i))
out = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
out_compare = np.concatenate((img_save_in, out), 1)
cv2.imwrite(os.path.join(args.save_dir, 'comp_%d.jpg' % i), out_compare)
```
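The call pattern `features[0] = net_dK(*features)` threads the full feature tuple through each decoder stage; only slot 0 is rewritten at each step, until it finally holds the reconstructed image. A toy illustration with stub stages standing in for `net_d0` ... `net_d5`:

```python
# Each stub consumes the whole feature list and returns a new slot-0 value.
def stub_decoder(scale):
    return lambda f0, *skips: f0 * scale

features = [1.0, 'skip_a', 'skip_b']        # slot 0 plus untouched skip features
for dec in [stub_decoder(2.0), stub_decoder(3.0)]:
    features[0] = dec(*features)
print(features[0])   # 6.0
```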
#### File: jjunhyub/2021_timetraveler/style_nas.py
```python
import os
import numpy
import random
import collections
import multiprocessing as mp
DIM = 32
class Model(object):
def __init__(self):
self.dim = DIM
self.path = None
self.arch = None
self.arch_str = None
self.loss_r = None
self.loss_p = None
self.loss_m = None
self.accuracy = None
def make_dir(self):
os.system('cp -r ./configs/photorealistic_model_nas ./configs/photorealistic_model_nas_%s' % self.arch_str)
self.path = './configs/photorealistic_model_nas_%s' % self.arch_str
def train(self):
# This command allocates computing resources and trains the decoder. Change the command ``srun -p 1080Ti --gres=gpu:1 --cpus-per-task 5 -n 1'' according to the settings of your server cluster.
os.system('srun -p 1080Ti --gres=gpu:1 --cpus-per-task 5 -n 1 python3 %s/train_decoder.py -d %s -me 2' % (self.path, self.arch_str))
def evaluate(self):
# This command allocates computing resources and runs the photorealistic style transfer. Change the command ``srun -p 1080Ti_dbg --gres=gpu:1 --cpus-per-task 5 -n 1'' according to the settings of your server cluster.
os.system('srun -p 1080Ti_dbg --gres=gpu:1 --cpus-per-task 5 -n 1 python3 %s/photo_transfer.py -d %s' % (self.path, self.arch_str))
# This command allocates computing resources and validates the style-transferred results. Change the command ``srun -p 1080Ti_dbg --gres=gpu:1 --cpus-per-task 5 -n 1'' according to the settings of your server cluster.
os.system('srun -p 1080Ti_dbg --gres=gpu:1 --cpus-per-task 5 -n 1 python3 %s/validation.py' % self.path)
with open('%s/result.txt' % self.path, 'r') as f:
acc = f.readline()
this_arch = bin(self.arch)[2:]
while len(this_arch) != DIM:
this_arch = '0' + this_arch
control_index = [int(i) for i in this_arch]
self.loss_r = acc.split(' ')[0]
self.loss_p = acc.split(' ')[1]
self.loss_r = float(self.loss_r)
self.loss_p = float(self.loss_p)
self.loss_m = sum(control_index) / len(control_index)
acc = 0.8 * self.loss_r + 0.1 * self.loss_p + 0.1 * self.loss_m
return acc, self.loss_r, self.loss_p, self.loss_m
def random_architecture():
return random.randint(0, 2**DIM - 1)
def mutate_arch(parent_arch):
position = random.randint(0, DIM - 1)
child_arch = parent_arch ^ (1 << position)
return child_arch
if __name__ == '__main__':
cycles = 200
population_size = 50
sample_size = 10
population = collections.deque()
history = []
def train_val_model(i):
model = Model()
model.arch = random_architecture()
model.arch_str = bin(model.arch)[2:]
while len(model.arch_str) != DIM:
model.arch_str = '0' + model.arch_str
model.make_dir()
model.train()
model.accuracy, loss_r, loss_p, loss_m = model.evaluate()
print('| acc: %.4f | loss_recon: %.4f | loss_perc: %.4f | loss_mode: %.4f |' % (model.accuracy, loss_r, loss_p, loss_m))
return model
p1 = mp.Pool()
res = p1.map(train_val_model, range(population_size))
p1.close()
p1.join()
for model in res:
with open('./record.txt', 'a') as f:
f.write('%s, %.4f, %.4f, %.4f, %.4f\n' % (model.arch_str, model.accuracy, model.loss_r, model.loss_p, model.loss_m))
population.append(model)
history.append(model)
while len(history) < cycles:
childs = []
sample = []
while len(sample) < sample_size:
candidate = random.choice(list(population))
sample.append(candidate)
parent = min(sample, key=lambda i: i.accuracy)
child = Model()
child.arch = mutate_arch(parent.arch)
child.arch_str = bin(child.arch)[2:]
while len(child.arch_str) != DIM:
child.arch_str = '0' + child.arch_str
child.make_dir()
child.train()
child.accuracy, loss_r, loss_p, loss_m = child.evaluate()
print('| acc: %.4f | loss_recon: %.4f | loss_perc: %.4f | loss_mode: %.4f |' % (child.accuracy, loss_r, loss_p, loss_m))
childs.append(child)
population.append(child)
history.append(child)
population.popleft()
with open('./record.txt', 'a') as f:
f.write('%s, %.4f, %.4f, %.4f, %.4f\n' % (child.arch_str, child.accuracy, child.loss_r, child.loss_p, child.loss_m))
``` |
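The search encodes an architecture as a `DIM`-bit integer and `mutate_arch` flips exactly one bit of the parent. A standalone sketch of just the encoding and mutation, with no training or `srun` calls:

```python
import random

DIM = 32

def random_architecture():
    return random.randint(0, 2 ** DIM - 1)

def mutate_arch(parent_arch):
    position = random.randint(0, DIM - 1)   # pick one of the DIM bit positions
    return parent_arch ^ (1 << position)    # flip it

random.seed(0)
parent = random_architecture()
child = mutate_arch(parent)
print(format(parent, '032b'))
print(format(child, '032b'))
print(bin(parent ^ child).count('1'))       # 1: parent and child differ in exactly one bit
```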
{
"source": "Jjunxi/wintoto",
"score": 3
} |
#### File: wintoto/server/superspider.py
```python
import time
import json
import datetime
import requests
from urllib.parse import urlencode
import re
from bs4 import BeautifulSoup
from requests.exceptions import ConnectionError
import threading
import queue
NUMBER_THREAD = 50
all_lucks = []
lock = threading.RLock()
q = queue.Queue()
index_url = 'http://www.singaporepools.com.sg/DataFileArchive/Lottery/Output/toto_result_draw_list_en.html'
detail_url = 'http://www.singaporepools.com.sg/en/product/sr/Pages/toto_results.aspx'
def handle_detail_page(q):
while not q.empty():
item = q.get()
detail = get_page_detail(item[0])
if detail:
flag = parse_page_detail(detail)
if not flag:
print('Fail:{}'.format(item[1]))
q.put(item)
else:
# print(item[1])
q.task_done()
def get_page_index():
val = datetime.datetime.now().strftime("%Yy%mm%dd%Hh%Mm")
data = {'v': val}
params = urlencode(data)
url = index_url + '?' + params
response = requests.get(url)
if response.status_code == 200:
return response.text
def parse_page_index(html):
pattern = re.compile("queryString='(.*?)' value=.*?'True'>(.*?)</option>", re.S)
items = re.findall(pattern, html)
print(len(items))
for item in items:
yield item
def get_page_detail(param):
url = detail_url + '?' + param
try:
response = requests.get(url)
if response.status_code == 200:
return response.text
except ConnectionError:
print('connection')
return None
def get_text(soup, tag, class_name):
ele = soup.find(tag, class_=class_name)
if ele:
return ele.text
else:
print('fail parse')
return None
def parse_page_detail(html):
soup = BeautifulSoup(html, 'html.parser')
lucks = []
for i in range(6):
luck = get_text(soup, 'td', 'win'+str(i+1))
if luck:
lucks.append(int(luck))
else:
return False
additional = get_text(soup, 'td', 'additional')
if additional:
lucks.append(int(additional))
else:
return False
print(lucks)
global all_lucks
with lock:
all_lucks.append(lucks)
with open('lucks.txt', 'w') as outfile:
json.dump(all_lucks, outfile)
outfile.close()
return True
def main():
threads = []
text = get_page_index()
for item in parse_page_index(text):
q.put(item)
# th = threading.Thread(target=handle_detail_page, args=(item[0], item[1], ))
# th.setDaemon(False)
# threads.append(th)
# th.start()
# a short sleep here is required
time.sleep(0.1)
# the number of threads must not be too small
for i in range(NUMBER_THREAD):
th = threading.Thread(target=handle_detail_page, args=(q, ))
# th.setDaemon(True)
threads.append(th)
th.start()
# q.join()
for th in threads:
th.join()
print(len(all_lucks))
if __name__ == '__main__':
# start = datetime.datetime.now()
main()
# end = datetime.datetime.now()
# print(end-start)
``` |
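The worker pattern above (a shared `queue.Queue` drained by many threads, with a lock around the shared result list) can be reproduced without any network access; the squaring job below is a stand-in for `get_page_detail`/`parse_page_detail`:

```python
import threading
import queue

q = queue.Queue()
results = []
lock = threading.Lock()

def worker(q):
    while True:
        try:
            item = q.get_nowait()
        except queue.Empty:
            return
        with lock:
            results.append(item * item)   # stand-in for fetching/parsing a page
        q.task_done()

for i in range(20):
    q.put(i)

threads = [threading.Thread(target=worker, args=(q,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(len(results))   # 20
```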
{
"source": "JJuOn/Few-shot_Class_Incremental_Learning",
"score": 2
} |
#### File: Few-shot_Class_Incremental_Learning/eval/util.py
```python
import torch
import time
import numpy as np
import io
import base64
from PIL import Image
import scipy.stats
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
return m, h
def normalize(x):
norm = x.pow(2).sum(1, keepdim=True).pow(1. / 2)
return x.div(norm)
def image_formatter(im):
im = ((im / np.max(im, axis=(1,2), keepdims=True)) * 255).astype('uint8').transpose((1,2,0))
im = Image.fromarray(im)
rawBytes = io.BytesIO()
im.save(rawBytes, "PNG") # TODO: why this is required here ?
rawBytes.seek(0) # return to the start of the file
decoded = base64.b64encode(rawBytes.read()).decode()
return f'<img src="data:image/jpeg;base64,{decoded}">'
def freeze_backbone_weights(backbone, opt, epoch, exclude=['classifier.transform']):
if opt.freeze_backbone_at == epoch:
print("Freezing the backbone.")
for name, param in backbone.named_parameters():
param.requires_grad = False
if any(map(lambda s: name.startswith(s), exclude)): # why not; name in exclude:
print("Not frozen: ", name)
param.requires_grad = True
def NN(support, support_ys, query):
"""nearest classifier"""
support = np.expand_dims(support.transpose(), 0)
query = np.expand_dims(query, 2)
diff = np.multiply(query - support, query - support)
distance = diff.sum(1)
min_idx = np.argmin(distance, axis=1)
pred = [support_ys[idx] for idx in min_idx]
return pred
def Cosine(support, support_ys, query):
"""Cosine classifier"""
support_norm = np.linalg.norm(support, axis=1, keepdims=True)
support = support / support_norm
query_norm = np.linalg.norm(query, axis=1, keepdims=True)
query = query / query_norm
cosine_distance = query @ support.transpose()
max_idx = np.argmax(cosine_distance, axis=1)
pred = [support_ys[idx] for idx in max_idx]
return pred
def get_optim(net, opt):
if opt.adam:
optimizer = torch.optim.Adam(net.parameters(),
lr=opt.learning_rate,
weight_decay=0.0005)
else:
optimizer = torch.optim.SGD(net.parameters(),
lr=opt.learning_rate,
momentum=opt.momentum,
weight_decay=opt.weight_decay)
return optimizer
def get_vocab(loaders):
vocabs = []
for loader in loaders:
label2human = loader.dataset.label2human
vocab = [name for name in label2human if name != '']
vocabs.append(vocab)
return vocabs
def get_vocabs(base_loader=None, novel_loader=None, query_ys=None):
vocab_all = []
vocab_base = None
if base_loader is not None:
label2human_base = base_loader.dataset.label2human
vocab_base = [name for name in label2human_base if name != '']
vocab_all += vocab_base
vocab_novel, orig2id = None, None
if novel_loader is not None:
novel_ids = np.sort(np.unique(query_ys))
label2human_novel = novel_loader.dataset.label2human
vocab_novel = [label2human_novel[i] for i in novel_ids]
orig2id = dict(zip(novel_ids, len(vocab_base) + np.arange(len(novel_ids))))
vocab_all += vocab_novel
return vocab_base, vocab_all, vocab_novel, orig2id
def drop_a_dim(data): #TODO why do we need this in the first place?
support_xs, support_ys, query_xs, query_ys = data
batch_size, _, height, width, channel = support_xs.size()
support_xs = support_xs.view(-1, height, width, channel)
query_xs = query_xs.view(-1, height, width, channel)
support_ys = support_ys.view(-1).detach().numpy() # TODO
query_ys = query_ys.view(-1).detach().numpy()
return (support_xs, support_ys, query_xs, query_ys)
def get_batch_cycle(meta_trainloader_it, meta_trainloader):
try:
data = next(meta_trainloader_it)
except StopIteration:
meta_trainloader_it = iter(meta_trainloader)
data = next(meta_trainloader_it)
return data
def log_episode(novel_labels,
vocab_novel,
epoch,
novel_acc,
base_acc,
running_base,
running_novel):
avg_score = (novel_acc + base_acc) / 2
running_avg = (running_base + running_novel) / 2
print('\n{:25} {:}\n'
'{:25} {:}\n'
'{:25} {:}\n'
'{:25} {:.4f}\n'
'{:25} {:.4f}\n'
'{:25} {:.4f}\n'
'{:25} {:.4f}\n'
'{:25} {:.4f}\n'
'{:25} {:.4f}\n'.format("Classes:",
novel_labels,
"Labels:",
vocab_novel,
"Fine-tuning epochs:",
epoch-1,
"Novel acc:",
novel_acc,
"Base acc:",
base_acc,
"Average:",
avg_score,
"Runnning Base Avg:",
running_base,
"Running Novel Avg:",
running_novel,
"Running Average:",
running_avg,
), flush=True)
def validate(val_loader, model, criterion, opt):
"""One epoch validation"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for idx, (input, target, _) in enumerate(val_loader):
input = input.float()
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda().long()
# compute output
output = model(input)
if opt.dataset == "tieredImageNet" and opt.augment_pretrain_wtrainb:
output = output[:,:200]
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % opt.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
idx, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg, losses.avg
``` |
{
"source": "jjuppe/Hybrid-Genetic-Search-with-Adaptive-Diversity-Control",
"score": 4
} |
#### File: jjuppe/Hybrid-Genetic-Search-with-Adaptive-Diversity-Control/tools.py
```python
import numpy as np
def get_rank_array(arr, type="asc"):
"""
This helper function returns an array indicating the rank of the value at each position
Args:
arr: NumpyArray
type: asc or desc
"""
length_array = len(arr)
if type == "asc":
sort_key = arr.argsort()
else:
sort_key = (-arr).argsort()
rank_array = [True] * length_array
for i in range(length_array):
# get the position of the value in the original array that is at rank i
val_pos = sort_key[i]
# set the rank i at position val_pos
rank_array[val_pos] = i
return rank_array
def distance(start, stop):
"""
Basic distance function for euclidean distance
Args:
start: VRPNode as start
stop: VRPNode as stop
Returns: euclidean distance
"""
x_dist = np.subtract(start.x, stop.x)
y_dist = np.subtract(start.y, stop.y)
x_dist_square = np.square(x_dist)
y_dist_square = np.square(y_dist)
return np.sqrt(np.add(x_dist_square, y_dist_square))
def normalized_hamming_distance(instance1, instance2):
"""
Annotation: The paper computes the difference based on the depot and service chromosome.
Since we simplified the problem by dropping the time-window constraint, the distance is computed on the depot chromosome only.
Args:
instance1: object of type instance
instance2: object of type instance
Returns:
"""
depot_chromosome1 = instance1.depot_chromosome
depot_chromosome2 = instance2.depot_chromosome
n = instance1.vrp_data.nr_customers
# get sum of equal depot_allocation of customers
sum_val = 0
for key in depot_chromosome1:
if depot_chromosome1[key] != depot_chromosome2[key]:
sum_val += 1
return sum_val / n
def get_hemming_distance_matrix(merged_population):
"""
Small helper function to get the normalized hemming distance
Args:
merged_population: merged population of instances of type "individual"
Returns: distance matrix of all individuals contained
"""
dist_matrix = []
for indiv1 in merged_population:
dist_arr = [normalized_hamming_distance(indiv1, indiv2) for indiv2 in merged_population]
dist_matrix.append(dist_arr)
return dist_matrix
def remove_column(matrix, i):
"""
remove all column entries at position i
"""
for row in matrix:
try:
del row[i]
except IndexError:
raise IndexError("column index %d out of range" % i)
class Queue:
""" Helper class for the Split algorithm
This class implements a double-ended queue and some operations that are necessary
for performing the Split algorithm
"""
def __init__(self):
self.queue = list()
def pop_front(self):
self.queue.__delitem__(0)
def pop_back(self):
self.queue.pop()
def front(self):
return self.queue[0]
def front2(self):
return self.queue[1]
def back(self):
return self.queue[-1]
def push_back(self, t):
self.queue.append(t)
if __name__ == "__main__":
# TODO Test diversity population array
print(None)
``` |
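`get_rank_array` assigns each position the rank of its value. The same result can be obtained (and checked) with a vectorised NumPy permutation inverse; here for the ascending case:

```python
import numpy as np

arr = np.array([0.30, 0.10, 0.50, 0.20])
sort_key = arr.argsort()                    # positions in ascending value order
rank_array = np.empty_like(sort_key)
rank_array[sort_key] = np.arange(len(arr))  # invert the permutation
print(rank_array.tolist())                  # [2, 0, 3, 1], same as get_rank_array(arr, "asc")
```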
{
"source": "jjurm/bonedoctor",
"score": 3
} |
#### File: python/abnormality_classifier/train_model.py
```python
from keras.applications.densenet import DenseNet169
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
import math
import numpy
# load data
print('loading data')
training_images = numpy.load('training_images')
training_labels = numpy.load('training_labels')
validation_images = numpy.load('validation_images')
validation_labels = numpy.load('validation_labels')
# build model and initialise with imagenet pretrained weights
print('building model and initialising weights')
densenet = DenseNet169(include_top=False, weights=None, input_tensor=None, input_shape=(320,320,3), pooling=None, classes=False)
densenet.load_weights('densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
model = Sequential()
model.add(densenet)
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# loss function
normal_fraction =
abnormal_fraction =
def weighted_binary_crossentropy(y_true, y_pred):
return - normal_fraction * y_true * math.log(y_pred) - abnormal_fraction * (1 - y_true) * math.log(1 - y_pred)
# compile model
print('compiling model')
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# set checkpoints
print('setting checkpoints')
filepath="weights-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=False)
callbacks_list = [checkpoint]
# fit model and evaluate on validation dataset
print('fitting model')
model.fit(x=training_images, y=training_labels, validation_data=(validation_images, validation_labels), callbacks=callbacks_list, epochs=3, batch_size=8)
``` |
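The custom loss above is left incomplete (the class fractions are blank) and calls `math.log`, which does not operate on Keras tensors; the model is therefore compiled with plain `binary_crossentropy`. A backend-based sketch of the same weighted binary cross-entropy, with hypothetical placeholder fractions, could look like this:

```python
from keras import backend as K

normal_fraction = 0.6      # hypothetical placeholder: |normal| / (|normal| + |abnormal|)
abnormal_fraction = 0.4    # hypothetical placeholder: 1 - normal_fraction

def weighted_binary_crossentropy(y_true, y_pred):
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())   # avoid log(0)
    loss = -(normal_fraction * y_true * K.log(y_pred)
             + abnormal_fraction * (1.0 - y_true) * K.log(1.0 - y_pred))
    return K.mean(loss)
```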
{
"source": "jjurm/py-lcd",
"score": 3
} |
#### File: jjurm/py-lcd/CharLCD.py
```python
from time import sleep
class CharLCD:
MODE_COMMAND = 0
MODE_DATA = 1
LOW = 0
HIGH = 1
# commands
CMD_CLEARDISPLAY = 0x01
CMD_RETURNHOME = 0x02
CMD_ENTRYMODESET = 0x04
CMD_DISPLAYCONTROL = 0x08
CMD_CURSORSHIFT = 0x10
CMD_FUNCTIONSET = 0x20
CMD_SETCGRAMADDR = 0x40
CMD_SETDDRAMADDR = 0x80
# Entry mode
SHIFTED_TRUE = 1
SHIFTED_FALSE = 0
CURSORMOVEMENT_INCREASE = 1
CURSORMOVEMENT_DECREASE = 0
# Display control
BLINKING_ON = 1
BLINKING_OFF = 0
CURSOR_ON = 1
CURSOR_OFF = 0
DISPLAY_ON = 1
DISPLAY_OFF = 0
# Cursor shift
SHIFTDIRECTION_RIGHT = 1
SHIFTDIRECTION_LEFT = 0
SHIFT_DISPLAYSHIFT = 1
SHIFT_CURSORMOVE = 0
# Function set
DOTSIZE_5x10DOTS = 1 # or 5x11
DOTSIZE_5x7DOTS = 0 # or 5x8
MULTILINE_2LINE = 1
MULTILINE_1LINE = 0
BITMODE_8BIT = 1
BITMODE_4BIT = 0
def __init__(self, pin_rs=25, pin_e=24, pins_db=[12, 16, 20, 21],
pin_backlight=None,
cols=16, rows=2, dotsize=None):
# === Default configuration ===
# Entry mode
self.shifted = self.SHIFTED_FALSE
self.cursorMovement = self.CURSORMOVEMENT_INCREASE
# Display control
self.blinking = self.BLINKING_OFF
self.cursor = self.CURSOR_OFF
self.display = self.DISPLAY_ON
# Function set
self.dotsize = self.DOTSIZE_5x7DOTS
self.multiline = self.MULTILINE_1LINE
self.bitmode = self.BITMODE_8BIT
# === Arguments ===
self.pin_rs = pin_rs
self.pin_e = pin_e
self.pins_db = pins_db
if len(self.pins_db) < 8:
self.bitmode = self.BITMODE_4BIT
self.cols = cols
self.rows = rows
if dotsize == None:
dotsize = self.DOTSIZE_5x7DOTS
self.dotsize = dotsize
self.multiline = (self.MULTILINE_2LINE if rows >= 2 else self.MULTILINE_1LINE)
# === GPIO ===
import RPi.GPIO as GPIO
self.GPIO = GPIO
self.GPIO.setmode(self.GPIO.BCM)
self.GPIO.setwarnings(False)
self.GPIO.setup(self.pin_e, self.GPIO.OUT)
self.GPIO.setup(self.pin_rs, self.GPIO.OUT)
pins = 4 if self.bitmode == self.BITMODE_4BIT else 8
for i in range(pins):
self.GPIO.setup(self.pins_db[-pins + i], self.GPIO.OUT)
self.GPIO.output(self.pin_e, self.LOW)
# Backlight pin
self.pin_backlight = pin_backlight
if self.pin_backlight is not None:
self.GPIO.setup(self.pin_backlight, self.GPIO.OUT)
self.GPIO.output(self.pin_backlight, self.LOW)
# === Initialization ===
if self.bitmode == self.BITMODE_8BIT:
# 8bit mode
# initialisation sequence of 3 function set commands
self.pushFunctionSet()
self.msleep(4.5) # wait > 4.1ms
# second attempt
self.pushFunctionSet()
self.usleep(150) # wait > 100us
# third attempt
self.pushFunctionSet()
else:
# 4bit mode
# initialisation starts in 8bit mode
self.write4bits(0x03)
self.msleep(4.5) # wait > 4.1ms
# second attempt
self.write4bits(0x03)
self.usleep(150) # wait > 100us
# third attempt
self.write4bits(0x03)
# proceed to 4bit communication
self.write4bits(0x02)
# finally start configuration
self.pushFunctionSet()
self.pushDisplayControl()
self.clear()
self.pushEntryMode()
def toRange(self, val, minimum, maximum):
''' Ensures that the value will be in the specified range '''
if val > maximum:
val = maximum
if val < minimum:
val = minimum
return val
def msleep(self, milliseconds):
''' Sleeps for specified number of milliseconds '''
sleep(milliseconds / float(1000))
def usleep(self, microseconds):
''' Sleeps for specified number of microseconds '''
sleep(microseconds / float(1000000))
def pulseEnable(self):
''' Makes standard short pulse on Enable pin '''
self.GPIO.output(self.pin_e, False)
self.usleep(10) # enable pulse must be > 450ns
self.GPIO.output(self.pin_e, True)
self.usleep(10) # enable pulse must be > 450ns
self.GPIO.output(self.pin_e, False)
self.usleep(100) # commands need > 37us to settle
def writeBits(self, bits, value):
''' Writes specific number of bits of value and makes pulse '''
for i in range(bits):
self.GPIO.output(self.pins_db[-bits + i], (value >> i) & 0x01)
self.pulseEnable()
def write4bits(self, value):
''' Writes last 4 bits of value and makes pulse '''
self.writeBits(4, value)
def write8bits(self, value):
''' Writes last 8 bits of value and makes pulse '''
self.writeBits(8, value)
def send(self, value, mode):
''' Writes value with given mode, auto 4/8-bit selection '''
self.GPIO.output(self.pin_rs, mode)
if self.bitmode == self.BITMODE_8BIT:
self.write8bits(value & 0xFF)
else:
self.write4bits((value >> 4) & 0xF)
self.write4bits(value & 0xF)
def command(self, value):
''' Sends value as command '''
self.send(value, self.MODE_COMMAND)
def data(self, value):
''' Sends value as data '''
self.send(value, self.MODE_DATA)
def pushEntryMode(self):
self.command(self.CMD_ENTRYMODESET
| (0x01 * self.shifted)
| (0x02 * self.cursorMovement)
)
def pushDisplayControl(self):
self.command(self.CMD_DISPLAYCONTROL
| (0x01 * self.blinking)
| (0x02 * self.cursor)
| (0x04 * self.display)
)
def pushFunctionSet(self):
self.command(self.CMD_FUNCTIONSET
| (0x04 * self.dotsize)
| (0x08 * self.multiline)
| (0x10 * self.bitmode)
)
def clear(self):
''' Clears display (and returns cursor home) '''
self.command(self.CMD_CLEARDISPLAY)
def home(self):
''' Returns cursor home '''
self.command(self.CMD_RETURNHOME)
def close(self, clear=False):
if clear:
self.clear()
self.GPIO.cleanup()
def moveCursor(self, col=0, row=0):
''' Moves cursor to specified position '''
col = self.toRange(col, 0, self.cols - 1)
row = self.toRange(row, 0, self.rows - 1)
offsets = [0x00, 0x40, 0x00 + self.cols, 0x40 + self.cols]
self.command(self.CMD_SETDDRAMADDR | (offsets[row] + col))
def shift(self, count=1, display=True):
''' Shifts the cursor given # of times to the left (count can be negative); can also shift display (default) '''
if count > 0:
direction = self.SHIFTDIRECTION_LEFT
elif count < 0:
direction = self.SHIFTDIRECTION_RIGHT
else:
return
count = abs(count)
for i in range(count):
self.command(self.CMD_CURSORSHIFT
| (0x04 * direction)
| (0x08 * (self.SHIFT_DISPLAYSHIFT if display else self.SHIFT_CURSORMOVE))
)
def setShifted(self, shifted):
''' When enabled, display will shift after each data operation '''
self.shifted = bool(shifted)
self.pushEntryMode()
def setCursorMovement(self, cursorMovement):
''' Set direction to move cursor after each data operation '''
if cursorMovement == -1:
self.cursorMovement = cursorMovement
else:
self.cursorMovement = bool(cursorMovement)
self.pushEntryMode()
def setBlinking(self, blinking):
''' Turns blinking cursor on/off '''
self.blinking = bool(blinking)
self.pushDisplayControl()
def setCursor(self, cursor):
''' Turns cursor pattern on/off '''
self.cursor = bool(cursor)
self.pushDisplayControl()
def setDisplay(self, display):
''' Turns display on/off '''
self.display = bool(display)
self.pushDisplayControl()
def createChar(self, addr, bytemap):
''' Creates character at given address (0-7) '''
addr &= 0x7
self.command(self.CMD_SETCGRAMADDR | (addr << 3))
for i in range(8):
if i < len(bytemap):
self.data(bytemap[i])
else:
self.data(0x00)
def setBackLight(self, on):
if self.pin_backlight is not None:
self.GPIO.output(self.pin_backlight, on)
def write(self, string):
''' Writes string char by char '''
for char in string:
self.data(ord(char))
def wline(self, line, string=""):
''' Writes string to specified line (clears whole line) '''
string = string.ljust(self.cols)
self.moveCursor(0, line)
self.write(string)
``` |
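A usage sketch for the class above; it needs a Raspberry Pi with `RPi.GPIO` installed, and the BCM pin numbers below are hypothetical and must match your own wiring:

```python
from CharLCD import CharLCD   # assumes the module above is importable as CharLCD

lcd = CharLCD(pin_rs=25, pin_e=24, pins_db=[12, 16, 20, 21], cols=16, rows=2)
lcd.wline(0, "Hello")          # first line, padded to 16 columns
lcd.wline(1, "py-lcd")         # second line
lcd.setCursor(True)            # show the cursor pattern
lcd.close(clear=False)         # release GPIO without clearing the display
```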
{
"source": "jjur/nbpickup-client-python",
"score": 2
} |
#### File: nbpickup/EventHandlers/autosave_gradebook.py
```python
import logging
from watchdog.events import FileSystemEventHandler
# Setting up the logging
logger = logging.getLogger(__name__)
log_file = logging.FileHandler("nbpickup_autosaving.log")
log_console = logging.StreamHandler()
log_file.setLevel(logging.DEBUG)
log_console.setLevel(logging.WARNING)
log_file.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log_console.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
logger.addHandler(log_file)
logger.addHandler(log_console)
logger.setLevel(logging.DEBUG)
class GradebookAutoSaveEventHandler(FileSystemEventHandler):
"""Captures and deals with autosaving of nbpickup files"""
def __init__(self, nbpickup_client, folder="/", callback=False):
super().__init__()
self.nbpickup = nbpickup_client
self.folder = folder
self.callback = callback
def on_moved(self, event):
"""Handles both rename and move events"""
super().on_moved(event)
logger.warning("Gradebook Moved: from %s to %s" % (event.src_path, event.dest_path))
def on_created(self, event):
super().on_created(event)
what = 'directory' if event.is_directory else 'file'
logger.info("Created %s: %s" % (what, event.src_path))
if not event.is_directory:
path = "/".join(event.src_path.split("/")[:-1])
filename = event.src_path.split("/")[-1]
self.nbpickup.upload_gradebook_file(filename, path)
def on_deleted(self, event):
super().on_deleted(event)
logger.warning("Gradebook Deleted: %s" % (event.src_path))
def on_modified(self, event):
super().on_modified(event)
what = 'directory' if event.is_directory else 'file'
logger.info("Modified %s: %s" % (what, event.src_path))
if not event.is_directory:
path = "/".join(event.src_path.split("/")[:-1])
filename = event.src_path.split("/")[-1]
self.nbpickup.upload_gradebook_file(filename, path)
# Callback for updating grades
if self.callback:
self.callback()
``` |
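A wiring sketch for the handler above with a watchdog `Observer`; the fake client is a stand-in for an authenticated nbpickup client (only `upload_gradebook_file` is used by the handler), the import path follows the package layout shown in the file header, and a local `gradebook/` directory is assumed to exist:

```python
import time
from watchdog.observers import Observer
from nbpickup.EventHandlers.autosave_gradebook import GradebookAutoSaveEventHandler

class FakeClient:                          # stand-in for the real nbpickup client
    def upload_gradebook_file(self, filename, path):
        print("would upload", path + "/" + filename)

observer = Observer()
handler = GradebookAutoSaveEventHandler(FakeClient(), folder="gradebook/")
observer.schedule(handler, path="gradebook/", recursive=True)   # directory must exist
observer.start()
try:
    time.sleep(5)                          # watch for a few seconds, then stop
finally:
    observer.stop()
    observer.join()
```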
{
"source": "JJusti/CrypTen",
"score": 2
} |
#### File: CrypTen/test/test_mpc.py
```python
import itertools
import logging
import math
import unittest
import crypten
import crypten.communicator as comm
import torch
import torch.nn.functional as F
from crypten.common.functions.pooling import _pool2d_reshape
from crypten.common.rng import generate_kbit_random_tensor, generate_random_ring_element
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from crypten.mpc import MPCTensor, ptype as Ptype
from crypten.mpc.primitives import ArithmeticSharedTensor, BinarySharedTensor
from test.multiprocess_test_case import MultiProcessTestCase, get_random_test_tensor
class TestMPC(object):
"""
This class tests all functions of MPCTensor.
"""
def _get_random_test_tensor(self, *args, **kwargs):
return get_random_test_tensor(device=self.device, *args, **kwargs)
def _check(self, encrypted_tensor, reference, msg, dst=None, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text(dst=dst)
if dst is not None and dst != self.rank:
self.assertIsNone(tensor)
return
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
self.assertTrue(is_float_tensor(reference), "reference must be a float")
if tensor.device != reference.device:
tensor = tensor.cpu()
reference = reference.cpu()
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
test_passed = test_passed.gt(0).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Reference %s" % reference)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def _check_tuple(self, encrypted_tuple, reference, msg, tolerance=None):
self.assertTrue(isinstance(encrypted_tuple, tuple))
self.assertEqual(len(encrypted_tuple), len(reference))
for i in range(len(reference)):
self._check(encrypted_tuple[i], reference[i], msg, tolerance=tolerance)
def test_repr(self):
a = self._get_random_test_tensor(size=(1,))
arithmetic = MPCTensor(a, ptype=Ptype.arithmetic)
binary = MPCTensor(a, ptype=Ptype.binary)
# Make sure these don't crash
print(arithmetic)
repr(arithmetic)
print(binary)
repr(binary)
def test_from_shares(self):
"""Tests MPCTensor.from_shares() functionality."""
# settings for test:
num_parties = int(self.world_size)
size = (5, 4)
def _generate_tensor(ptype):
reference = self._get_random_test_tensor(size=size, is_float=False)
# generate arithmetic sharing of reference tensor:
if ptype == Ptype.arithmetic:
zero_shares = generate_random_ring_element(
(num_parties, *size), device=self.device
)
zero_shares = zero_shares - zero_shares.roll(1, dims=0)
shares = list(zero_shares.unbind(0))
shares[0] += reference
# generate binary sharing of reference tensor:
else:
zero_shares = generate_kbit_random_tensor(
(num_parties, *size), device=self.device
)
zero_shares = zero_shares ^ zero_shares.roll(1, dims=0)
shares = list(zero_shares.unbind(0))
shares[0] ^= reference
# return shares and reference:
return shares, reference
# test both types:
for ptype in [Ptype.arithmetic, Ptype.binary]:
# generate shares, sync them between parties, and create tensor:
shares, reference = _generate_tensor(ptype)
share = comm.get().scatter(shares, 0)
encrypted_tensor = MPCTensor.from_shares(share, ptype=ptype)
# check resulting tensor:
self.assertIsInstance(encrypted_tensor, MPCTensor)
self.assertEqual(encrypted_tensor.ptype, ptype)
self.assertIsInstance(encrypted_tensor._tensor, ptype.to_tensor())
decrypted_tensor = encrypted_tensor.reveal()
self.assertTrue(torch.all(decrypted_tensor.eq(reference)).item())
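# Illustration of the sharing schemes exercised here: an arithmetic sharing of a
# secret is any list of ring elements that sums to it (e.g. 7 == 12 + (-9) + 4
# for three parties), while a binary sharing XORs to it
# (e.g. 0b0111 == 0b1100 ^ 0b1010 ^ 0b0001).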
def test_share_attr(self):
"""Tests share attribute getter and setter"""
for is_float in (True, False):
reference = self._get_random_test_tensor(is_float=is_float)
encrypted_tensor = MPCTensor(reference)
underlying_tensor = encrypted_tensor.share
self.assertTrue(
torch.equal(encrypted_tensor.share, underlying_tensor),
"share getter failed",
)
new_share = self._get_random_test_tensor(is_float=False)
encrypted_tensor.share = new_share
self.assertTrue(
torch.equal(encrypted_tensor.share, new_share), "share setter failed"
)
def test_encrypt_decrypt(self):
"""
Tests tensor encryption and decryption for both positive
and negative values.
"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
# encryption and decryption without source:
reference = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(reference)
self._check(encrypted_tensor, reference, "en/decryption failed")
for dst in range(self.world_size):
self._check(
encrypted_tensor, reference, "en/decryption failed", dst=dst
)
# test creation via new() function:
encrypted_tensor2 = encrypted_tensor.new(reference)
self.assertIsInstance(
encrypted_tensor2, MPCTensor, "new() returns incorrect type"
)
self._check(encrypted_tensor2, reference, "en/decryption failed")
# TODO: Implement broadcast_size on GPU
if self.device.type == "cuda":
continue
# encryption and decryption with source:
for src in range(self.world_size):
input_tensor = reference if src == self.rank else []
encrypted_tensor = MPCTensor(input_tensor, src=src, broadcast_size=True)
for dst in range(self.world_size):
self._check(
encrypted_tensor,
reference,
"en/decryption with broadcast_size failed",
dst=dst,
)
# MPCTensors cannot be initialized with None:
with self.assertRaises(ValueError):
_ = MPCTensor(None)
def test_arithmetic(self):
"""Tests arithmetic functions on encrypted tensor."""
arithmetic_functions = ["add", "add_", "sub", "sub_", "mul", "mul_"]
for func in arithmetic_functions:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True)
encrypted = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted, func)(encrypted2)
self._check(
encrypted_out,
reference,
"%s %s failed"
% ("private" if tensor_type == MPCTensor else "public", func),
)
if "_" in func:
# Check in-place op worked
self._check(
encrypted,
reference,
"%s %s failed"
% ("private" if tensor_type == MPCTensor else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed"
% ("private" if tensor_type == MPCTensor else "public", func),
)
# Check encrypted vector with encrypted scalar works.
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True, size=(1,))
encrypted1 = MPCTensor(tensor1)
encrypted2 = MPCTensor(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted1, func)(encrypted2)
self._check(encrypted_out, reference, "private %s failed" % func)
tensor = self._get_random_test_tensor(is_float=True)
reference = tensor * tensor
encrypted = MPCTensor(tensor)
encrypted_out = encrypted.square()
self._check(encrypted_out, reference, "square failed")
# Test radd, rsub, and rmul
reference = 2 + tensor1
encrypted = MPCTensor(tensor1)
encrypted_out = 2 + encrypted
self._check(encrypted_out, reference, "right add failed")
reference = 2 - tensor1
encrypted_out = 2 - encrypted
self._check(encrypted_out, reference, "right sub failed")
reference = 2 * tensor1
encrypted_out = 2 * encrypted
self._check(encrypted_out, reference, "right mul failed")
def test_sum(self):
"""Tests sum reduction on encrypted tensor."""
tensor = self._get_random_test_tensor(size=(100, 100), is_float=True)
encrypted = MPCTensor(tensor)
self._check(encrypted.sum(), tensor.sum(), "sum failed")
for dim in [0, 1]:
reference = tensor.sum(dim)
encrypted_out = encrypted.sum(dim)
self._check(encrypted_out, reference, "sum failed")
def test_prod(self):
"""Tests prod reduction on encrypted tensor."""
tensor = self._get_random_test_tensor(size=(3, 3), max_value=3, is_float=False)
encrypted = MPCTensor(tensor)
self._check(encrypted.prod(), tensor.prod().float(), "prod failed")
tensor = self._get_random_test_tensor(
size=(5, 5, 5), max_value=3, is_float=False
)
encrypted = MPCTensor(tensor)
for dim in [0, 1, 2]:
reference = tensor.prod(dim).float()
encrypted_out = encrypted.prod(dim)
self._check(encrypted_out, reference, "prod failed")
def test_ptype(self):
"""Test that ptype attribute creates the correct type of encrypted tensor"""
ptype_values = [crypten.mpc.arithmetic, crypten.mpc.binary]
tensor_types = [ArithmeticSharedTensor, BinarySharedTensor]
for i, curr_ptype in enumerate(ptype_values):
tensor = self._get_random_test_tensor(is_float=False)
encr_tensor = crypten.cryptensor(tensor, ptype=curr_ptype)
assert isinstance(encr_tensor._tensor, tensor_types[i]), "ptype test failed"
def test_div(self):
"""Tests division of encrypted tensor by scalar and tensor."""
for function in ["div", "div_"]:
for scalar in [2, 2.0]:
tensor = self._get_random_test_tensor(is_float=True)
reference = tensor.float().div(scalar)
encrypted_tensor = MPCTensor(tensor)
encrypted_tensor = getattr(encrypted_tensor, function)(scalar)
self._check(encrypted_tensor, reference, "scalar division failed")
# multiply denominator by 10 to avoid dividing by small num
divisor = self._get_random_test_tensor(is_float=True, ex_zero=True) * 10
reference = tensor.div(divisor)
encrypted_tensor = MPCTensor(tensor)
encrypted_tensor = getattr(encrypted_tensor, function)(divisor)
self._check(encrypted_tensor, reference, "tensor division failed")
def test_mean(self):
"""Tests computing means of encrypted tensors."""
tensor = self._get_random_test_tensor(size=(5, 10, 15), is_float=True)
encrypted = MPCTensor(tensor)
self._check(encrypted.mean(), tensor.mean(), "mean failed")
for dim in [0, 1, 2]:
reference = tensor.mean(dim)
encrypted_out = encrypted.mean(dim)
self._check(encrypted_out, reference, "mean failed")
def test_var(self):
"""Tests computing variances of encrypted tensors."""
tensor = self._get_random_test_tensor(size=(5, 10, 15), is_float=True)
encrypted = MPCTensor(tensor)
self._check(encrypted.var(), tensor.var(), "var failed")
for dim in [0, 1, 2]:
reference = tensor.var(dim)
encrypted_out = encrypted.var(dim)
self._check(encrypted_out, reference, "var failed")
def test_matmul(self):
"""Test matrix multiplication."""
for tensor_type in [lambda x: x, MPCTensor]:
tensor = self._get_random_test_tensor(max_value=7, is_float=True)
for width in range(2, tensor.nelement()):
matrix_size = (tensor.nelement(), width)
matrix = self._get_random_test_tensor(
max_value=7, size=matrix_size, is_float=True
)
reference = tensor.matmul(matrix)
encrypted_tensor = MPCTensor(tensor)
matrix = tensor_type(matrix)
encrypted_tensor = encrypted_tensor.matmul(matrix)
self._check(
encrypted_tensor,
reference,
"Private-%s matrix multiplication failed"
% ("private" if tensor_type == MPCTensor else "public"),
)
def test_dot_ger(self):
"""Test dot product of vector and encrypted tensor."""
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True).squeeze()
tensor2 = self._get_random_test_tensor(is_float=True).squeeze()
dot_reference = tensor1.dot(tensor2)
ger_reference = torch.ger(tensor1, tensor2)
tensor2 = tensor_type(tensor2)
# dot
encrypted_tensor = MPCTensor(tensor1)
encrypted_out = encrypted_tensor.dot(tensor2)
self._check(
encrypted_out,
dot_reference,
"%s dot product failed"
% ("private" if tensor_type == MPCTensor else "public"),
)
# ger
encrypted_tensor = MPCTensor(tensor1)
encrypted_out = encrypted_tensor.ger(tensor2)
self._check(
encrypted_out,
ger_reference,
"%s outer product failed"
% ("private" if tensor_type == MPCTensor else "public"),
)
def test_squeeze(self):
tensor = self._get_random_test_tensor(is_float=True)
for dim in [0, 1, 2]:
# Test unsqueeze
reference = tensor.unsqueeze(dim)
encrypted = MPCTensor(tensor)
encrypted_out = encrypted.unsqueeze(dim)
self._check(encrypted_out, reference, "unsqueeze failed")
# Test squeeze
encrypted = MPCTensor(tensor.unsqueeze(0))
encrypted_out = encrypted.squeeze()
self._check(encrypted_out, reference.squeeze(), "squeeze failed")
# Check that the encrypted_out and encrypted point to the same
# thing.
encrypted_out[0:2] = torch.tensor(
[0, 1], dtype=torch.float, device=self.device
)
ref = encrypted.squeeze().get_plain_text()
self._check(encrypted_out, ref, "squeeze failed")
def test_transpose(self):
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
if len(size) == 2: # t() asserts dim == 2
reference = tensor.t()
encrypted_out = encrypted_tensor.t()
self._check(encrypted_out, reference, "t() failed")
for dim0 in range(len(size)):
for dim1 in range(len(size)):
reference = tensor.transpose(dim0, dim1)
encrypted_out = encrypted_tensor.transpose(dim0, dim1)
self._check(encrypted_out, reference, "transpose failed")
def test_conv1d_smaller_signal_one_channel(self):
self._conv1d(5, 1)
def test_conv1d_smaller_signal_many_channels(self):
self._conv1d(5, 5)
def test_conv1d_larger_signal_one_channel(self):
self._conv1d(16, 1)
def test_conv1d_larger_signal_many_channels(self):
self._conv1d(16, 5)
def _conv1d(self, signal_size, in_channels):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
kernel_sizes = [1, 2, 3]
ochannels = [1, 3, 6]
paddings = [0, 1]
strides = [1, 2]
dilations = [1, 2]
groupings = [1, 2]
for func_name in ["conv1d", "conv_transpose1d"]:
for kernel_type in [lambda x: x, MPCTensor]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches,
kernel_sizes,
ochannels,
paddings,
strides,
dilations,
groupings,
):
# group convolution is not supported on GPU
if self.device.type == "cuda" and groups > 1:
continue
input_size = (batches, in_channels * groups, signal_size)
signal = self._get_random_test_tensor(
size=input_size, is_float=True
)
if func_name == "conv1d":
k_size = (out_channels * groups, in_channels, kernel_size)
else:
k_size = (in_channels * groups, out_channels, kernel_size)
kernel = self._get_random_test_tensor(size=k_size, is_float=True)
reference = getattr(F, func_name)(
signal,
kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
encrypted_signal = MPCTensor(signal)
encrypted_kernel = kernel_type(kernel)
encrypted_conv = getattr(encrypted_signal, func_name)(
encrypted_kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
self._check(encrypted_conv, reference, f"{func_name} failed")
def test_conv2d_square_image_one_channel(self):
self._conv2d((5, 5), 1, "conv2d")
def test_conv_transpose2d_square_image_one_channel(self):
self._conv2d((5, 5), 1, "conv_transpose2d")
def test_conv2d_square_image_many_channels(self):
self._conv2d((5, 5), 5, "conv2d")
def test_conv_transpose2d_square_image_many_channels(self):
self._conv2d((5, 5), 5, "conv_transpose2d")
def test_conv2d_rectangular_image_one_channel(self):
self._conv2d((16, 7), 1, "conv2d")
def test_conv_transpose2d_rectangular_image_one_channel(self):
self._conv2d((16, 7), 1, "conv_transpose2d")
def test_conv2d_rectangular_image_many_channels(self):
self._conv2d((16, 7), 5, "conv2d")
def test_conv_transpose2d_rectangular_image_many_channels(self):
self._conv2d((16, 7), 5, "conv_transpose2d")
def _conv2d(self, image_size, in_channels, func_name):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
kernel_sizes = [(1, 1), (2, 2), (2, 3)]
ochannels = [1, 3]
paddings = [0, 1, (0, 1)]
strides = [1, 2, (1, 2)]
dilations = [1, 2]
groupings = [1, 2]
assert func_name in [
"conv2d",
"conv_transpose2d",
], f"Invalid func_name: {func_name}"
for kernel_type in [lambda x: x, MPCTensor]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches,
kernel_sizes,
ochannels,
paddings,
strides,
dilations,
groupings,
):
# group convolution is not supported on GPU
if self.device.type == "cuda" and groups > 1:
continue
# sample input:
input_size = (batches, in_channels * groups, *image_size)
input = self._get_random_test_tensor(size=input_size, is_float=True)
# sample filtering kernel:
if func_name == "conv2d":
k_size = (out_channels * groups, in_channels, *kernel_size)
else:
k_size = (in_channels * groups, out_channels, *kernel_size)
kernel = self._get_random_test_tensor(size=k_size, is_float=True)
# perform filtering:
encr_matrix = MPCTensor(input)
encr_kernel = kernel_type(kernel)
encr_conv = getattr(encr_matrix, func_name)(
encr_kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
# check that result is correct:
reference = getattr(F, func_name)(
input,
kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
self._check(encr_conv, reference, "%s failed" % func_name)
def test_max_pooling(self):
"""Test max_pool of encrypted tensor."""
def _assert_index_match(
indices,
encrypted_indices,
matrix_size,
kernel_size,
**kwargs,
):
# Assert each kernel is one-hot
self.assertTrue(
encrypted_indices.get_plain_text()
.sum(-1)
.sum(-1)
.eq(torch.ones_like(indices))
.all(),
"Encrypted indices are not one-hot",
)
# Populate tensor with kernel indices
arange_size = matrix_size[-2:]
index_values = torch.arange(arange_size.numel(), device=indices.device)
index_values = index_values.view(arange_size)
index_values = index_values.expand(matrix_size)
# Ensure encrypted indices are correct
index_mask, size = _pool2d_reshape(index_values, kernel_size, **kwargs)
index_mask = index_mask.view(*size, kernel_size, kernel_size)
crypten_indices = encrypted_indices.mul(index_mask).sum(-1).sum(-1)
self._check(
crypten_indices, indices.float(), "max_pool2d indexing is incorrect"
)
dilations = [1, 2]
for width in range(2, 5):
for kernel_size in range(1, width):
matrix_size = (1, 4, 5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
strides = list(range(1, kernel_size + 1)) + [(1, kernel_size)]
paddings = range(kernel_size // 2 + 1)
for (
stride,
padding,
dilation,
ceil_mode,
return_indices,
) in itertools.product(
strides,
paddings,
dilations,
[False, True],
[False, True],
):
kwargs = {
"stride": stride,
"padding": padding,
"dilation": dilation,
"ceil_mode": ceil_mode,
"return_indices": return_indices,
}
# Skip kernels that lead to 0-size outputs
if (kernel_size - 1) * dilation > width - 1:
continue
reference = F.max_pool2d(matrix, kernel_size, **kwargs)
encrypted_matrix = MPCTensor(matrix)
encrypted_pool = encrypted_matrix.max_pool2d(kernel_size, **kwargs)
if return_indices:
indices = reference[1]
encrypted_indices = encrypted_pool[1]
kwargs.pop("return_indices")
_assert_index_match(
indices,
encrypted_indices,
matrix.size(),
kernel_size,
**kwargs,
)
encrypted_pool = encrypted_pool[0]
reference = reference[0]
self._check(encrypted_pool, reference, "max_pool2d failed")
def test_avg_pooling(self):
"""Test avg_pool of encrypted tensor."""
for width in range(2, 5):
for kernel_size in range(1, width):
matrix_size = (1, 4, 5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
strides = list(range(1, kernel_size + 1)) + [(1, kernel_size)]
paddings = range(kernel_size // 2 + 1)
for stride, padding in itertools.product(strides, paddings):
kwargs = {"stride": stride, "padding": padding}
reference = F.avg_pool2d(matrix, kernel_size, **kwargs)
encrypted_matrix = MPCTensor(matrix)
encrypted_pool = encrypted_matrix.avg_pool2d(kernel_size, **kwargs)
self._check(encrypted_pool, reference, "avg_pool2d failed")
def test_adaptive_pooling(self):
"""test adaptive_avg_pool2d and adaptive_max_pool2d"""
for in_size in range(1, 11):
for out_size in list(range(1, in_size + 1)) + [None]:
input_size = (1, in_size, in_size)
output_size = (out_size, out_size)
tensor = self._get_random_test_tensor(
size=input_size, is_float=True
).unsqueeze(0)
encrypted = MPCTensor(tensor)
# Test adaptive_avg_pool2d
reference = F.adaptive_avg_pool2d(tensor, output_size)
encrypted_out = encrypted.adaptive_avg_pool2d(output_size)
self._check(encrypted_out, reference, "adaptive_avg_pool2d failed")
# Test adaptive_max_pool2d
for return_indices in [False, True]:
reference = F.adaptive_max_pool2d(
tensor, output_size, return_indices=return_indices
)
encrypted_out = encrypted.adaptive_max_pool2d(
output_size, return_indices=return_indices
)
if return_indices:
encrypted_out = encrypted_out[0]
reference = reference[0]
self._check(encrypted_out, reference, "adaptive_max_pool2d failed")
def test_take(self):
"""Tests take function on encrypted tensor"""
tensor_size = [5, 5, 5, 5]
index = torch.tensor(
[[[1, 2], [3, 4]], [[4, 2], [1, 3]]], dtype=torch.long, device=self.device
)
tensor = self._get_random_test_tensor(size=tensor_size, is_float=True)
# Test when dimension!=None
for dimension in range(0, 4):
ndarray = tensor.cpu().numpy()
reference = torch.from_numpy(ndarray.take(index.cpu(), dimension))
encrypted_tensor = MPCTensor(tensor)
encrypted_out = encrypted_tensor.take(index, dimension)
self._check(encrypted_out, reference, "take function failed: dimension set")
# Test when dimension is default (i.e. None)
sizes = [(15,), (5, 10), (15, 10, 5)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
take_indices = [[0], [10], [0, 5, 10]]
for indices in take_indices:
indices = torch.tensor(indices, device=self.device)
self._check(
encrypted_tensor.take(indices),
tensor.take(indices),
f"take failed with indices {indices}",
)
def test_neg(self):
"""Test negative on encrypted tensor."""
for width in range(2, 5):
matrix_size = (5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
encrypted_matrix = MPCTensor(matrix)
self._check(-encrypted_matrix, -matrix, "__neg__ failed")
for func_name in ["neg", "neg_"]:
reference = getattr(matrix, func_name)()
encrypted_output = getattr(encrypted_matrix, func_name)()
self._check(encrypted_output, reference, "%s failed" % func_name)
def test_relu(self):
"""Test relu on encrypted tensor."""
for width in range(2, 5):
matrix_size = (5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
# Generate some negative values
matrix2 = self._get_random_test_tensor(size=matrix_size, is_float=True)
matrix = matrix - matrix2
encrypted_matrix = MPCTensor(matrix)
reference = F.relu_(matrix)
encrypted_matrix = encrypted_matrix.relu()
self._check(encrypted_matrix, reference, "relu failed")
def test_comparators(self):
"""Test comparators (>, >=, <, <=, ==, !=)"""
for comp in ["gt", "ge", "lt", "le", "eq", "ne"]:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True)
encrypted_tensor1 = MPCTensor(tensor1)
encrypted_tensor2 = tensor_type(tensor2)
reference = getattr(tensor1, comp)(tensor2).float()
encrypted_out = getattr(encrypted_tensor1, comp)(encrypted_tensor2)
self._check(encrypted_out, reference, "%s comparator failed" % comp)
# Check deterministic example to guarantee all combinations
tensor1 = torch.tensor([2.0, 3.0, 1.0, 2.0, 2.0])
tensor2 = torch.tensor([2.0, 2.0, 2.0, 3.0, 1.0])
encrypted_tensor1 = MPCTensor(tensor1)
encrypted_tensor2 = tensor_type(tensor2)
reference = getattr(tensor1, comp)(tensor2).float()
encrypted_out = getattr(encrypted_tensor1, comp)(encrypted_tensor2)
self._check(encrypted_out, reference, "%s comparator failed" % comp)
def test_max_min_pairwise(self):
"""Tests max and min for the deterministic constant (n^2) algorithm"""
self._max_min("pairwise")
def test_max_min_log_reduction(self):
"""Tests max and min for log reduction algorithm"""
self._max_min("log_reduction")
def test_max_min_double_log_reduction(self):
"""Tests max and min for double log reduction algorithm"""
self._max_min("double_log_reduction")
def test_max_min_accelerated_cascade(self):
"""Tests max and min for accelerated cascading algorithm"""
self._max_min("accelerated_cascade")
def _max_min(self, method):
"""Test max and min for the specified algorithm"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
test_cases = [
torch.tensor(
[[1, 1, 2, 1, 4, 1, 3, 4]], dtype=torch.float, device=self.device
)
] + [self._get_random_test_tensor(size=size, is_float=False) for size in sizes]
for tensor in test_cases:
tensor = tensor.float()
encrypted_tensor = MPCTensor(tensor)
for comp in ["max", "min"]:
reference = getattr(tensor, comp)()
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)()
self._check(encrypted_out, reference, "%s reduction failed" % comp)
for dim in range(tensor.dim()):
for keepdim in [False, True]:
reference = getattr(tensor, comp)(dim, keepdim=keepdim)
# Test with one_hot = False
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=False
)
# Check max / min values are correct
self._check(
encrypted_out[0], reference[0], "%s reduction failed" % comp
)
# Test argmax / argmin values are correct
out_encr = encrypted_out[1]
out_decr = out_encr.get_plain_text().long()
argmax_ref = reference[1]
# Must index into tensor since ties are broken randomly
# so crypten and PyTorch can return different indices.
# This checks that they index to the same value.
if not keepdim:
out_decr = out_decr.unsqueeze(dim)
argmax_ref = argmax_ref.unsqueeze(dim)
mpc_result = tensor.gather(dim, out_decr)
torch_result = tensor.gather(dim, argmax_ref)
self.assertTrue(
(mpc_result == torch_result).all().item(),
"%s reduction failed" % comp,
)
# Test indices with one_hot = True
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=True
)
# Check argmax results
val_ref = reference[0]
out_encr = encrypted_out[1]
out_decr = out_encr.get_plain_text()
self.assertTrue((out_decr.sum(dim) == 1).all())
self.assertTrue(
(
out_decr.mul(tensor).sum(dim, keepdim=keepdim)
== val_ref
).all()
)
def test_argmax_argmin_pairwise(self):
"""Tests argmax and argmin for the deterministic constant (n^2) algorithm"""
self._argmax_argmin("pairwise")
def test_argmax_argmin_log_reduction(self):
"""Tests argmax and argmin for log reduction algorithm"""
self._argmax_argmin("log_reduction")
def test_argmax_argmin_double_log_reduction(self):
"""Tests argmax and argmin for double log reduction algorithm"""
self._argmax_argmin("double_log_reduction")
def test_argmax_argmin_accelerated_cascade(self):
"""Tests argmax and argmin for accelerated cascading algorithm"""
self._argmax_argmin("accelerated_cascade")
def _argmax_argmin(self, method):
"""Test argmax and argmin for specified algorithm"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
test_cases = [
torch.tensor(
[[1, 1, 2, 1, 4, 1, 3, 4]], dtype=torch.float, device=self.device
)
] + [self._get_random_test_tensor(size=size, is_float=False) for size in sizes]
for tensor in test_cases:
tensor = tensor.float()
encrypted_tensor = MPCTensor(tensor)
for comp in ["argmax", "argmin"]:
cmp = comp[3:]
value = getattr(tensor, cmp)()
# test with one_hot = False
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(one_hot=False)
# Must index into tensor since ties are broken randomly
# so crypten and PyTorch can return different indices.
# This checks that they index to the same value.
decrypted_out = encrypted_out.get_plain_text()
if tensor.dim() == 0: # if input is 0-d, argmax should be 0
self.assertEqual(decrypted_out, 0)
else:
decrypted_val = tensor.flatten()[decrypted_out.long()]
self.assertTrue(decrypted_val.eq(value).all().item())
# test with one_hot = True
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(one_hot=True)
one_hot_indices = (tensor == value).float()
decrypted_out = encrypted_out.get_plain_text()
self.assertTrue(decrypted_out.sum() == 1)
self.assertTrue(decrypted_out.mul(one_hot_indices).sum() == 1)
for dim in range(tensor.dim()):
for keepdim in [False, True]:
# Compute one-hot argmax/min reference in plaintext
values, indices = getattr(tensor, cmp)(dim, keepdim=keepdim)
# test with one_hot = False
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=False
)
# Must index into tensor since ties are broken randomly
# so crypten and PyTorch can return different indices.
# This checks that they index to the same value.
decrypted_out = encrypted_out.get_plain_text()
if not keepdim:
decrypted_out = decrypted_out.unsqueeze(dim)
indices = indices.unsqueeze(dim)
decrypted_val = tensor.gather(dim, decrypted_out.long())
reference = tensor.gather(dim, indices)
self.assertTrue(decrypted_val.eq(reference).all().item())
# test with one_hot = True
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=True
)
decrypted_out = encrypted_out.get_plain_text()
if not keepdim:
values = values.unsqueeze(dim)
one_hot_indices = tensor.eq(values).float()
self.assertTrue(decrypted_out.sum(dim).eq(1).all())
self.assertTrue(
decrypted_out.mul(one_hot_indices).sum(dim).eq(1).all()
)
def test_abs_sign(self):
"""Test absolute value function"""
for op in ["abs", "sign"]:
tensor = self._get_random_test_tensor(is_float=True)
if op == "sign":
# do not test on 0 since torch.tensor([0]).sign() = 0
tensor = tensor + (tensor == 0).float()
encrypted_tensor = MPCTensor(tensor)
reference = getattr(tensor, op)()
encrypted_out = getattr(encrypted_tensor, op)()
self._check(encrypted_out, reference, "%s failed" % op)
def test_approximations(self):
"""Test approximate functions (exp, log, sqrt, reciprocal, pos_pow)"""
def test_with_inputs(func, input):
encrypted_tensor = MPCTensor(input)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted_tensor, func)()
self._check(encrypted_out, reference, "%s failed" % func)
# Test on [-10, 10] range
full_range_cases = ["exp"]
tensor = torch.tensor(
[0.01 * i for i in range(-1000, 1001, 1)], device=self.device
)
for func in full_range_cases:
test_with_inputs(func, tensor)
# Test on [-10, 10] range with zero replaced by 1.0 (reciprocal needs non-zero input)
tensor[tensor == 0] = 1.0
non_zero_cases = ["reciprocal"]
for func in non_zero_cases:
test_with_inputs(func, tensor)
# Test on (0, 10] range (log and sqrt need positive input)
tensor = tensor[1001:]
pos_cases = ["log", "sqrt"]
for func in pos_cases:
test_with_inputs(func, tensor)
# Test pos_pow with several exponents
encrypted_tensor = MPCTensor(tensor)
# Reduced the max_value so approximations have less absolute error
tensor_exponent = self._get_random_test_tensor(
max_value=2, size=tensor.size(), is_float=True
)
exponents = [-3, -2, -1, 0, 1, 2, 3, tensor_exponent]
exponents += [MPCTensor(tensor_exponent)]
for p in exponents:
if isinstance(p, MPCTensor):
reference = tensor.pow(p.get_plain_text())
else:
reference = tensor.pow(p)
encrypted_out = encrypted_tensor.pos_pow(p)
self._check(encrypted_out, reference, f"pos_pow failed with power {p}")
def test_norm(self):
"""Tests p-norm"""
for p in [1, 1.5, 2, 3, float("inf"), "fro"]:
for dim in [None, 0, 1, 2]:
tensor = self._get_random_test_tensor(size=(3, 3, 3), is_float=True) / 5
if dim is None:
reference = tensor.norm(p=p)
else:
reference = tensor.norm(p=p, dim=dim)
encrypted = MPCTensor(tensor)
encrypted_out = encrypted.norm(p=p, dim=dim)
self._check(encrypted_out, reference, f"{p}-norm failed", tolerance=0.5)
def test_logistic(self):
"""Tests logistic functions (sigmoid, tanh)"""
tensor = torch.tensor(
[0.01 * i for i in range(-1000, 1001, 1)], device=self.device
)
encrypted_tensor = MPCTensor(tensor)
cases = ["sigmoid", "tanh"]
for func in cases:
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted_tensor, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_hardtanh(self):
tensor = torch.arange(-10, 10, dtype=torch.float32)
encrypted = MPCTensor(tensor)
for minval in range(-10, 10):
for maxval in range(minval, 11):
reference = torch.nn.functional.hardtanh(tensor, minval, maxval)
encrypted_out = encrypted.hardtanh(minval, maxval)
self._check(encrypted_out, reference, "hardtanh failed")
def test_inplace_warning(self):
"""Tests that a warning is logged indicating that the `inplace` kwarg
is ignored when a function is called with `inplace=True`.
"""
tensor = get_random_test_tensor(is_float=True)
encrypted = MPCTensor(tensor)
functions = ["dropout", "_feature_dropout"]
for func in functions:
warning_str = (
f"CrypTen {func} does not support inplace computation during training."
)
with self.assertLogs(logger=logging.getLogger(), level="WARNING") as cm:
getattr(encrypted, func)(inplace=True)
self.assertTrue(f"WARNING:root:{warning_str}" in cm.output)
def test_cos_sin(self):
"""Tests trigonometric functions (cos, sin)"""
tensor = torch.tensor(
[0.01 * i for i in range(-1000, 1001, 1)], device=self.device
)
encrypted_tensor = MPCTensor(tensor)
cases = ["cos", "sin"]
for func in cases:
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted_tensor, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_softmax(self):
"""Test softmax and log_softmax function"""
for softmax_fn in ["softmax", "log_softmax"]:
# Test 0-dim tensor:
tensor = self._get_random_test_tensor(size=(), is_float=True)
reference = getattr(tensor, softmax_fn)(0)
encrypted_tensor = MPCTensor(tensor)
encrypted_out = getattr(encrypted_tensor, softmax_fn)(0)
self._check(encrypted_out, reference, "0-dim tensor %s failed" % softmax_fn)
# Test all other sizes
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 5, 5, 5),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True) / 5
encrypted_tensor = MPCTensor(tensor)
for dim in range(tensor.dim()):
reference = getattr(tensor, softmax_fn)(dim)
encrypted_out = getattr(encrypted_tensor, softmax_fn)(dim)
self._check(encrypted_out, reference, "%s failed" % softmax_fn)
def test_get_set(self):
"""Tests element setting and getting by index"""
for tensor_type in [lambda x: x, MPCTensor]:
for size in range(1, 5):
# Test __getitem__
tensor = self._get_random_test_tensor(size=(size, size), is_float=True)
reference = tensor[:, 0]
encrypted_tensor = MPCTensor(tensor)
encrypted_out = encrypted_tensor[:, 0]
self._check(encrypted_out, reference, "getitem failed")
reference = tensor[0, :]
encrypted_out = encrypted_tensor[0, :]
self._check(encrypted_out, reference, "getitem failed")
# Test __setitem__
tensor2 = self._get_random_test_tensor(size=(size,), is_float=True)
reference = tensor.clone()
reference[:, 0] = tensor2
encrypted_out = MPCTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[:, 0] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
reference = tensor.clone()
reference[0, :] = tensor2
encrypted_out = MPCTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[0, :] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
def test_pad(self):
"""Tests padding"""
sizes = [(1,), (5,), (1, 1), (5, 5), (5, 5, 5), (5, 3, 32, 32)]
pads = [
(0, 0, 0, 0),
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 1, 1, 1),
(2, 2, 1, 1),
(2, 2, 2, 2),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for pad in pads:
for value in [0, 1, 10]:
if tensor.dim() < 2:
pad = pad[:2]
reference = torch.nn.functional.pad(tensor, pad, value=value)
encrypted_value = MPCTensor(value, device=self.device)
encrypted_out = encrypted_tensor.pad(pad, value=encrypted_value)
encrypted_out2 = encrypted_tensor.pad(pad, value=value)
self._check(encrypted_out, reference, "pad failed")
self._check(encrypted_out2, reference, "pad failed")
def test_index_add(self):
"""Test index_add function of encrypted tensor"""
index_add_functions = ["index_add", "index_add_"]
tensor_size1 = [5, 5, 5, 5]
index = torch.tensor(
[1, 2, 3, 4, 4, 2, 1, 3], dtype=torch.long, device=self.device
)
for dimension in range(0, 4):
tensor_size2 = [5, 5, 5, 5]
tensor_size2[dimension] = index.size(0)
for func in index_add_functions:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(
size=tensor_size1, is_float=True
)
tensor2 = self._get_random_test_tensor(
size=tensor_size2, is_float=True
)
encrypted = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(dimension, index, tensor2)
encrypted_out = getattr(encrypted, func)(
dimension, index, encrypted2
)
private_type = tensor_type == MPCTensor
self._check(
encrypted_out,
reference,
"%s %s failed"
% ("private" if private_type else "public", func),
)
if func.endswith("_"):
# Check in-place index_add worked
self._check(
encrypted,
reference,
"%s %s failed"
% ("private" if private_type else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed"
% (
"private" if tensor_type == MPCTensor else "public",
func,
),
)
def test_scatter(self):
"""Test scatter/scatter_add function of encrypted tensor"""
funcs = ["scatter", "scatter_", "scatter_add", "scatter_add_"]
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for func in funcs:
for size in sizes:
for tensor_type in [lambda x: x, MPCTensor]:
for dim in range(len(size)):
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
index = self._get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
encrypted = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(dim, index, tensor2)
encrypted_out = getattr(encrypted, func)(dim, index, encrypted2)
private = tensor_type == MPCTensor
self._check(
encrypted_out,
reference,
"%s %s failed" % ("private" if private else "public", func),
)
if func.endswith("_"):
# Check in-place scatter/scatter_add worked
self._check(
encrypted,
reference,
"%s %s failed"
% ("private" if private else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed"
% ("private" if private else "public", func),
)
def test_broadcast_arithmetic_ops(self):
"""Test broadcast of arithmetic functions."""
arithmetic_functions = ["add", "sub", "mul", "div"]
# TODO: Add broadcasting for pos_pow since it can take a tensor argument
arithmetic_sizes = [
(),
(1,),
(2,),
(1, 1),
(1, 2),
(2, 1),
(2, 2),
(1, 1, 1),
(1, 1, 2),
(1, 2, 1),
(2, 1, 1),
(2, 2, 2),
(1, 1, 1, 1),
(1, 1, 1, 2),
(1, 1, 2, 1),
(1, 2, 1, 1),
(2, 1, 1, 1),
(2, 2, 2, 2),
]
for tensor_type in [lambda x: x, MPCTensor]:
for func in arithmetic_functions:
for size1, size2 in itertools.combinations(arithmetic_sizes, 2):
exclude_zero = True if func == "div" else False
# multiply denominator by 10 to avoid dividing by small num
const = 10 if func == "div" else 1
tensor1 = self._get_random_test_tensor(size=size1, is_float=True)
tensor2 = self._get_random_test_tensor(
size=size2, is_float=True, ex_zero=exclude_zero
)
tensor2 *= const
encrypted1 = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted1, func)(encrypted2)
private = isinstance(encrypted2, MPCTensor)
self._check(
encrypted_out,
reference,
"%s %s broadcast failed"
% ("private" if private else "public", func),
)
# Test with integer tensor
tensor2 = self._get_random_test_tensor(
size=size2, is_float=False, ex_zero=exclude_zero
)
tensor2 *= const
reference = getattr(tensor1, func)(tensor2.float())
encrypted_out = getattr(encrypted1, func)(tensor2)
self._check(
encrypted_out,
reference,
"%s broadcast failed with public integer tensor" % func,
)
def test_broadcast_matmul(self):
"""Test broadcast of matmul."""
matmul_sizes = [(1, 1), (1, 5), (5, 1), (5, 5)]
batch_dims = [(), (1,), (5,), (1, 1), (1, 5), (5, 5)]
for tensor_type in [lambda x: x, MPCTensor]:
for size in matmul_sizes:
for batch1, batch2 in itertools.combinations(batch_dims, 2):
size1 = (*batch1, *size)
size2 = (*batch2, *size)
tensor1 = self._get_random_test_tensor(size=size1, is_float=True)
tensor2 = self._get_random_test_tensor(size=size2, is_float=True)
tensor2 = tensor2.transpose(-2, -1)
encrypted1 = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = tensor1.matmul(tensor2)
encrypted_out = encrypted1.matmul(encrypted2)
private = isinstance(encrypted2, MPCTensor)
self._check(
encrypted_out,
reference,
"%s matmul broadcast failed"
% ("private" if private else "public"),
)
# Test with integer tensor
tensor2 = self._get_random_test_tensor(size=size2, is_float=False)
tensor2 = tensor2.float().transpose(-2, -1)
reference = tensor1.matmul(tensor2)
encrypted_out = encrypted1.matmul(tensor2)
self._check(
encrypted_out,
reference,
"matmul broadcast failed with public integer tensor",
)
def test_inplace(self):
"""Test inplace vs. out-of-place functions"""
for op in ["add", "sub", "mul", "div"]:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True)
reference = getattr(torch, op)(tensor1, tensor2)
encrypted1 = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
input_tensor_id = id(encrypted1._tensor)
input_encrypted_id = id(encrypted1)
# Test that out-of-place functions do not modify the input
private = isinstance(encrypted2, MPCTensor)
encrypted_out = getattr(encrypted1, op)(encrypted2)
self._check(
encrypted1,
tensor1,
"%s out-of-place %s modifies input"
% ("private" if private else "public", op),
)
self._check(
encrypted_out,
reference,
"%s out-of-place %s produces incorrect output"
% ("private" if private else "public", op),
)
self.assertFalse(id(encrypted_out._tensor) == input_tensor_id)
self.assertFalse(id(encrypted_out) == input_encrypted_id)
# Test that in-place functions modify the input
encrypted_out = getattr(encrypted1, op + "_")(encrypted2)
self._check(
encrypted1,
reference,
"%s in-place %s_ does not modify input"
% ("private" if private else "public", op),
)
self._check(
encrypted_out,
reference,
"%s in-place %s_ produces incorrect output"
% ("private" if private else "public", op),
)
self.assertTrue(id(encrypted_out._tensor) == input_tensor_id)
self.assertTrue(id(encrypted_out) == input_encrypted_id)
def test_copy_clone(self):
"""Tests shallow_copy and clone of encrypted tensors."""
sizes = [(5,), (1, 5), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
# test shallow_copy
encrypted_tensor_shallow = encrypted_tensor.shallow_copy()
self.assertEqual(
id(encrypted_tensor_shallow._tensor), id(encrypted_tensor._tensor)
)
self._check(encrypted_tensor_shallow, tensor, "shallow_copy failed")
# test clone
encrypted_tensor_clone = encrypted_tensor.clone()
self.assertNotEqual(
id(encrypted_tensor_clone._tensor), id(encrypted_tensor._tensor)
)
self._check(encrypted_tensor_clone, tensor, "clone failed")
def test_copy_(self):
"""Tests copy_ function."""
sizes = [(5,), (1, 5), (5, 10, 15)]
for size in sizes:
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor1 = MPCTensor(tensor1)
encrypted_tensor2 = MPCTensor(tensor2)
encrypted_tensor1.copy_(encrypted_tensor2)
self._check(encrypted_tensor1, tensor2, "copy_ failed")
def test_index_select(self):
"""Tests index_select of encrypted tensors."""
sizes = [(5,), (5, 10), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
indices = [[0], [0, 3], [0, 2, 4]]
for dim in range(tensor.dim()):
for index in indices:
index_tensor = torch.tensor(
index, dtype=torch.long, device=self.device
)
reference = tensor.index_select(dim, index_tensor)
encrypted_out = encrypted_tensor.index_select(dim, index_tensor)
self._check(
encrypted_out,
reference,
f"index_select failed at dim {dim} and index {index}",
)
def test_narrow(self):
"""Tests narrow function."""
sizes = [(5, 6), (5, 6, 7), (6, 7, 8, 9)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encr_tensor = MPCTensor(tensor)
for dim in range(len(size)):
for start in range(size[dim] - 2):
for length in range(1, size[dim] - start):
tensor_narrow = tensor.narrow(dim, start, length)
encr_tensor_narrow = encr_tensor.narrow(dim, start, length)
self._check(
encr_tensor_narrow,
tensor_narrow,
"narrow failed along dimension %d" % dim,
)
def test_repeat_expand(self):
"""Tests repeat and expand of encrypted tensors."""
sizes = [(1, 8), (4, 1, 8)]
repeat_dims = [(4, 2, 1), (4, 2, 10)]
expand_dims = [(4, 2, 8), (4, 5, 8), (10, 4, 5, 8)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for dims in repeat_dims:
encrypted_tensor_repeated = encrypted_tensor.repeat(*dims)
# test that repeat copies tensor's data
self.assertNotEqual(
id(encrypted_tensor_repeated._tensor), id(encrypted_tensor._tensor)
)
self._check(
encrypted_tensor_repeated,
tensor.repeat(*dims),
f"repeat failed with dims {dims}",
)
for dims in expand_dims:
encrypted_tensor_expanded = encrypted_tensor.expand(*dims)
# test that expand creates a view into the same underlying tensor
self.assertNotEqual(
id(encrypted_tensor_expanded.share), id(encrypted_tensor.share)
)
self._check(
encrypted_tensor_expanded,
tensor.expand(*dims),
f"expand failed with dims {dims}",
)
def test_view_flatten(self):
"""Tests view and flatten of encrypted tensors."""
sizes = [(100,), (4, 25), (2, 5, 10)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for dim in range(tensor.dim()):
self._check(
encrypted_tensor.flatten(start_dim=dim),
tensor.flatten(start_dim=dim),
f"flatten failed with dim {dim}",
)
shapes = [100, (5, 20), (10, 2, 5), (-1, 10)]
for shape in shapes:
self._check(
encrypted_tensor.view(shape),
tensor.view(shape),
f"view failed with shape {shape}",
)
def test_roll(self):
"""Tests roll of encrypted tensors."""
sizes = [(10, 1), (5, 2), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
roll_shifts = [1, 2, 3, (2, 1)]
roll_dims = [0, 1, 0, (0, 1)]
for shifts, dims in zip(roll_shifts, roll_dims):
encrypted_tensor_rolled = encrypted_tensor.roll(shifts, dims=dims)
self.assertEqual(encrypted_tensor_rolled.numel(), tensor.numel())
self._check(
encrypted_tensor_rolled,
tensor.roll(shifts, dims=dims),
f"roll failed with shift {shifts} and dims {dims}",
)
def test_unfold(self):
"""Tests unfold of encrypted tensors."""
tensor_sizes = [(8,), (15, 10, 5), (5, 10, 15, 20)]
for tensor_size in tensor_sizes:
tensor = self._get_random_test_tensor(size=tensor_size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for size, step in itertools.product(range(1, 4), range(1, 4)):
# check unfold along higher dimension if possible
for dim in range(tensor.dim()):
self._check(
encrypted_tensor.unfold(dim, size, step),
tensor.unfold(dim, size, step),
"unfold failed with dim "
f"{dim}, size {size}, and step {step}",
)
def test_to(self):
"""Tests Arithmetic/Binary SharedTensor type conversions."""
from crypten.mpc.ptype import ptype as Ptype
tensor_sizes = [(), (1,), (5,), (1, 1), (5, 5), (1, 1, 1), (5, 5, 5)]
for size in tensor_sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
binary_encrypted_tensor = encrypted_tensor.to(Ptype.binary)
self.assertEqual(binary_encrypted_tensor.ptype, Ptype.binary)
# check original encrypted_tensor was not modified after conversion
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to BinarySharedTensor.",
)
encrypted_from_binary = binary_encrypted_tensor.to(Ptype.arithmetic)
self._check(
encrypted_from_binary,
tensor,
"to failed from BinarySharedTensor to ArithmeticSharedTensor",
)
# Test API
tensor = self._get_random_test_tensor(size=(5,), is_float=True)
encrypted_tensor = MPCTensor(tensor)
if torch.cuda.is_available():
encrypted_tensor = encrypted_tensor.to("cuda")
self.assertEqual(encrypted_tensor.device.type, "cuda")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cuda",
)
encrypted_tensor = encrypted_tensor.to(device="cuda")
self.assertEqual(encrypted_tensor.device.type, "cuda")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cuda",
)
encrypted_tensor = encrypted_tensor.to("cpu")
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cpu",
)
encrypted_tensor = encrypted_tensor.to(device="cpu")
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cpu",
)
encrypted_tensor = encrypted_tensor.to(ptype=Ptype.binary)
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.binary)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to BinarySharedTensor.",
)
encrypted_tensor = encrypted_tensor.to(ptype=Ptype.arithmetic)
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to ArithmeticSharedTensor.",
)
def test_cumsum(self):
"""Tests cumulative sum on encrypted tensors."""
sizes = [(8,), (5, 10), (15, 10, 5)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for dim in range(tensor.dim()):
self._check(
encrypted_tensor.cumsum(dim),
tensor.cumsum(dim),
f"cumsum failed along {dim} dim",
)
def test_trace(self):
"""Tests trace operation on 2D encrypted tensors."""
sizes = [(3, 3), (10, 10), (2, 3)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
self._check(encrypted_tensor.trace(), tensor.trace(), "trace failed")
def test_flip(self):
"""Tests flip operation on encrypted tensors."""
sizes = [(5,), (5, 10), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
flip_dims = [(0,), (0, 1), (0, 1, 2)]
for dims in flip_dims:
if len(dims) <= tensor.dim():
self._check(
encrypted_tensor.flip(dims),
tensor.flip(dims),
f"flip failed with {dims} dims",
)
def test_control_flow_failure(self):
"""Tests that control flow fails as expected"""
tensor = self._get_random_test_tensor(is_float=True)
encrypted_tensor = MPCTensor(tensor)
with self.assertRaises(RuntimeError):
if encrypted_tensor:
pass
with self.assertRaises(RuntimeError):
tensor = 5 if encrypted_tensor else 0
with self.assertRaises(RuntimeError):
if False:
pass
elif encrypted_tensor:
pass
def test_where(self):
"""Tests where() conditional element selection"""
sizes = [(10,), (5, 10), (1, 5, 10)]
y_types = [lambda x: x, MPCTensor]
for size, y_type in itertools.product(sizes, y_types):
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor1 = MPCTensor(tensor1)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor2 = y_type(tensor2)
condition_tensor = (
self._get_random_test_tensor(max_value=1, size=size, is_float=False) + 1
)
condition_encrypted = MPCTensor(condition_tensor)
condition_bool = condition_tensor.bool()
reference_out = tensor1.where(condition_bool, tensor2)
encrypted_out = encrypted_tensor1.where(condition_bool, encrypted_tensor2)
y_is_private = y_type == MPCTensor
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with public condition",
)
encrypted_out = encrypted_tensor1.where(
condition_encrypted, encrypted_tensor2
)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with private condition",
)
# test scalar y
scalar = self._get_random_test_tensor(max_value=0, size=[1], is_float=True)
self._check(
encrypted_tensor1.where(condition_bool, scalar),
tensor1.where(condition_bool, scalar),
"where failed against scalar y with public condition",
)
self._check(
encrypted_tensor1.where(condition_encrypted, scalar),
tensor1.where(condition_bool, scalar),
"where failed against scalar y with private condition",
)
def test_unbind(self):
"""Tests unbind"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted = MPCTensor(tensor)
for dim in range(tensor.dim()):
reference = tensor.unbind(dim)
encrypted_out = encrypted.unbind(dim)
self._check_tuple(encrypted_out, reference, "unbind failed")
def test_split(self):
"""Tests split"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted = MPCTensor(tensor)
for dim in range(tensor.dim()):
# Get random split
split = self._get_random_test_tensor(
size=(), max_value=tensor.size(dim)
)
split = split.abs().clamp(0, tensor.size(dim) - 1)
split = split.item()
# Test int split
int_split = 1 if split == 0 else split
reference = tensor.split(int_split, dim=dim)
encrypted_out = encrypted.split(int_split, dim=dim)
self._check_tuple(encrypted_out, reference, "split failed")
# Test list split
split = [split, tensor.size(dim) - split]
reference = tensor.split(split, dim=dim)
encrypted_out = encrypted.split(split, dim=dim)
self._check_tuple(encrypted_out, reference, "split failed")
def test_set(self):
"""Tests set correctly re-assigns encrypted shares"""
sizes = [(1, 5), (5, 10), (15, 10, 5)]
for size in sizes:
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
encrypted1 = MPCTensor(tensor1)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
encrypted2 = MPCTensor(tensor2)
# check encrypted set
encrypted1.set(encrypted2)
self._check(
encrypted1, tensor2, f"set with encrypted other failed with size {size}"
)
# check plain text set
encrypted1 = MPCTensor(tensor1)
encrypted1.set(tensor2)
self._check(
encrypted1,
tensor2,
f"set with unencrypted other failed with size {size}",
)
def test_polynomial(self):
"""Tests polynomial function"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, max_value=3, is_float=True)
encrypted = MPCTensor(tensor)
for terms in range(1, 5):
coeffs = self._get_random_test_tensor(
size=(terms,), max_value=3, is_float=True
)
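# Note on the reference below: coeffs[i] is treated as the coefficient of
# tensor ** (i + 1), i.e. the reference polynomial has no constant term.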
reference = torch.zeros(size=tensor.size(), device=self.device)
for i, term in enumerate(coeffs.tolist()):
reference += term * tensor.pow(i + 1)
# Test list coeffs
encrypted_out = encrypted.polynomial(coeffs.tolist())
self._check(encrypted_out, reference, "polynomial failed")
# Test plaintext tensor coeffs
encrypted_out = encrypted.polynomial(coeffs)
self._check(encrypted_out, reference, "polynomial failed")
# Test encrypted tensor coeffs
coeffs_enc = MPCTensor(coeffs)
encrypted_out = encrypted.polynomial(coeffs_enc)
self._check(encrypted_out, reference, "polynomial failed")
def test_gather(self):
"""Test gather function of encrypted tensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
for dim in range(len(size)):
tensor = self._get_random_test_tensor(size=size, is_float=True)
index = self._get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
encrypted = MPCTensor(tensor)
reference = tensor.gather(dim, index)
encrypted_out = encrypted.gather(dim, index)
self._check(encrypted_out, reference, f"gather failed with size {size}")
def test_dropout(self):
"""
Tests the dropout functions. Directly compares the zero and non-zero
entries of the input tensor, since we cannot force the encrypted and
unencrypted versions to generate identical random output. Also confirms
that the number of zeros in the encrypted dropout function is as expected.
"""
all_prob_values = [x * 0.2 for x in range(5)]
def get_first_nonzero_value(x):
x = x.flatten()
x = x[x.abs().ge(1e-4)]
x = x.take(torch.tensor(0))
return x
# check that the encrypted and plaintext versions scale
# identically, by testing on all-ones tensor
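# Illustrative numbers (hypothetical, not part of the test data): with
# p = 0.2 every surviving entry of the all-ones input is scaled to
# 1 / (1 - 0.2) = 1.25, so any non-zero element of the decrypted output
# should match any non-zero element of F.dropout's plaintext output.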
for prob in all_prob_values:
tensor = torch.ones([10, 10, 10], device=self.device).float()
encr_tensor = MPCTensor(tensor)
dropout_encr = encr_tensor.dropout(prob, training=True)
dropout_decr = dropout_encr.get_plain_text()
dropout_plain = F.dropout(tensor, prob, training=True)
# All non-zero values should be identical in both tensors, so
# compare any one of them
decr_nonzero_value = get_first_nonzero_value(dropout_decr)
plaintext_nonzero_value = get_first_nonzero_value(dropout_plain)
self.assertTrue(
math.isclose(
decr_nonzero_value,
plaintext_nonzero_value,
rel_tol=1e-2,
abs_tol=1e-2,
)
)
for dropout_fn in ["dropout", "_feature_dropout"]:
for prob in all_prob_values:
for size in [(5, 10), (5, 10, 15), (5, 10, 15, 20)]:
for inplace in [False, True]:
for training in [False, True]:
tensor = self._get_random_test_tensor(
size=size, ex_zero=True, min_value=1.0, is_float=True
)
encr_tensor = MPCTensor(tensor)
dropout_encr = getattr(encr_tensor, dropout_fn)(
prob, inplace=inplace, training=training
)
if training:
# Check the scaling for non-zero elements
dropout_decr = dropout_encr.get_plain_text()
scaled_tensor = tensor / (1 - prob)
reference = dropout_decr.where(
dropout_decr == 0, scaled_tensor
)
else:
reference = tensor
self._check(
dropout_encr,
reference,
f"dropout failed with size {size} and probability "
f"{prob}",
)
if inplace:
self._check(
encr_tensor,
reference,
f"in-place dropout failed with size {size} and "
f"probability {prob}",
)
else:
self._check(
encr_tensor,
tensor,
"out-of-place dropout modifies input",
)
# Check that channels that are zeroed are all zeros
if dropout_fn in [
"dropout2d",
"dropout3d",
"feature_dropout",
]:
dropout_encr_flat = dropout_encr.flatten(
start_dim=0, end_dim=1
)
dropout_flat = dropout_encr_flat.get_plain_text()
for i in range(0, dropout_flat.size(0)):
all_zeros = (dropout_flat[i] == 0).all()
all_nonzeros = (dropout_flat[i] != 0).all()
self.assertTrue(
all_zeros or all_nonzeros,
f"{dropout_fn} failed for size {size} with "
f"training {training} and inplace {inplace}",
)
# Check the expected number of zero elements
# For speed, restrict test to single p = 0.4
encr_tensor = MPCTensor(torch.empty((int(1e5), 2, 2)).fill_(1).to(self.device))
dropout_encr = encr_tensor.dropout(0.4)
dropout_tensor = dropout_encr.get_plain_text()
frac_zero = float((dropout_tensor == 0).sum()) / dropout_tensor.nelement()
self.assertTrue(math.isclose(frac_zero, 0.4, rel_tol=1e-2, abs_tol=1e-2))
def test_tuple_cache(self):
# Skip RSS setting since it does not generate tuples
if cfg.mpc.protocol == "replicated":
return
# TODO: incorporate wrap_rng for 3PC+ settings
if comm.get().get_world_size() > 2:
return
provider = crypten.mpc.get_default_provider()
# Test tracing attribute
crypten.trace()
self.assertTrue(provider.tracing)
x = get_random_test_tensor(is_float=True)
x = crypten.cryptensor(x)
_ = x.square()
_ = x * x
_ = x.matmul(x.t())
_ = x.relu()
y = x.unsqueeze(0)
_ = y.conv1d(y, stride=2)
# Populate reference requests
ref_names = ["square"]
ref_names += ["generate_additive_triple"] * 2
ref_names += ["generate_binary_triple"] * 7 + ["B2A_rng"]
ref_names += ["generate_additive_triple"] * 2
ref_args = [
(torch.Size([1, 5]),),
(torch.Size([1, 5]), torch.Size([1, 5]), "mul"),
(torch.Size([1, 5]), torch.Size([5, 1]), "matmul"),
(torch.Size([1, 1, 5]), torch.Size([1, 1, 5])),
]
ref_args += [(torch.Size([2, 1, 1, 5]), torch.Size([2, 1, 1, 5]))] * 6
ref_args += [(torch.Size([1, 5]),)]
ref_args += [(torch.Size([1, 5]), torch.Size([1, 5]), "mul")]
ref_args += [(torch.Size([1, 1, 5]), torch.Size([1, 1, 5]), "conv1d")]
kwargs = {"device": torch.device("cpu")}
conv_kwargs = {"device": torch.device("cpu"), "stride": 2}
requests = [(ref_names[i], ref_args[i], kwargs) for i in range(12)]
requests += [(ref_names[12], ref_args[12], conv_kwargs)]
self.assertEqual(
provider.request_cache,
requests,
"TupleProvider request cache incorrect",
)
crypten.trace(False)
self.assertFalse(provider.tracing)
# Check that cache populates as expected
crypten.fill_cache()
kwargs = frozenset(kwargs.items())
conv_kwargs = frozenset(conv_kwargs.items())
keys = [(ref_names[i], ref_args[i], kwargs) for i in range(12)]
keys += [(ref_names[12], ref_args[12], conv_kwargs)]
self.assertEqual(
set(provider.tuple_cache.keys()),
set(keys),
"TupleProvider tuple_cache populated incorrectly",
)
# Test that function calls return from cache when trace is off
crypten.trace(False)
_ = x.square()
_ = x * x
_ = x.matmul(x.t())
_ = x.relu()
y = x.unsqueeze(0)
_ = y.conv1d(y, stride=2)
for v in provider.tuple_cache.values():
self.assertEqual(
len(v), 0, msg="TupleProvider is not popping tuples properly from cache"
)
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestMPC):
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestMPC):
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTTP, self).tearDown()
class Test3PC(MultiProcessTestCase, TestMPC):
def setUp(self):
super(Test3PC, self).setUp(world_size=3)
class TestRSS(MultiProcessTestCase, TestMPC):
def setUp(self):
self._original_protocol = cfg.mpc.protocol
cfg.mpc.protocol = "replicated"
super(TestRSS, self).setUp(world_size=3)
def tearDown(self):
cfg.mpc.protocol = self._original_protocol
super(TestRSS, self).tearDown()
# This code only runs when executing the file outside the test harness (e.g.
# via the buck target of another test)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jjvanzon/JJ.TryOut",
"score": 3
} |
#### File: glib-2.0/gdb/glib.py
```python
Components/Mono-3.2.3/share/glib-2.0/gdb/glib.py
import gdb
# This is not quite right, as local vars may override symname
def read_global_var (symname):
return gdb.selected_frame().read_var(symname)
def g_quark_to_string (quark):
if quark == None:
return None
quark = long(quark)
if quark == 0:
return None
val = read_global_var ("g_quarks")
max_q = long(read_global_var ("g_quark_seq_id"))
if quark < max_q:
return val[quark].string()
return None
# We override the node printers too, so that node->next is not expanded
class GListNodePrinter:
"Prints a GList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x, prev=0x%x}" % (str(self.val["data"]), long(self.val["next"]), long(self.val["prev"]))
class GSListNodePrinter:
"Prints a GSList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x}" % (str(self.val["data"]), long(self.val["next"]))
class GListPrinter:
"Prints a GList"
class _iterator:
def __init__(self, head, listtype):
self.link = head
self.listtype = listtype
self.count = 0
def __iter__(self):
return self
def next(self):
if self.link == 0:
raise StopIteration
data = self.link['data']
self.link = self.link['next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, data)
def __init__ (self, val, listtype):
self.val = val
self.listtype = listtype
def children(self):
return self._iterator(self.val, self.listtype)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "array"
class GHashPrinter:
"Prints a GHashTable"
class _iterator:
def __init__(self, ht, keys_are_strings):
self.ht = ht
if ht != 0:
self.array = ht["nodes"]
self.size = ht["size"]
self.pos = 0
self.keys_are_strings = keys_are_strings
self.value = None
def __iter__(self):
return self
def next(self):
if self.ht == 0:
raise StopIteration
if self.value != None:
v = self.value
self.value = None
return v
while long(self.pos) < long(self.size):
node = self.array[self.pos]
self.pos = self.pos + 1
if long (node["key_hash"]) >= 2:
key = node["key"]
val = node["value"]
if self.keys_are_strings:
key = key.cast (gdb.lookup_type("char").pointer())
# Queue value for next result
self.value = ('[%dv]'% (self.pos), val)
# Return key
return ('[%dk]'% (self.pos), key)
raise StopIteration
def __init__ (self, val):
self.val = val
self.keys_are_strings = False
try:
string_hash = read_global_var ("g_str_hash")
except:
string_hash = None
if self.val != 0 and string_hash != None and self.val["hash_func"] == string_hash:
self.keys_are_strings = True
def children(self):
return self._iterator(self.val, self.keys_are_strings)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "map"
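# Note: this first pretty_printer_lookup references helpers
# (is_g_type_instance, GTypePrettyPrinter) that are not defined in this file,
# and it is immediately shadowed by the second definition below.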
def pretty_printer_lookup (val):
if is_g_type_instance (val):
return GTypePrettyPrinter (val)
def pretty_printer_lookup (val):
# None yet, want things like hash table and list
type = val.type.unqualified()
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t == "GList":
return GListPrinter(val, "GList")
if t == "GSList":
return GListPrinter(val, "GSList")
if t == "GHashTable":
return GHashPrinter(val)
else:
t = str(type)
if t == "GList":
return GListNodePrinter(val)
if t == "GSList *":
return GListPrinter(val, "GSList")
return None
def register (obj):
if obj == None:
obj = gdb
obj.pretty_printers.append(pretty_printer_lookup)
class ForeachCommand (gdb.Command):
"""Foreach on list"""
def __init__ (self):
super (ForeachCommand, self).__init__ ("gforeach",
gdb.COMMAND_DATA,
gdb.COMPLETE_SYMBOL)
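# Usage sketch (hypothetical symbol names), following the syntax parsed by
# parse_args below ("<var> in <container>: <command>"):
#   (gdb) gforeach node in my_list: print (char *) $node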
def valid_name (self, name):
if not name[0].isalpha():
return False
return True
def parse_args (self, arg):
i = arg.find(" ")
if i <= 0:
raise Exception ("No var specified")
var = arg[:i]
if not self.valid_name(var):
raise Exception ("Invalid variable name")
while i < len (arg) and arg[i].isspace():
i = i + 1
if arg[i:i+2] != "in":
raise Exception ("Invalid syntax, missing in")
i = i + 2
while i < len (arg) and arg[i].isspace():
i = i + 1
colon = arg.find (":", i)
if colon == -1:
raise Exception ("Invalid syntax, missing colon")
val = arg[i:colon]
colon = colon + 1
while colon < len (arg) and arg[colon].isspace():
colon = colon + 1
command = arg[colon:]
return (var, val, command)
def do_iter(self, arg, item, command):
item = item.cast (gdb.lookup_type("void").pointer())
item = long(item)
to_eval = "set $%s = (void *)0x%x\n"%(arg, item)
gdb.execute(to_eval)
gdb.execute(command)
def slist_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GSList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def list_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def pick_iterator (self, container):
t = container.type.unqualified()
if t.code == gdb.TYPE_CODE_PTR:
t = t.target().unqualified()
t = str(t)
if t == "GSList":
return self.slist_iterator
if t == "GList":
return self.list_iterator
raise Exception("Invalid container type %s"%(str(container.type)))
def invoke (self, arg, from_tty):
(var, container, command) = self.parse_args(arg)
container = gdb.parse_and_eval (container)
func = self.pick_iterator(container)
func(var, container, command)
ForeachCommand ()
``` |
{
"source": "jjvdhoven/IPFIT5-LH",
"score": 3
} |
#### File: IPFIT5-LH/EventLog_Scan/event_log_scanner.py
```python
import win32evtlog
import traceback
import sys
import logging
import json
import os
new_dir = "Output\\"
if not os.path.exists(new_dir):
os.makedirs(new_dir)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(funcName)s:%(message)s')
file_handler = logging.FileHandler('Output\\eventScannerResults.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class EventLogScanner:
run = True
auditSuccesArray = []
auditFailArray = []
informationTypeArray = []
warningTypeArray = []
errorTypeArray = []
machine = "localhost"
def __init__(self, eventRequestArray):
# eventRequestArray.sort(key=lambda x: x.LogType, reverse=True)
self.divideInputInAppropriateList(eventRequestArray)
self.runScanner(self.machine)
    '''Divide the input into one of the 5 lists required to open that specific event log.'''
def divideInputInAppropriateList(self, eventRequestArray):
for i, event in enumerate(eventRequestArray):
if event.LogType == "EVENTLOG_AUDIT_FAILURE" or event.LogType == "ALL":
event.ID = i
self.auditFailArray.append(event)
if event.LogType == "EVENTLOG_AUDIT_SUCCESS" or event.LogType == "ALL":
event.ID = i
self.auditSuccesArray.append(event)
if event.LogType == "EVENTLOG_INFORMATION_TYPE" or event.LogType == "ALL":
event.ID = i
self.informationTypeArray.append(event)
if event.LogType == "EVENTLOG_WARNING_TYPE" or event.LogType == "ALL":
event.ID = i
self.warningTypeArray.append(event)
if event.LogType == "EVENTLOG_ERROR_TYPE" or event.LogType == "ALL":
event.ID = i
self.errorTypeArray.append(event)
def compareLogRecordWithList(self, eventArray, readRecord):
""""* this function is strongly dependant on a the list being sorted on all values with NONE at the bottom
Very important that the array values are in the same order as that the list is sorted
*"""
seenValue = False
parameterNameArray = ['EventType', 'EventID', 'Sid', 'SourceName', 'StringInserts', 'EventCategory',
'Data', 'ComputerName']
indexOfParameters = 0
givenParamToCheck = "event." + parameterNameArray[indexOfParameters]
readParamToCheck = "readRecord." + parameterNameArray[indexOfParameters]
try:
for event in eventArray:
if eval(givenParamToCheck) == eval(readParamToCheck):
self.compareIOCRecordWithLogRecord(event, readRecord)
seenValue = True
elif eval(givenParamToCheck) != eval(readParamToCheck) and seenValue:
if eval(givenParamToCheck) is None:
indexOfParameters += 1
givenParamToCheck = "event." + parameterNameArray[indexOfParameters]
readParamToCheck = "readRecord." + parameterNameArray[indexOfParameters]
seenValue = False
if eval(givenParamToCheck) == eval(readParamToCheck):
self.compareIOCRecordWithLogRecord(event, readRecord)
seenValue = True
elif eval(givenParamToCheck) is None and not seenValue:
indexOfParameters += 1
givenParamToCheck = "event." + parameterNameArray[indexOfParameters]
readParamToCheck = "readRecord." + parameterNameArray[indexOfParameters]
if eval(givenParamToCheck) == eval(readParamToCheck):
self.compareIOCRecordWithLogRecord(event, readRecord)
seenValue = True
except:
try:
logger.info(traceback.print_exc(sys.exc_info()))
except:
logger.info("Exeption Cant compare record with Input List")
def compareIOCRecordWithLogRecord(self, givenIOCRecord, readLogRecord):
"""Compare amount of parameters given with the amount of matches within the LOG record
if all given values are equal it is a match
"""
amountOfParametersGiven = 0
amountOfMatchesFound = 0
parameterNameArray = ['EventType', 'EventID', 'Sid', 'SourceName', 'StringInserts', 'EventCategory',
'Data', 'ComputerName']
"""For each Parameter defined in the parameterNameArray:
make a string for both objects with the parameter we want to check
parse the string to an expression and check if the value is not None
if the value is not None check if is equal to the read object
"""
for parameterName in parameterNameArray:
givenParamToCheck = "givenIOCRecord." + parameterName
readParamToCheck = "readLogRecord." + parameterName
if eval(givenParamToCheck) is not None:
amountOfParametersGiven += 1
if eval(givenParamToCheck) == eval(readParamToCheck):
amountOfMatchesFound += 1
if amountOfParametersGiven == amountOfMatchesFound:
givenIOCRecord.Found = True
try:
record = {"matchID": str(givenIOCRecord.ID),
"Reserved": str(readLogRecord.Reserved),
"RecordNumber": str(readLogRecord.RecordNumber),
"TimeGenerated": str(readLogRecord.TimeGenerated),
"TimeWritten": str(readLogRecord.TimeWritten),
"EventType": str(readLogRecord.EventType),
"EventID": str(readLogRecord.EventID),
"ReservedFlags": str(readLogRecord.ReservedFlags),
"ClosingRecordNumber": str(readLogRecord.ClosingRecordNumber),
"Sid": str(readLogRecord.Sid),
"SourceName": str(readLogRecord.SourceName),
"EventCategory": str(readLogRecord.EventCategory),
"StringInserts": str(readLogRecord.StringInserts),
"Data": str(readLogRecord.Data),
"ComputerName": str(readLogRecord.ComputerName)}
self.writeOutput(record)
except Exception as e:
logger.info(e)
try:
logger.info(traceback.print_exc(sys.exc_info()))
except:
                    logger.info('Exception: log record cannot be constructed')
else:
return False
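    # Illustrative example (values are made up, not from the source): an IOC record
    # with only EventID=4625 and SourceName="Security" set counts 2 given parameters;
    # a log record that matches both yields 2 matches, so Found is set to True and
    # the record is written to the JSON output above.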
def readEventLog(self, server, log_type, eventArray):
'''
Reads the log_type (e.g., "Application" or "System") Windows events from the
specified server.
'''
try:
"""Open Log File and sort array on Event ID"""
log_handle = win32evtlog.OpenEventLog(server, log_type)
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ
total = win32evtlog.GetNumberOfEventLogRecords(log_handle)
try:
eventArray.sort(key=lambda x: (x.EventType or 0, x.EventID or 0, x.Sid or 0, x.SourceName or '',
x.StringInserts or '', x.EventCategory or 0, x.Data or '',
x.ComputerName or ''), reverse=True)
except Exception as e:
print(e)
try:
logger.info(traceback.print_exc(sys.exc_info()))
except:
logger.info('Exception sort went wrong')
logger.info("Scanning through {} events on {} in {}".format(total, server, log_type))
"""As long as there are events keep reading"""
readEvent_count = 0
readEvents = 1
while readEvents:
readEvents = win32evtlog.ReadEventLog(log_handle, flags, 0)
for readEvent in readEvents:
self.compareLogRecordWithList(eventArray, readEvent)
readEvent_count += 1
""""Close Log File"""
logger.info("Scanned through {} events on {} in {}".format(readEvent_count, server, log_type))
win32evtlog.CloseEventLog(log_handle)
except:
logger.info("I cant read yar bastard")
try:
logger.info(traceback.print_exc(sys.exc_info()))
except:
logger.info('Exception while printing traceback')
def runScanner(self, machine):
"""While run is true
1. read logtype from input
2. read logtypes
"""
if self.auditFailArray:
self.readEventLog(machine, 'EVENTLOG_AUDIT_FAILURE', self.auditFailArray)
self.writeInputRecords(self.auditFailArray)
if self.auditSuccesArray:
self.readEventLog(machine, 'EVENTLOG_AUDIT_SUCCESS', self.auditSuccesArray)
self.writeInputRecords(self.auditSuccesArray)
if self.informationTypeArray:
self.readEventLog(machine, 'EVENTLOG_INFORMATION_TYPE', self.informationTypeArray)
self.writeInputRecords(self.informationTypeArray)
if self.warningTypeArray:
self.readEventLog(machine, 'EVENTLOG_WARNING_TYPE', self.warningTypeArray)
self.writeInputRecords(self.warningTypeArray)
if self.errorTypeArray:
self.readEventLog(machine, 'EVENTLOG_ERROR_TYPE', self.errorTypeArray)
self.writeInputRecords(self.errorTypeArray)
def writeInputRecords(self, inputArray):
for event in inputArray:
record = {'ID': str(event.ID),
'EventType': str(event.EventType),
'EventID': str(event.EventID),
'Sid': str(event.Sid),
'SourceName': str(event.SourceName),
'EventCategory': str(event.EventCategory),
                      'StringInserts': str(event.StringInserts),
'Data': str(event.Data),
'ComputerName': str(event.ComputerName),
'logType': str(event.LogType),
'Found': str(event.Found)}
self.writeOutput(record)
def writeOutput(self, line):
File = open("Output\\eventOutput.json", "a")
File.write(json.dumps(line))
File.write("\n")
File.close()
# def stopScanner(self):
# self.run = False
# return
```
#### File: IPFIT5-LH/EventLog_Scan/event_main.py
```python
from EventLog_Scan import event_log_scanner
import win32evtlog
import traceback
import sys
from EventLog_Scan import EventRequest
def readEventLog(server, log_type):
    # Reads the log_type (e.g. "Application" or "System") from the Windows event viewer on the specified server
try:
# Open the log file and sort array on Event ID's
log_handle = win32evtlog.OpenEventLog(server, log_type)
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ
total = win32evtlog.GetNumberOfEventLogRecords(log_handle)
print("Scanning through {} events on {} in {}".format(total, server, log_type))
# As long as there are events, keep reading
readEvent_count = 1
readEvents = 1
events = []
while readEvents:
readEvents = win32evtlog.ReadEventLog(log_handle, flags, 1)
for event in readEvents:
events.append(event)
readEvent_count += 1
win32evtlog.CloseEventLog(log_handle)
return events
except:
print("I can't read yar bastard")
try:
print(traceback.print_exc(sys.exc_info()))
except:
print("Exception whilst printing traceback")
def main():
input_array = []
event1 = EventRequest.EventRequest(13, 14, 233434, "lala", "evil.exe", 34, "lala", "mijnComputer", "information_type")
event2 = EventRequest.EventRequest(13, 14, 233434, "lala", "evil.exe", 34, "lala", "mijnComputer", "audit_succes")
event3 = EventRequest.EventRequest(13, 14, 233434, "lala", "evil.exe", 34, "lala", "mijnComputer", "audit_failure")
event4 = EventRequest.EventRequest(13, 14, 233434, "lala", "evil.exe", 34, "lala", "mijnComputer", "error_type")
event5 = EventRequest.EventRequest(13, 14, 233434, "lala", "evil.exe", 34, "lala", "mijnComputer", "warning_type")
event6 = EventRequest.EventRequest(13, 14, 233434, "lala", "evil.exe", 34, "lala", "mijnComputer", " ")
input_array.append(event1)
input_array.append(event2)
input_array.append(event3)
input_array.append(event4)
input_array.append(event5)
input_array.append(event6)
try:
scan = event_log_scanner.EventLogScanner(input_array)
except:
try:
print(traceback.print_exc(sys.exc_info()))
except:
print("Exception whilst printing traceback")
if __name__ == "__main__":
main()
```
#### File: IPFIT5-LH/File_scan/file_scanner.py
```python
import os
import logging
"""
This script was made by <NAME> for NFIR; unless specified otherwise, all code in this document was written by
the former.
Refactoring and writing to the output file by <NAME> to comply with PEP8.
Conversion of list items to string and int was done by <NAME>.
"""
filesList = []
def input_check(p_1, p_2):
logging.info("\n---FILE SCANNER---")
output = open(r"Output\file-scan-results.txt", "w")
output.write("\n\n")
output.write(120 * "-")
output.write("\nFILE SCANNING RESULTS\n")
output.write(120 * "-")
# convert list to list without , and []
# convert list to str variable
p1 = str(','.join(p_1))
p2 = str(','.join(p_2))
try:
p2 = int(p2)
if isinstance(p2, int):
p2 = int(p2)
else:
p2 = str(p2)
except:
p2 = p2
if isinstance(p1, str) and isinstance(p2, str):
if p1.__contains__("\\"):
for root, subdirs, files in os.walk(p1):
for file in files:
if file.__contains__(p2):
output.write("\nPath: ")
output.write(root)
output.write("\nFile name: ")
output.write(str(file))
output.write("\n")
print('\nPath: ' + root + '\n' + 'File Name: ' + str(file))
logging.info("Path: " + root + " File Name: " + str(file))
output.write("\n")
output.write(120 * "-")
output.write("\n\n")
elif p2.__contains__("\\"):
for root, subdirs, files in os.walk(p2):
for file in files:
if file.__contains__(p1):
output.write("\nPath: ")
output.write(root)
output.write("\nFile name: ")
output.write(str(file))
output.write("\n")
print('\nPath: ' + root + '\n' + 'File Name: ' + str(file))
logging.info("Path: " + root + " File Name: " + str(file))
output.write("\n")
output.write(120 * "-")
output.write("\n\n")
    elif isinstance(p2, int):  # p2 is a minimum file size in kilobytes
for path, subdirs, files in os.walk(p1):
for name in files:
filesList.append(os.path.join(path, name))
for i in filesList:
file_size = os.path.getsize(str(i))
if file_size >= p2 * 1024:
output.write("\nThe file name is: ")
output.write(str(i))
output.write("\nThe file size is: ")
                output.write(str(file_size / 1024))
output.write(" kilobytes")
output.write("\n")
print("\nThe File: " + str(i) + " File Size is: " + str(file_size / 1024) + " kiloBytes")
logging.info("The File: " + str(i) + " File Size is: " + str(file_size / 1024) + " kiloBytes")
output.write("\n")
output.write(120 * "-")
output.write("\n\n")
elif isinstance(p1, int):
for path, subdirs, files in os.walk(p2):
for name in files:
filesList.append(os.path.join(path, name))
for i in filesList:
file_size = os.path.getsize(str(i))
if file_size >= p1 * 1024:
output.write("\nThe file name is: ")
output.write(str(i))
output.write("\nThe file size is: ")
                output.write(str(file_size / 1024))
output.write(" kilobytes")
output.write("\n")
print("\nThe File: " + str(i) + " is: " + str(file_size / 1024) + " kiloBytes")
logging.info("The File: " + str(i) + " is: " + str(file_size / 1024) + " kiloBytes")
output.write("\n")
output.write(120 * "-")
output.write("\n\n")
else:
print("input error")
logging.info("input error")
if __name__ == "__main__":
# InputCheck(P1, P2)
print("if __name__" == "__main__")
```
#### File: jjvdhoven/IPFIT5-LH/main.py
```python
from Input import InputValidator as ir
from Input import InputInterpeter as ii
from EventLog_Scan import event_log_scanner as els
from File_scan import file_scanner as fs
from Registry import read_registry as rreg
from Processes import process_scanner as pss
import sys
import output
import os
import logging
# Set logging settings and create newline, upon starting this function
logging.basicConfig(filename="Output\\logging.log", level=logging.INFO, format='%(asctime)s, %(levelname)s, '
'%(funcName)s, ''%(message)s')
def make_output_dir():
output_dir = "Output\\"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Function that gets the input from the IOC List input file
def get_input():
logging.info("\n---GET INPUT---")
logging.info("Started getting config from the config file")
# Make objects
logging.info("Creating input objects")
validate = ir.InputValidator()
interpret = ii.InputInterpeter()
# Validate the config file and return the raw_string[]
logging.info("Getting lists from config file")
registry_list, file_list, event_list, process_list = validate.getInput(r"IOC_lijst.txt")
# Parse hkey_list and add it to a variable
logging.info("Binding registry lists to variables: hkey_list")
registry_list = interpret.transformRegistryInput(registry_list)
# Parse file_list and add it to a variable
logging.info("Binding file list to variable: file_list")
file_list = interpret.transformFileInput(file_list)
# Parse event_list and add it to a variable
logging.info("Binding event list to variable: event_list")
event_list = interpret.transformEventInput(event_list)
# Parse process_list and add it to a variable
logging.info("Binding event list to variable: process_list")
process_list = interpret.transformProcessInput(process_list)
# Parse config_list and add it to a variable
logging.info("Binding event list to variable: config_list")
# config_list = interpret.transformConfigInput(config_list)
# Returning newly created variables
logging.info("Returning made variables: hkey_list, path_list, file_list, event_list, process_list, config_list")
return registry_list, file_list, event_list, process_list
# Function that controls what scanner gets called when
def order_of_calling(registry_list, file_list, event_list, process_list):
logging.info("\n---SCANNER CONTROL---")
logging.info("Started calling scanners with given input")
logging.info("Starting registry scan")
rreg.main(registry_list)
logging.info("Starting process scanner")
pss.main(process_list)
logging.info("Starting event log scanner")
els.EventLogScanner(event_list)
logging.info("Starting file scanner")
fs.input_check(file_list[0], file_list[1])
output.main()
print("")
input("Press ENTER to quit")
logging.info("Exiting program")
sys.exit(0)
# Main function of the script
def main():
logging.info("\n---MAIN OF PROGRAM---")
logging.info("Checking if output directory exists")
make_output_dir()
logging.info("Retrieving input from config file")
registry_list, file_list, event_list, process_list = get_input()
logging.info("Starting scanner calling function with config file input")
order_of_calling(registry_list, file_list, event_list, process_list)
if __name__ == '__main__':
main()
```
#### File: IPFIT5-LH/Main-test/Files.py
```python
import os
Folder_path = os.getcwd()
print(Folder_path)
def listDir(dir):
fileNames = os.listdir(dir)
for fileName in fileNames:
print ('File Name: '+ fileName)
print ('Folder Path: '+ os.path.abspath(os.path.join(dir, fileName)))
if __name__ == '__main__':
listDir(Folder_path)
``` |
{
"source": "jjvdhoven/IPFIT6-scraper",
"score": 3
} |
#### File: scraper-main/CodeBase/marktplaats.py
```python
import csv
import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook
from datetime import datetime
import os.path as osp
import helpers as h
from helpers import get_searchterms as gst
# Class for the item for writing to CSV/Excel
class Item:
title = ""
price = ""
summary = ""
seller = ""
date = ""
location = ""
seller_url = ""
seller_website = ""
# Create the URL to use for the scraper
def create_url(item_query, item_postalcode, item_distance):
url = 'https://www.marktplaats.nl'
url += '/q/:' + item_query
url += '/#distanceMeters:' + item_distance
url += '|postcode:' + item_postalcode
return url
# Write the results to an external file for storage and analysis
def write_to_csv(items, file_name, query):
with open(f'../persistant/Marktplaats/marktplaats_{file_name}.csv', 'a+', newline='') as write_obj:
csv_writer = csv.writer(write_obj)
csv_writer.writerow([f"The current query is: {query}"])
csv_writer.writerow(['Title', 'Price', 'Summary', "Seller", "Date", "Location", "Seller url", "Seller website"])
for item in items:
csv_writer.writerow([item.title, item.price, item.summary, item.seller, item.date, item.location, item.seller_url, item.seller_website])
csv_writer.writerow('')
write_obj.close()
# Convert the CSV file to an Excel file for easier reading
def convert_csv_to_xsl(file_name):
wb = Workbook()
ws = wb.active
with open(f"../persistant/Marktplaats/marktplaats_{file_name}.csv", 'r') as f:
for row in csv.reader(f):
ws.append(row)
wb.save(f'../persistant/Marktplaats/marktplaats-{file_name}.xlsx')
def is_correct_response(response):
# Check that the response returned 'success'
return response == 'success'
def is_defined_item(element):
if element is not None:
return element
else:
return "not Available"
# Collect the listings with help from the entered search terms
def get_listings(url, timestr, query):
source = requests.get(url)
marktplaats = BeautifulSoup(source.text, 'lxml')
# Returns entire page
body = marktplaats.find('body')
# Returns website body
search_result = is_defined_item(body.find('ul', class_='mp-Listings--list-view'))
list_of_articles = []
try:
for article in search_result:
try:
# Get all product links
item_link = article.a['href']
# Get all product titles
item_title = is_defined_item(article.find('h3')).text
# Get items short summary
item_summary = is_defined_item(article.find('p')).text
# Get the name of the seller
item_seller = is_defined_item(article.find('span', class_="mp-Listing-seller-name")).text
# Get the link to the sellers Marktplaats profile
gen_item_seller_link = is_defined_item(article.find('a', class_="mp-TextLink"))['href']
if "/u/" in gen_item_seller_link:
item_seller_link = "https://www.marktplaats.nl" + gen_item_seller_link
# Get the price of the item
item_price = is_defined_item(article.find('span', class_="mp-Listing-price mp-text-price-label")).text
item_price = item_price.replace("\xc2\xa0", " ")
# Get the date the item was posted
item_date = is_defined_item(article.find('span', class_="mp-Listing-date mp-Listing-date--desktop")).text
# Get the location from where the seller is selling from
item_location = is_defined_item(article.find('span', class_="mp-Listing-location")).text
# Get the external website the seller promotes in the listing
item_seller_website = is_defined_item(article.find('a', class_="mp-Listing-sellerCoverLink"))['href']
myObj = Item()
myObj.title = item_title.strip()
myObj.price = item_price.strip()
myObj.summary = item_summary.strip()
myObj.seller = item_seller.strip()
myObj.date = item_date.strip()
myObj.location = item_location.strip()
myObj.url = item_link.strip()
myObj.seller_url = item_seller_link.strip()
myObj.seller_website = item_seller_website.strip()
list_of_articles.append(myObj)
except Exception as e:
summary_ = "None"
title_ = "None"
href = "None"
price = "None"
print(e)
except Exception as e:
print(e)
write_to_csv(list_of_articles, timestr, query)
convert_csv_to_xsl(timestr)
def main(file_name_time):
i = 0
id = "mp"
postalcode = '1011ab'
distance = '25000'
query_list = gst(id)
while i < len(query_list):
current_url = create_url(query_list[i], postalcode, distance)
print(f"Current URL to search: {current_url}")
get_listings(current_url, file_name_time, query_list[i])
i += 1
if __name__ == "__main__":
fn = "2020"
main(fn)
``` |
{
"source": "JJWagner/EnigmaCribber",
"score": 4
} |
#### File: JJWagner/EnigmaCribber/enigmacribber.py
```python
def bound(input):
# Wraps offsets around the wheel.
# i.e. Z + 1 = A
# B - 5 = W
if input > 25:
return (input - 26)
elif input < 0:
return (input + 26)
else:
return input
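# Quick sanity check of the wrap-around (illustrative, not in the original script):
#   bound(26) -> 0, bound(-1) -> 25, bound(12) -> 12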
def rotor_RtoL (rotor, index, rotorpos):
# Compute the rotor offsets for signals
# flowing from right (input/plugboard)
# to left (reflector).
#
position = bound(index + rotorpos)
return bound(rotor[position] - rotorpos)
def rotor_LtoR (rotor, index, rotorpos):
# Compute the rotor offsets for signals
# flowing from left (reflector) to
# right (input/plugboard)
#
position = bound(index + rotorpos)
for idx in range(26):
if rotor[idx] == position:
return bound(idx - rotorpos)
def rotors_advance(LeftRotorPos, MiddleRotorPos, RightRotorPos, LeftRotor, MiddleRotor, RightRotor):
# Advance three rotors (args), paying
# attention to the notch placement
# and double-stepping at the appropriate
# times.
#
# Pull the notch positions off the ends
# of the wheel arrays
LeftRotorNotch = LeftRotor[26]
MiddleRotorNotch = MiddleRotor[26]
RightRotorNotch = RightRotor[26]
advanceRandM = False
advanceMandL = False
# If any of the pawls will fall in a
# notch, set a flag.
if(RightRotorPos == RightRotorNotch):
advanceRandM = True
if(MiddleRotorPos == MiddleRotorNotch):
advanceMandL = True
# the rightmost rotor always advances
RightRotorPos = bound(RightRotorPos + 1)
# the middle one advances if its pawl falls
# in the rightmost wheel's notch, or if
# the left wheel's pawl falls in the middle
# wheel's notch. Take care of the first
# condition here
if(advanceRandM and not advanceMandL):
MiddleRotorPos = bound(MiddleRotorPos + 1)
# If the left wheel's pawl falls in the
# middle wheel's notch, advance them both.
if(advanceMandL):
MiddleRotorPos = bound(MiddleRotorPos + 1)
LeftRotorPos = bound(LeftRotorPos + 1)
rotors_advance.RightRotorPos = RightRotorPos
rotors_advance.MiddleRotorPos = MiddleRotorPos
rotors_advance.LeftRotorPos = LeftRotorPos
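# Illustrative behaviour (using the rotor definitions below): with rotorIII in the
# right-hand slot, the call made while RightRotorPos sits on its notch (21) advances
# both the right and middle rotors; when the middle rotor sits on its own notch,
# the next call steps the middle and left rotors together (the classic double step).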
def Compute(stepcount, signalin, RotorL, RotorM, RotorR, RotorLPos, RotorMPos, RotorRPos, Reflector):
# Automate stepping the rotors and
# computing the substitutions back
# and forth across the wheels and
# reflector
#
for step in range(stepcount):
rotors_advance(RotorLPos, RotorMPos, RotorRPos, RotorL, RotorM, RotorR)
RotorRPos = rotors_advance.RightRotorPos
RotorMPos = rotors_advance.MiddleRotorPos
RotorLPos = rotors_advance.LeftRotorPos
# finished advancing. save rotor positions
# for the function caller
Compute.RotorRPos = RotorRPos
Compute.RotorMPos = RotorMPos
Compute.RotorLPos = RotorLPos
# compute offsets from input/plugboard to reflector
RotorROutL = rotor_RtoL(RotorR, signalin, RotorRPos)
RotorMOutL = rotor_RtoL(RotorM, RotorROutL, RotorMPos)
RotorLOutL = rotor_RtoL(RotorL, RotorMOutL, RotorLPos)
ReflectorOutR = rotor_RtoL(Reflector, RotorLOutL, 0)
# compute offsets from reflector back to the input/plugboard
RotorLOutR = rotor_LtoR(RotorL, ReflectorOutR, RotorLPos)
RotorMOutR = rotor_LtoR(RotorM, RotorLOutR, RotorMPos)
RotorROutR = rotor_LtoR(RotorR, RotorMOutR, RotorRPos)
return RotorROutR
#
# Constants and variables
#
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
alpha = "a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"
alphastr = "abcdefghijklmnopqrstuvwxyz"
#rotor definitions. the notch position is encoded in element 26
rotorI = 4,10,12,5,11,6,3,16,21,25,13,19,14,22,24,7,23,20,18,15,0,8,1,17,2,9, 16
rotorII = 0,9,3,10,18,8,17,20,23,1,11,7,22,19,12,2,16,6,25,13,15,24,5,21,14,4, 4
rotorIII = 1,3,5,7,9,11,2,15,17,19,23,21,25,13,24,4,8,22,6,0,10,12,20,18,16,14, 21
reflecB = 24,17,20,7,16,18,11,3,15,23,13,6,14,10,12,8,4,1,5,25,2,22,21,9,0,19
# Set ciphertext and crib text, and rotor order
ciphertext = "YJBDELCZ"
cribtext = "THECACHE"
rotorL = rotorI
rotorM = rotorII
rotorR = rotorIII
reflector = reflecB
CandidateRotorPositions = []
ReducedRotorPositions = []
# Create initial list of candidate positions
for rotorLpos in range(26):
for rotorMpos in range(26):
for rotorRpos in range(26):
CandidateRotorPositions.append([rotorLpos, rotorMpos, rotorRpos])
for ch in range(len(ciphertext)):
cipherchar = alphastr.index(ciphertext[ch].lower())
target = alphastr.index(cribtext[ch].lower())
#print(cipherchar, alpha[cipherchar], target, alpha[target])
total = 0
count = 0
for rotorPositions in range(len(CandidateRotorPositions)):
count += 1
rotorLstart = CandidateRotorPositions[rotorPositions][0]
rotorMstart = CandidateRotorPositions[rotorPositions][1]
rotorRstart = CandidateRotorPositions[rotorPositions][2]
#print("Run ", ch, " Starting positions: \t", alpha[rotorLstart], alpha[rotorMstart], alpha[rotorRstart])
if(Compute(ch + 1, cipherchar, rotorL, rotorM, rotorR, rotorLstart, rotorMstart, rotorRstart, reflector) == target):
#print("Found! starting positions: ", alpha[rotorLstart], alpha[rotorMstart], alpha[rotorRstart])
#print(" ending positions: ", alpha[Compute.RotorLPos], alpha[Compute.RotorMPos], alpha[Compute.RotorRPos])
ReducedRotorPositions.append([rotorLstart, rotorMstart, rotorRstart])
total += 1
print("Character ", ch, "\tPositions tested:", count, "\tStarting Positions Found:", total)
CandidateRotorPositions = ReducedRotorPositions
ReducedRotorPositions = []
for rotorPositions in CandidateRotorPositions:
print("Possible starting rotor configuration: ", alpha[rotorPositions[0]], alpha[rotorPositions[1]], alpha[rotorPositions[2]])
``` |
{
"source": "jjwallman/gscholartex",
"score": 3
} |
#### File: jjwallman/gscholartex/gscholartex.py
```python
import itertools
from bs4 import BeautifulSoup
import datetime
# I have yet to work out how to automatically obtain a web page with more than
# 20 articles due to restrictions on the Google Scholar API
# The current workaround is to manually load the web page, click "show more",
# and save the resulting html
def clean_number(string):
if string is None:
return 0
return ''.join(s for s in string if s.isdigit())
def reformat_entry(html_string, unwanted_strings):
for phrase in unwanted_strings:
if html_string.text.find(phrase) != -1:
return None
ref_data = html_string.contents[0].contents
ret = {'title': ref_data[0],
'authors': ref_data[1].contents[0]}
journal = ref_data[2].contents[0]
journal = journal.replace("arXiv preprint ", "")
journal = journal.split("(")
try:
journal[1] = journal[1].split(")")[1]
except IndexError:
pass
journal = "".join(journal)
journal = journal.split(", ")
try:
journal[1] = "".join(itertools.takewhile(str.isdigit, journal[1]))
except IndexError:
pass
ret['journal'] = ", ".join(journal)
ret['year'] = clean_number(ref_data[2].contents[1].contents[0])
try:
ret['citations'] = int(html_string.contents[1].contents[0])
except IndexError:
ret['citations'] = 0
return ret
def extract_publications(html_doc, unwanted_strings=None):
"""
Extract publications and citation data from a saved Google Scholar page.
Parameters
----------
html_doc, str
file name of the saved web page
unwanted_strings, an iterable of str
strings to filter out "publications" that should not be counted
"""
if unwanted_strings is None:
unwanted_strings = ["APS", "Bulletin"]
f = open(html_doc, "r", encoding='utf8')
doc = f.read()
f.close()
soup = BeautifulSoup(doc, 'html.parser')
for a in soup.findAll('a'):
a.replaceWithChildren()
# Stripping out refs, buttons, etc
labels = {'button': None,
'td': {"class": "gsc_a_x"},
'th': {'class': "gsc_a_x"},
'tr': {'id': "gsc_a_tr0"}}
for k, v in labels.items():
if v is None:
for entry in soup.findAll(k):
entry.decompose()
else:
for entry in soup.findAll(k, v):
entry.decompose()
for div in soup.find_all('th', {'class': "gsc_a_t"}):
for div2 in div.find_all('div', recursive=False):
div2.decompose()
pubs = soup.find_all('tr', {'class': 'gsc_a_tr'})
pubs = [reformat_entry(pub, unwanted_strings) for pub in pubs]
pubs = [pub for pub in pubs if pub is not None]
cites = [int(c.contents[0]) for c in soup.find_all('span', {'class': 'gsc_g_al'})]
years = [int(y.contents[0]) for y in soup.find_all('span', {'class': 'gsc_g_t'})]
return pubs, {year: cite for year, cite in zip(years, cites)}
def citation_metrics(publications):
"""
Return the h_index and total number of citations calculated from a list of
publications.
"""
cite_counts = sorted([v['citations'] for v in publications], reverse=True)
for j, k in enumerate(cite_counts):
if j + 1 > k:
return j, sum(cite_counts)
return len(cite_counts), sum(cite_counts)
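# Worked example (assumed citation counts, not real data): publications cited
# [10, 8, 5, 4, 1] times sort to the same list; the loop stops at j=4 because
# 5 > 1, giving an h-index of 4 and 28 total citations.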
def clean_cite(num_cites):
if num_cites == 0:
return ""
if num_cites == 1:
return " 1 citation."
return " {} citations.".format(num_cites)
def bib_entry(publication):
return "\\item {}. {}, {} ({}).{}\n".format(publication['title'],
publication['authors'],
publication['journal'],
publication['year'],
clean_cite(publication['citations']))
def scholar_to_tex(html_doc, output, unwanted_strings=None):
"""
Extract publications and citation data from a saved Google Scholar page.
The data is written to a file for inclusion in a LaTeX document.
Parameters
----------
html_doc, str
file name of the saved web page
output, str
file name for the output
unwanted_strings, an iterable of str
strings to filter out "publications" that should not be counted
"""
pubs, cites = extract_publications(html_doc, unwanted_strings)
h_index, total_cites = citation_metrics(pubs)
f = open(output, 'w')
f.write("\\UseRawInputEncoding")
f.write("\\newcommand{\\citedata}{%s}\n" % " ".join(str(a) for a in cites.items()))
f.write("\\newcommand{\\citedate}{%s}\n" % datetime.date.today())
f.write("\\newcommand{\\hindex}{%d}\n" % h_index)
f.write("\\newcommand{\\numcites}{%d}\n" % total_cites)
f.write("\\newcommand{\\printpubs}{%s}" %
"".join([bib_entry(pub) for pub in pubs]))
f.close()
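# Typical usage (file names are placeholders): save the Google Scholar profile page
# as HTML after clicking "show more", then run
#   scholar_to_tex("scholar_profile.html", "scholar_macros.tex")
# and \input the output in a LaTeX document to use \hindex, \numcites and \printpubs.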
``` |
{
"source": "JJwangbilin/fastapi-example",
"score": 2
} |
#### File: app/core/jwt.py
```python
from datetime import datetime, timedelta
from typing import Optional
import jwt
from fastapi import Depends, Header
from jwt import PyJWTError
from starlette.exceptions import HTTPException
from starlette.status import HTTP_403_FORBIDDEN, HTTP_404_NOT_FOUND
from fastapi.security import OAuth2PasswordBearer
from app.crud.user import get_user
from app.db.mongodb import AsyncIOMotorClient, get_database
from app.models.token import TokenPayload
from app.models.user import User
from app.core.config import JWT_TOKEN_PREFIX, SECRET_KEY, ACCESS_TOKEN_EXPIRE_MINUTES
ALGORITHM = "HS256"
# Validate the Authorization header and check the token prefix
def _get_authorization_token(Authorization: str = Header(...)):
token_prefix, token = Authorization.split(" ")
if token_prefix != JWT_TOKEN_PREFIX:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="令牌信息错误"
)
return token
# Swagger UI
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/users/login")
# Decode the token and fetch the user info from the db
async def _get_current_user(db: AsyncIOMotorClient = Depends(get_database),
token: str = Depends(oauth2_scheme)) -> User:
try:
payload = jwt.decode(token, str(SECRET_KEY), algorithms=[ALGORITHM])
        # TokenPayload validates the decoded content
token_data = TokenPayload(**payload)
except PyJWTError:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="无效的授权信息"
)
    # TODO: check whether the token has expired by comparing token_data.exp with the current time
dbuser = await get_user(db, id=token_data.id)
if not dbuser:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="用户不存在")
if not dbuser.activated:
raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="用户被冻结,如需申诉,请联系管理员")
user = User(**dbuser.dict(), token=token)
return user
# Public content, accessible without a token
def _get_authorization_token_optional(Authorization: str = Header(None)):
if Authorization:
return _get_authorization_token(Authorization)
return ""
# Optional: user info
async def _get_current_user_optional(db: AsyncIOMotorClient = Depends(get_database),
token: str = Depends(_get_authorization_token_optional), ) -> Optional[User]:
if token:
return await _get_current_user(db, token)
return None
# Get the current user info; required=True means a valid token is mandatory, False means the content is public
def get_current_user_authorizer(*, required: bool = True):
if required:
return _get_current_user
else:
return _get_current_user_optional
# Create a token
# The token contains exp plus the caller-supplied json data
def create_access_token(*, data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, str(SECRET_KEY), algorithm=ALGORITHM)
return encoded_jwt
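# Minimal usage sketch (payload is an assumption, not part of the original module):
#   token = create_access_token(data={"id": "some-user-id"},
#                               expires_delta=timedelta(minutes=30))
# The resulting JWT carries the caller-supplied claims plus the computed "exp".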
```
#### File: app/models/user.py
```python
from pydantic import BaseModel, UUID1, EmailStr, HttpUrl, Field
from app.models.common import IDModel, CreatedAtModel, UpdatedAtModel
from typing import List, Optional
from enum import Enum
from datetime import datetime
from app.common.security import verify_password
class _ChannelEnum(str, Enum):
RG = '注册'
WX = '微信'
QQ = 'QQ'
GIT = 'GitHub'
class UserBase(BaseModel):
username: str
email: Optional[EmailStr] = None
mobile: str = Field(None, regex='^1[3456789]\d{9}$')
unionid: str = Field(None, title='三方登录唯一id')
channel: _ChannelEnum
role: List[int]
remark: str = None
thumb: HttpUrl
activated: bool
class User(UserBase):
token: str
class UserInDB(UserBase):
id: str = ""
salt: str = ""
hashed_password: str = ""
updatedAt: datetime
createdAt: datetime
def check_password(self, password: str):
return verify_password(self.salt + password, self.hashed_password)
class UserInResponse(BaseModel):
user: User
# Written to the database; unrelated to the front-end request parameters
class UserInCreate(UserBase, IDModel, CreatedAtModel, UpdatedAtModel):
password: str
``` |
{
"source": "jjwang/laibot-client",
"score": 3
} |
#### File: laibot-client/client/app_utils.py
```python
import smtplib
from email.MIMEText import MIMEText
import urllib2
import re
from pytz import timezone
def sendEmail(SUBJECT, BODY, TO, FROM, SENDER, PASSWORD, SMTP_SERVER):
"""Sends an HTML email."""
for body_charset in 'US-ASCII', 'ISO-8859-1', 'UTF-8':
try:
BODY.encode(body_charset)
except UnicodeError:
pass
else:
break
msg = MIMEText(BODY.encode(body_charset), 'html', body_charset)
msg['From'] = SENDER
msg['To'] = TO
msg['Subject'] = SUBJECT
SMTP_PORT = 587
session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
session.starttls()
session.login(FROM, PASSWORD)
session.sendmail(SENDER, TO, msg.as_string())
session.quit()
def emailUser(profile, SUBJECT="", BODY=""):
"""
sends an email.
Arguments:
profile -- contains information related to the user (e.g., email
address)
SUBJECT -- subject line of the email
BODY -- body text of the email
"""
def generateSMSEmail(profile):
"""
Generates an email from a user's phone number based on their carrier.
"""
if profile['carrier'] is None or not profile['phone_number']:
return None
return str(profile['phone_number']) + "@" + profile['carrier']
if profile['prefers_email'] and profile['gmail_address']:
# add footer
if BODY:
BODY = profile['first_name'] + \
",<br><br>Here are your top headlines:" + BODY
BODY += "<br>Sent from your Jasper"
recipient = profile['gmail_address']
if profile['first_name'] and profile['last_name']:
recipient = profile['first_name'] + " " + \
profile['last_name'] + " <%s>" % recipient
else:
recipient = generateSMSEmail(profile)
if not recipient:
return False
try:
if 'mailgun' in profile:
user = profile['mailgun']['username']
password = profile['mailgun']['password']
server = 'smtp.mailgun.org'
else:
user = profile['gmail_address']
password = profile['gmail_password']
server = 'smtp.gmail.com'
sendEmail(SUBJECT, BODY, recipient, user,
"Jasper <jasper>", password, server)
return True
except Exception:
return False
def getTimezone(profile):
"""
Returns the pytz timezone for a given profile.
Arguments:
profile -- contains information related to the user (e.g., email
address)
"""
try:
return timezone(profile['timezone'])
except Exception:
return None
def generateTinyURL(URL):
"""
Generates a compressed URL.
Arguments:
URL -- the original URL to-be compressed
"""
target = "http://tinyurl.com/api-create.php?url=" + URL
response = urllib2.urlopen(target)
return response.read()
def isNegative(phrase):
"""
Returns True if the input phrase has a negative sentiment.
Arguments:
phrase -- the input phrase to-be evaluated
"""
return bool(re.search(r'\b(no(t)?|don\'t|stop|end)\b', phrase,
re.IGNORECASE))
def isPositive(phrase):
"""
Returns True if the input phrase has a positive sentiment.
Arguments:
phrase -- the input phrase to-be evaluated
"""
return bool(re.search(r'\b(sure|yes|yeah|go)\b', phrase, re.IGNORECASE))
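# Illustrative checks (not in the original module):
#   isNegative("No, don't do that")  -> True
#   isPositive("Sure, go ahead")     -> True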
```
#### File: laibot-client/client/jasperpath.py
```python
import os
# Jasper main directory
APP_PATH = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
DATA_PATH = os.path.join(APP_PATH, "static")
LIB_PATH = os.path.join(APP_PATH, "client")
TOOLS_PATH = os.path.join(APP_PATH, "tools")
PLUGIN_PATH = os.path.join(LIB_PATH, "modules")
CONFIG_PATH = os.path.join(APP_PATH, 'conf')
TJBOT_PATH = os.path.join(APP_PATH, '../tjbot/bootstrap/tests/')
def config(*fname):
return os.path.join(CONFIG_PATH, *fname)
def data(*fname):
return os.path.join(DATA_PATH, *fname)
def tjbot(*fname):
return os.path.join(TJBOT_PATH, *fname)
def hotword():
return '<PASSWORD>'
```
#### File: laibot-client/tests/test_modules.py
```python
import unittest
from client import test_mic
from client.modules import Unclear
DEFAULT_PROFILE = {
'prefers_email': False,
'location': 'Cape Town',
'timezone': 'US/Eastern',
'phone_number': '012344321'
}
class TestModules(unittest.TestCase):
def setUp(self):
self.profile = DEFAULT_PROFILE
self.send = False
def runConversation(self, query, inputs, module):
"""Generic method for spoofing conversation.
Arguments:
query -- The initial input to the server.
inputs -- Additional input, if conversation is extended.
Returns:
The server's responses, in a list.
"""
self.assertTrue(module.isValid(query))
mic = test_mic.Mic(inputs)
module.handle(query, mic, self.profile)
return mic.outputs
def testUnclear(self):
query = "What time is it?"
inputs = []
self.runConversation(query, inputs, Unclear)
``` |
{
"source": "jjwatts/gigantum-base-images",
"score": 3
} |
#### File: gigantum-base-images/python3-figantleaf/altmetric.py
```python
import requests
from hashlib import sha1
import hmac
import configparser
from urllib.parse import urlencode, quote_plus
import twitter
altproperties_location="/home/giguser/.dimensions/dsl.ini"
api_url = "https://www.altmetric.com/explorer/api/"
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
#if dict1[option] == -1:
#print ("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
Config = configparser.ConfigParser()
Config.read(altproperties_location)
aprops = ConfigSectionMap("altmetric")
secret=aprops['secret']
key=aprops['key']
def altmetric_auth(secret,filters):
my_hmac = hmac.new(bytes(secret, 'UTF-8'), bytes(filters, 'UTF-8'), sha1)
digest = my_hmac.hexdigest()
return digest
def querystrings(afilter,base='filter'):
digeststring = []
urlstring = []
for k in sorted(list(afilter.keys())):
if type(afilter[k]) == str:
urlstring.append("{}[{}]={}".format(base,k,afilter[k]))
digeststring.append("{}|{}".format(k,afilter[k]))
if type(afilter[k]) == list:
digeststring.append("{}|{}".format(k,"|".join(sorted(afilter[k]))))
for i in afilter[k]:
urlstring.append("{}[{}][]={}".format(base,k,i))
return dict(
digest="|".join(digeststring),
url="&".join(urlstring).replace('[','%5B').replace(']','%5D')
)
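# Example of the two strings built above (illustrative filter values):
#   querystrings({'scope': 'all', 'type': ['article', 'dataset']})
#   -> digest: "scope|all|type|article|dataset"
#      url:    "filter%5Bscope%5D=all&filter%5Btype%5D%5B%5D=article&filter%5Btype%5D%5B%5D=dataset"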
def init_altmetric_query(afilter,endpoint, api_url=api_url, page=None):
urlpage = ""
if page is not None:
urlpage = querystrings(page,base='page')['url']
query = "{}{}/?digest={}&{}&key={}&{}".format(api_url,
endpoint,
altmetric_auth(secret,querystrings(afilter)['digest']),
querystrings(afilter)['url'],
key,
urlpage)
#print(query)
return requests.get(query)
# twitter
tprops = ConfigSectionMap("twitter")
consumer_key=tprops['consumer_key']
consumer_secret=tprops['consumer_secret']
access_token_key=tprops['access_token_key']
access_token_secret=tprops['access_token_secret']
twitterapi = twitter.Api(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_token_key,
access_token_secret=access_token_secret)
```
#### File: gigantum-base-images/python3-figantleaf/progress.py
```python
import time, sys
from IPython.display import clear_output
def update_progress(progress):
bar_length = 20
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
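# Example usage in a notebook cell (illustrative):
#   for i in range(101):
#       time.sleep(0.05)        # stand-in for real work
#       update_progress(i / 100)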
``` |
{
"source": "jjwatts/gigantum-client",
"score": 2
} |
#### File: api/connections/list.py
```python
import base64
import graphene
class ListBasedConnection(object):
def __init__(self, edges, cursors, args):
"""Class to provide Relay compliant pagination for list based connections
Args:
edges(list): A list of edge data
cursors(list): A list of cursors for edges
args(dict): The input arguments to the resolve method
Returns:
ListBasedConnection
"""
self.edges = edges
self.cursors = cursors
self.args = args
self.page_info = None
def apply(self):
"""Method to apply cursors to the edges
Returns:
None
"""
if "first" in self.args and "last" in self.args:
raise ValueError("`first` and `last` arguments cannot be used together")
# Verify valid slicing args
if "first" in self.args:
if int(self.args["first"]) < 0:
raise ValueError("`first` must be greater than 0")
if "last" in self.args:
if int(self.args["last"]) < 0:
raise ValueError("`last` must be greater than 0")
# Apply cursor filters
after_index = None
before_index = None
if "after" in self.args:
if self.args["after"] in self.cursors:
# Remove edges after cursor
after_index = int(base64.b64decode(self.args["after"]))
else:
raise ValueError("`after` cursor is invalid")
if "before" in self.args:
if self.args["before"] in self.cursors:
# Remove edges after cursor
before_index = int(base64.b64decode(self.args["before"]))
else:
raise ValueError("`before` cursor is invalid")
if after_index is not None and before_index is not None:
self.edges = self.edges[after_index + 1:before_index]
self.cursors = self.cursors[after_index + 1:before_index]
elif after_index is not None:
self.edges = self.edges[after_index + 1:]
self.cursors = self.cursors[after_index + 1:]
elif before_index is not None:
self.edges = self.edges[:before_index]
self.cursors = self.cursors[:before_index]
pre_slice_len = len(self.edges)
# Apply slicing filters
if "first" in self.args:
if len(self.edges) > int(self.args["first"]):
self.edges = self.edges[:int(self.args["first"])]
self.cursors = self.cursors[:int(self.args["first"])]
if "last" in self.args:
if len(self.edges) > int(self.args["last"]):
self.edges = self.edges[-int(self.args["last"]):]
self.cursors = self.cursors[-int(self.args["last"]):]
# Compute page info status
has_previous_page = False
if "last" not in self.args or len(self.edges) == 0:
has_previous_page = False
elif pre_slice_len > int(self.args["last"]):
has_previous_page = True
has_next_page = False
if "first" not in self.args or len(self.edges) == 0:
has_next_page = False
elif pre_slice_len > int(self.args["first"]):
has_next_page = True
if len(self.edges) == 0:
start_cursor, end_cursor = None, None
else:
start_cursor, end_cursor = self.cursors[0], self.cursors[-1]
# startCursor and endCursor
self.page_info = graphene.relay.PageInfo(has_next_page=has_next_page, has_previous_page=has_previous_page,
start_cursor=start_cursor, end_cursor=end_cursor)
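# Minimal usage sketch (edge and cursor values are assumptions, not from the codebase):
#   edges = ["a", "b", "c"]
#   cursors = [base64.b64encode(str(i).encode()).decode() for i in range(len(edges))]
#   conn = ListBasedConnection(edges, cursors, {"first": 2})
#   conn.apply()
#   # conn.edges == ["a", "b"] and conn.page_info.has_next_page is True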
```
#### File: lmsrvcore/middleware/metric.py
```python
from gtmcore.logging import LMLogger
from time import time as timer
import json
logger = LMLogger.get_logger()
def time_all_resolvers_middleware(next, root, info, **args):
"""Middleware to time and log all resolvers"""
start = timer()
return_value = next(root, info, **args)
duration = timer() - start
data = {"metric_type": "field_resolver_duration",
"parent_type": root._meta.name if root and hasattr(root, '_meta') else '',
"field_name": info.field_name,
"duration_ms": round(duration * 1000, 2)}
logger.info(f"METRIC :: {json.dumps(data)}")
return return_value
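# Illustrative wiring (assumed, not shown in this module): the middleware list is
# handed to the GraphQL view when the schema is mounted, e.g.
#   GraphQLView.as_view('graphql', schema=schema,
#                       middleware=[time_all_resolvers_middleware])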
```
#### File: lmsrvcore/tests/test_identity.py
```python
import pytest
from flask import current_app
from mock import patch
from lmsrvcore.auth.identity import get_identity_manager_instance, AuthenticationError
from lmsrvcore.middleware import AuthorizationMiddleware, error_middleware, time_all_resolvers_middleware, \
DataloaderMiddleware
from lmsrvcore.tests.fixtures import fixture_working_dir_with_cached_user
from gtmcore.configuration import Configuration
from gtmcore.auth.local import LocalIdentityManager
class MockCurrentApp(object):
"""Mock class to test get_identity_manager_instance() without Flask in the loop"""
def __init__(self, id_manager=None):
self.config = {"LABMGR_ID_MGR": id_manager}
class MockFlaskContext(object):
"""Mock class to test middleware"""
def __init__(self):
self.headers = {"Authorization": "Bearer adkajshfgklujasdhfiuashfiusahf"}
self.labbook_loader = None
class MockGrapheneInfo(object):
"""Mock class to test middleware"""
def __init__(self):
self.context = MockFlaskContext()
class TestAuthIdentity(object):
def test_get_identity_manager_instance(self, fixture_working_dir_with_cached_user):
"""Test getting identity manager in a flask app"""
# Test normal
mgr = get_identity_manager_instance()
assert type(mgr) == LocalIdentityManager
# Test when no mgr is set
current_app.config["LABMGR_ID_MGR"] = None
mgr = get_identity_manager_instance()
assert mgr is None
# Test when mgr is missing from the current Flask application config
del current_app.config["LABMGR_ID_MGR"]
with pytest.raises(AuthenticationError):
get_identity_manager_instance()
def test_authorization_middleware_user_local(self, fixture_working_dir_with_cached_user):
"""Test authorization middlewhere when loading a user exists locally"""
def next_fnc(root, info, **args):
"""Dummy method to test next chain in middleware"""
assert root == "something"
assert type(info) == MockGrapheneInfo
assert args['foo'] == "a"
assert args['bar'] == "b"
# Create a mocked info obj and remove the auth header since you are testing the logged in user pull from cache
fake_info = MockGrapheneInfo()
del fake_info.context.headers["Authorization"]
mw = AuthorizationMiddleware()
mw.resolve(next_fnc, "something", fake_info, foo="a", bar="b")
def test_authorization_middleware_bad_header(self, fixture_working_dir_with_cached_user):
"""Test authorization middlewhere when a token header is malformed"""
def next_fnc(root, info, **args):
"""Dummy method to test next chain in middleware"""
assert "Should not get here"
fake_info = MockGrapheneInfo()
fake_info.context.headers["Authorization"] = "Token <PASSWORD>"
mw = AuthorizationMiddleware()
with pytest.raises(AuthenticationError):
mw.resolve(next_fnc, "something", fake_info, foo="a", bar="b")
# TODO: Add test when easier to mock a token
# def test_authorization_middleware_token(self):
# """Test authorization middlewhere when a token is provided"""
# pass
```
#### File: api/objects/commit.py
```python
import graphene
from gtmcore.workflows.gitlab import GitLabManager
from gtmcore.inventory.inventory import InventoryManager
from gtmcore.inventory.branching import BranchManager
from lmsrvcore.auth.identity import parse_token
from lmsrvcore.auth.user import get_logged_in_username
from lmsrvcore.api.interfaces import GitCommit, GitRepository
class LabbookCommit(graphene.ObjectType, interfaces=(graphene.relay.Node, GitRepository, GitCommit)):
"""An object representing a commit to a LabBook"""
@classmethod
def get_node(cls, info, id):
"""Method to resolve the object based on it's Node ID"""
# Parse the key
owner, name, hash_str = id.split("&")
return LabbookCommit(id=f"{owner}&{name}&{hash_str}", name=name, owner=owner,
hash=hash_str)
def resolve_id(self, info):
"""Resolve the unique Node id for this object"""
if not self.id:
if not self.owner or not self.name or not self.hash:
raise ValueError("Resolving a LabbookCommit Node ID requires owner, name, and hash to be set")
self.id = f"{self.owner}&{self.name}&{self.hash}"
def resolve_short_hash(self, info):
"""Resolve the short_hash field"""
return self.hash[:8]
def resolve_committed_on(self, info):
"""Resolve the committed_on field"""
return info.context.labbook_loader.load(f"{get_logged_in_username()}&{self.owner}&{self.name}").then(
lambda labbook: labbook.git.repo.commit(self.hash).committed_datetime.isoformat())
class Branch(graphene.ObjectType, interfaces=(graphene.relay.Node, GitRepository)):
""" Represents a branch in the repo """
branch_name = graphene.String(required=True)
# If true, indicates this branch is currently checked out
is_active = graphene.Boolean()
# Indicates whether this branch exists in the local repo
is_local = graphene.Boolean()
# Indicates whether this branch exists remotely
is_remote = graphene.Boolean()
# Indicates whether this branch can be merged into the current active branch
is_mergeable = graphene.Boolean()
# Count of commits on remote not present in local branch
commits_behind = graphene.Int()
# Count of commits on local branch not present in remote.
commits_ahead = graphene.Int()
@classmethod
def get_node(cls, info, id):
owner, labbook_name, branch_name = id.split('&')
return Branch(owner=owner, name=labbook_name, branch_name=branch_name)
def resolve_id(self, info):
return '&'.join((self.owner, self.name, self.branch_name))
def resolve_is_active(self, info):
lb = InventoryManager().load_labbook(get_logged_in_username(),
self.owner,
self.name)
return BranchManager(lb).active_branch == self.branch_name
def resolve_is_local(self, info):
lb = InventoryManager().load_labbook(get_logged_in_username(),
self.owner,
self.name)
return self.branch_name in BranchManager(lb).branches_local
def resolve_is_remote(self, info):
lb = InventoryManager().load_labbook(get_logged_in_username(),
self.owner,
self.name)
return self.branch_name in BranchManager(lb).branches_remote
def resolve_is_mergeable(self, info):
lb = InventoryManager().load_labbook(get_logged_in_username(),
self.owner,
self.name)
mergeable = self.branch_name in BranchManager(lb).branches_local \
and self.branch_name != BranchManager(lb).active_branch
return mergeable
@classmethod
def _configure_git(cls, lb, info) -> GitLabManager:
# Extract valid Bearer token
# TODO - This code is duplicated all over the place, must be refactored.
token = None
if hasattr(info.context.headers, 'environ'):
if "HTTP_AUTHORIZATION" in info.context.headers.environ:
token = parse_token(info.context.headers.environ["HTTP_AUTHORIZATION"])
if not token:
raise ValueError("Authorization header not provided. "
"Must have a valid session to query for collaborators")
default_remote = lb.client_config.config['git']['default_remote']
admin_service = None
for remote in lb.client_config.config['git']['remotes']:
if default_remote == remote:
admin_service = lb.client_config.config['git']['remotes'][remote]['admin_service']
break
if not admin_service:
raise ValueError('admin_service could not be found')
# Configure git creds
mgr = GitLabManager(default_remote, admin_service, access_token=token)
mgr.configure_git_credentials(default_remote, get_logged_in_username())
return mgr
def resolve_commits_ahead(self, info):
lb = InventoryManager().load_labbook(get_logged_in_username(),
self.owner,
self.name)
self._configure_git(lb, info)
bm = BranchManager(lb)
return bm.get_commits_ahead(branch_name=self.branch_name)
def resolve_commits_behind(self, info):
lb = InventoryManager().load_labbook(get_logged_in_username(),
self.owner,
self.name)
self._configure_git(lb, info)
bm = BranchManager(lb)
return bm.get_commits_behind(branch_name=self.branch_name)
```
#### File: lmsrvlabbook/tests/int_labbook_mutations.py
```python
import multiprocessing
import os
import pprint
import pytest
import shutil
import time
from mock import patch
import requests
from gtmcore.environment import ComponentManager
from gtmcore.dispatcher import Dispatcher, JobKey
from gtmcore.inventory.inventory import InventoryManager
from lmsrvlabbook.tests.fixtures import fixture_working_dir_env_repo_scoped, fixture_working_dir
from gtmcore.fixtures import ENV_UNIT_TEST_REPO, ENV_UNIT_TEST_BASE, ENV_UNIT_TEST_REV
import service
@pytest.fixture()
def start_server():
pass
class SampleMockObject(object):
def method_to_mock(self):
with open('/tmp/cats', 'w') as f:
f.write("If you see this file, things didn't work")
def mocky(self):
with open('/tmp/dogs', 'w') as f:
f.write("This indicates the mocking in a subprocess worked!")
def invoker():
return SampleMockObject().method_to_mock()
@pytest.fixture(scope="session")
def pause():
time.sleep(3)
class TestLabbookMutation(object):
def test_mocking_in_subprocess(self):
# This test should remain to validate that mocking applies to classes
# loaded by a sub-process of this pytest process.
if os.path.exists('/tmp/cats'):
os.remove('/tmp/cats')
if os.path.exists('/tmp/dogs'):
os.remove('/tmp/dogs')
with patch.object(SampleMockObject, 'method_to_mock', mocky):
assert not os.path.exists('/tmp/cats')
proc = multiprocessing.Process(target=invoker)
proc.daemon = True
proc.start()
time.sleep(1)
assert not os.path.exists('/tmp/cats')
assert os.path.exists('/tmp/dogs')
def test_launch_api_server(self, pause, fixture_working_dir_env_repo_scoped):
proc = multiprocessing.Process(target=service.main, kwargs={'debug': False})
proc.daemon = True
proc.start()
time.sleep(4)
assert proc.is_alive()
proc.terminate()
def test_insert_file(self, fixture_working_dir_env_repo_scoped):
# TODO - Pending on integration tests working.
pass
def test_export_and_import_lb(self, fixture_working_dir_env_repo_scoped):
api_server_proc = multiprocessing.Process(target=service.main, kwargs={'debug': False})
api_server_proc.daemon = True
api_server_proc.start()
assert api_server_proc.is_alive()
time.sleep(5)
assert api_server_proc.is_alive()
# Make and validate request
assert api_server_proc.is_alive()
lb_name = "mutation-export-import-unittest"
im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
lb = im.create_labbook("default", "default", lb_name, description="Import/Export Mutation Testing.")
cm = ComponentManager(lb)
cm.add_base(ENV_UNIT_TEST_REPO, 'ut-busybox', 0)
assert api_server_proc.is_alive()
export_query = """
mutation export {
exportLabbook(input: {
owner: "default",
labbookName: "%s"
}) {
jobKey
}
}
""" % lb.name
r = fixture_working_dir_env_repo_scoped[2].execute(export_query)
pprint.pprint(r)
# Sleep while the background job completes, and then delete new lb.
time.sleep(5)
d = Dispatcher()
job_status = d.query_task(JobKey(r['data']['exportLabbook']['jobKey']))
# Delete existing labbook in file system.
shutil.rmtree(lb.root_dir)
assert api_server_proc.is_alive()
assert job_status.status == 'finished'
assert not os.path.exists(lb.root_dir)
assert os.path.exists(job_status.result)
pprint.pprint(job_status.result)
if os.path.exists(os.path.join('/tmp', os.path.basename(job_status.result))):
os.remove(os.path.join('/tmp', os.path.basename(job_status.result)))
new_path = shutil.move(job_status.result, '/tmp')
# Now, import the labbook that was just exported.
export_query = """
mutation import {
importLabbook(input: {
}) {
jobKey
}
}
"""
files = {'uploadFile': open(new_path, 'rb')}
qry = {"query": export_query}
assert api_server_proc.is_alive()
r = requests.post('http://localhost:10001/labbook/', data=qry, files=files)
time.sleep(0.5)
pprint.pprint(r)
assert 'errors' not in r
time.sleep(2)
```
#### File: lmsrvlabbook/tests/test_dataloader_package.py
```python
import pytest
import os
from lmsrvlabbook.tests.fixtures import fixture_working_dir, build_image_for_jupyterlab
from promise import Promise
from lmsrvlabbook.dataloader.package import PackageLatestVersionLoader
class TestDataloaderPackage(object):
def test_load_one_pip(self, build_image_for_jupyterlab):
"""Test loading 1 package"""
key = "pip>munit1"
lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]
loader = PackageLatestVersionLoader([key], lb, username)
promise1 = loader.load(key)
assert isinstance(promise1, Promise)
pkg = promise1.get()
assert pkg == '0.12.4'
def test_load_many_pip(self, build_image_for_jupyterlab):
"""Test loading many labbooks"""
lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]
keys = ["pip>munit1", "pip>munit2", "pip>munit3"]
loader = PackageLatestVersionLoader(keys, lb, username)
promise1 = loader.load_many(keys)
assert isinstance(promise1, Promise)
version_list = promise1.get()
assert len(version_list) == 3
assert version_list[0] == "0.12.4"
assert version_list[1] == "12.2"
assert version_list[2] == "5.0"
def test_load_many_conda(self, build_image_for_jupyterlab):
"""Test loading many labbooks"""
lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]
keys = ["conda3&cdutil", "conda3&python-coveralls", "conda3&nltk"]
loader = PackageLatestVersionLoader(keys, lb, username)
promise1 = loader.load_many(keys)
assert isinstance(promise1, Promise)
version_list = promise1.get()
assert len(version_list) == 3
assert version_list[0] == "8.1"
assert version_list[1] == "2.9.1"
assert version_list[2] == "3.2.5"
def test_load_many_conda2(self, build_image_for_jupyterlab):
"""Test loading many labbooks"""
lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]
keys = ["conda3&cdutil", "conda3&python-coveralls", "conda3&nltk"]
loader = PackageLatestVersionLoader(keys, lb, username)
promise1 = loader.load_many(keys)
assert isinstance(promise1, Promise)
version_list = promise1.get()
assert len(version_list) == 3
assert version_list[0] == "8.1"
assert version_list[1] == "2.9.1"
assert version_list[2] == "3.2.5"
def test_load_many_mixed(self, build_image_for_jupyterlab):
"""Test loading many labbooks"""
lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]
keys = ["conda3&cdutil", "pip>munit1", "conda3&nltk"]
loader = PackageLatestVersionLoader(keys, lb, username)
promise1 = loader.load_many(keys)
assert isinstance(promise1, Promise)
version_list = promise1.get()
assert len(version_list) == 3
assert version_list[0] == "8.1"
assert version_list[1] == "0.12.4"
assert version_list[2] == "3.2.5"
def test_load_invalid_package(self, build_image_for_jupyterlab):
"""Test loading many labbooks"""
lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]
keys = ["pip&scipysdfsdfs", "<KEY>"]
loader = PackageLatestVersionLoader(keys, lb, username)
promise1 = loader.load_many(keys)
assert isinstance(promise1, Promise)
with pytest.raises(Exception):
version_list = promise1.get()
```
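The tests above rely on a composite key format for the version loader: pip packages are keyed as `pip>package` and conda packages as `conda3&package`, and both `load` and `load_many` return promises. A minimal usage sketch, assuming a labbook `lb` and `username` are already available from a fixture such as `build_image_for_jupyterlab`:
```python
# Sketch only: `lb` and `username` are assumed to come from an existing fixture.
# The key separators (">" for pip, "&" for conda3) are taken from the tests above.
from promise import Promise
from lmsrvlabbook.dataloader.package import PackageLatestVersionLoader

def latest_versions(lb, username):
    keys = ["pip>munit1", "conda3&nltk"]
    loader = PackageLatestVersionLoader(keys, lb, username)
    promise = loader.load_many(keys)   # returns a Promise
    assert isinstance(promise, Promise)
    return promise.get()               # resolves to a list of version strings
```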
#### File: lmsrvlabbook/tests/test_dataset_collaborator_mutations.py
```python
import responses
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request
from lmsrvlabbook.tests.fixtures import fixture_working_dir
import pytest
from gtmcore.inventory.inventory import InventoryManager
@pytest.fixture()
def mock_create_dataset(fixture_working_dir):
    # Create a dataset in the temporary directory
im = InventoryManager(fixture_working_dir[0])
ds = im.create_dataset("default", "default", "dataset1",
storage_type="gigantum_object_v1", description="Test labbook 1")
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects?search=dataset1',
json=[{
"id": 22,
"description": "",
}],
status=200, match_querystring=True)
yield fixture_working_dir
class TestDatasetCollaboratorMutations(object):
@responses.activate
def test_add_collaborator(self, mock_create_dataset):
"""Test adding a collaborator to a dataset"""
# Setup REST mocks
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "<NAME>",
"username": "default",
"state": "active",
"access_level": 30
}
],
status=200)
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json={
"id": 100,
"name": "<NAME>",
"username": "default",
"state": "active",
},
status=201)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1',
json=[{
"id": 27,
"description": "",
}],
status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": 40,
"expires_at": None
},
{
"id": 100,
"name": "<NAME>",
"username": "person100",
"access_level": 30,
"expires_at": None
}
],
status=200)
# Mock the request context so a fake authorization header is present
builder = EnvironBuilder(path='/labbook', method='POST', headers={'Authorization': 'Bearer AJDFHASD'})
env = builder.get_environ()
req = Request(environ=env)
query = """
mutation AddCollaborator {
addDatasetCollaborator(
input: {
owner: "default",
datasetName: "dataset1",
username: "default"
permissions: "readwrite"
}) {
updatedDataset {
collaborators {
collaboratorUsername
}
canManageCollaborators
}
}
}
"""
r = mock_create_dataset[2].execute(query, context_value=req)
assert 'errors' not in r
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][0]['collaboratorUsername'] == 'janed'
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][1]['collaboratorUsername'] == 'person100'
assert r['data']['addDatasetCollaborator']['updatedDataset']['canManageCollaborators'] is False
@responses.activate
def test_add_collaborator_as_owner(self, mock_create_dataset):
"""Test adding a collaborator to a LabBook"""
# Setup REST mocks
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "<NAME>",
"username": "default",
"state": "active",
}
],
status=200)
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json={
"id": 100,
"name": "<NAME>",
"username": "default",
"state": "active",
},
status=201)
responses.add(responses.DELETE, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members/100',
status=204)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1',
json=[{
"id": 27,
"description": "",
}],
status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json=[
{
"id": 29,
"name": "Default User",
"username": "default",
"access_level": 40,
"expires_at": None
},
{
"id": 100,
"name": "<NAME>",
"username": "person100",
"access_level": 30,
"expires_at": None
}
],
status=200)
# Mock the request context so a fake authorization header is present
builder = EnvironBuilder(path='/labbook', method='POST', headers={'Authorization': 'Bearer AJDFHASD'})
env = builder.get_environ()
req = Request(environ=env)
query = """
mutation AddCollaborator {
addDatasetCollaborator(
input: {
owner: "default",
datasetName: "dataset1",
username: "default"
permissions: "owner"
}) {
updatedDataset {
collaborators {
collaboratorUsername
}
canManageCollaborators
}
}
}
"""
r = mock_create_dataset[2].execute(query, context_value=req)
assert 'errors' not in r
assert r['data']['addDatasetCollaborator']['updatedDataset']['canManageCollaborators'] is True
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][0]['collaboratorUsername'] == 'default'
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][1]['collaboratorUsername'] == 'person100'
@responses.activate
def test_delete_collaborator(self, mock_create_dataset):
"""Test deleting a collaborator from a LabBook"""
# Setup REST mocks
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "<NAME>",
"username": "default",
"state": "active",
}
],
status=200)
responses.add(responses.DELETE, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members/100',
status=204)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1',
json=[{
"id": 27,
"description": "",
}],
status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": 40,
"expires_at": None
}
],
status=200)
# Mock the request context so a fake authorization header is present
builder = EnvironBuilder(path='/labbook', method='DELETE', headers={'Authorization': 'Bearer AJDFHASD'})
env = builder.get_environ()
req = Request(environ=env)
query = """
mutation DeleteCollaborator {
deleteDatasetCollaborator(
input: {
owner: "default",
datasetName: "dataset1",
username: "default"
}) {
updatedDataset {
collaborators {
collaboratorUsername
}
}
}
}
"""
r = mock_create_dataset[2].execute(query, context_value=req)
assert 'errors' not in r
assert r['data']['deleteDatasetCollaborator']['updatedDataset']['collaborators'][0]['collaboratorUsername'] == 'janed'
@responses.activate
def test_change_collaborator_permissions(self, mock_create_dataset):
"""Test adding a collaborator to a dataset"""
# Setup REST mocks
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "<NAME>",
"username": "default",
"state": "active",
"access_level": 30
}
],
status=200)
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json={
"username": "default",
"access_level": 30,
},
status=201)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1',
json=[{
"id": 27,
"description": "",
}],
status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": 40,
"expires_at": None
},
{
"id": 100,
"name": "<NAME>",
"username": "default",
"access_level": 30,
"expires_at": None
}
],
status=200)
responses.add(responses.DELETE, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members/100',
status=204)
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json={
"username": "default",
"access_level": 20,
},
status=201)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/default%2Fdataset1/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": 40,
"expires_at": None
},
{
"id": 100,
"name": "<NAME>",
"username": "default",
"access_level": 20,
"expires_at": None
}
],
status=200)
# Mock the request context so a fake authorization header is present
builder = EnvironBuilder(path='/labbook', method='POST', headers={'Authorization': 'Bearer AJDFHASD'})
env = builder.get_environ()
req = Request(environ=env)
query = """
mutation AddCollaborator {
addDatasetCollaborator(
input: {
owner: "default",
datasetName: "dataset1",
username: "default"
permissions: "readwrite"
}) {
updatedDataset {
collaborators {
collaboratorUsername
}
canManageCollaborators
}
}
}
"""
r = mock_create_dataset[2].execute(query, context_value=req)
assert 'errors' not in r
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][0]['collaboratorUsername'] == 'janed'
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][1]['collaboratorUsername'] == 'default'
assert r['data']['addDatasetCollaborator']['updatedDataset']['canManageCollaborators'] is False
query = """
mutation AddCollaborator {
addDatasetCollaborator(
input: {
owner: "default",
datasetName: "dataset1",
username: "default"
permissions: "readonly"
}) {
updatedDataset {
collaborators {
collaboratorUsername
}
canManageCollaborators
}
}
}
"""
r = mock_create_dataset[2].execute(query, context_value=req)
assert 'errors' not in r
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][0]['collaboratorUsername'] == 'janed'
assert r['data']['addDatasetCollaborator']['updatedDataset']['collaborators'][1]['collaboratorUsername'] == 'default'
assert r['data']['addDatasetCollaborator']['updatedDataset']['canManageCollaborators'] is False
```
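The mocked GitLab responses above suggest how the mutation's `permissions` strings map onto GitLab access levels: `readwrite` is posted as access level 30, `readonly` as 20, and only a level-40 collaborator sees `canManageCollaborators` as true. A hedged summary of that inferred mapping:
```python
# Inferred from the fixtures in this file, not from the server implementation:
# the access level posted to /members tracks the `permissions` string, and
# canManageCollaborators is only reported true for the level-40 (owner) case.
PERMISSION_TO_ACCESS_LEVEL = {
    "readonly": 20,
    "readwrite": 30,
    "owner": 40,
}
```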
#### File: lmsrvlabbook/tests/test_dataset_files_queries.py
```python
import pytest
import os
from snapshottest import snapshot
from lmsrvlabbook.tests.fixtures import fixture_single_dataset
class TestDatasetFilesQueries(object):
def test_get_dataset_files(self, fixture_single_dataset, snapshot):
query = """{
dataset(name: "test-dataset", owner: "default") {
id
name
description
allFiles {
edges{
node {
key
isDir
isFavorite
isLocal
size
}
cursor
}
pageInfo{
hasNextPage
hasPreviousPage
endCursor
}
}
}
}
"""
r = fixture_single_dataset[2].execute(query)
assert 'errors' not in r
snapshot.assert_match(r)
query = """{
dataset(name: "test-dataset", owner: "default") {
id
name
description
allFiles(first: 2) {
edges{
node {
key
isDir
isFavorite
isLocal
size
}
cursor
}
pageInfo{
hasNextPage
hasPreviousPage
endCursor
}
}
}
}
"""
r = fixture_single_dataset[2].execute(query)
assert 'errors' not in r
snapshot.assert_match(r)
query = """{
dataset(name: "test-dataset", owner: "default") {
id
name
description
allFiles(first: 1, after: "MQ==") {
edges{
node {
key
isDir
isFavorite
isLocal
size
}
cursor
}
pageInfo{
hasNextPage
hasPreviousPage
endCursor
}
}
}
}
"""
r = fixture_single_dataset[2].execute(query)
assert 'errors' not in r
snapshot.assert_match(r)
query = """{
dataset(name: "test-dataset", owner: "default") {
id
name
description
allFiles(first: 100, after: "MQ==") {
edges{
node {
key
isDir
isFavorite
isLocal
size
}
cursor
}
pageInfo{
hasNextPage
hasPreviousPage
endCursor
}
}
}
}
"""
r = fixture_single_dataset[2].execute(query)
assert 'errors' not in r
snapshot.assert_match(r)
def test_get_dataset_files_missing(self, fixture_single_dataset, snapshot):
query = """{
dataset(name: "test-dataset", owner: "default") {
id
name
description
allFiles {
edges{
node {
key
isDir
isFavorite
isLocal
size
}
cursor
}
pageInfo{
hasNextPage
hasPreviousPage
endCursor
}
}
}
}
"""
r = fixture_single_dataset[2].execute(query)
assert 'errors' not in r
snapshot.assert_match(r)
ds = fixture_single_dataset[3]
cache_mgr = fixture_single_dataset[4]
revision = ds.git.repo.head.commit.hexsha
os.remove(os.path.join(cache_mgr.cache_root, revision, 'test1.txt'))
os.remove(os.path.join(cache_mgr.cache_root, revision, 'test2.txt'))
query = """{
dataset(name: "test-dataset", owner: "default") {
id
name
description
allFiles {
edges{
node {
key
isDir
isFavorite
isLocal
size
}
cursor
}
pageInfo{
hasNextPage
hasPreviousPage
endCursor
}
}
}
}
"""
r = fixture_single_dataset[2].execute(query)
assert 'errors' not in r
snapshot.assert_match(r)
```
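The pagination arguments in these queries follow the Relay connection pattern, and the fixture cursor `"MQ=="` decodes to the string `"1"`, i.e. a base64-encoded element index. A small sketch of that encoding, offered as an inference from the fixture values rather than the project's actual cursor implementation:
```python
import base64

def encode_cursor(index: int) -> str:
    # "MQ==" == base64("1"), matching the `after` value used above
    return base64.b64encode(str(index).encode()).decode()

def decode_cursor(cursor: str) -> int:
    return int(base64.b64decode(cursor).decode())

assert encode_cursor(1) == "MQ=="
assert decode_cursor("MQ==") == 1
```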
#### File: lmsrvlabbook/tests/test_dataset_upload_files_mutations.py
```python
import os
import io
import math
import tempfile
import pytest
from graphene.test import Client
from werkzeug.datastructures import FileStorage
from gtmcore.inventory.inventory import InventoryManager
from gtmcore.dataset.cache.filesystem import HostFilesystemCache
from gtmcore.dataset.manifest import Manifest
from lmsrvcore.middleware import DataloaderMiddleware
from lmsrvlabbook.tests.fixtures import fixture_working_dir
@pytest.fixture()
def mock_create_dataset(fixture_working_dir):
# Create a dataset in the temporary directory
im = InventoryManager(fixture_working_dir[0])
ds = im.create_dataset("default", "default", "dataset1", storage_type="gigantum_object_v1",
description="my dataset")
# name of the config file, temporary working directory, the schema
yield fixture_working_dir
class TestDatasetUploadFilesMutations(object):
def test_add_file(self, mock_create_dataset):
"""Test adding a new file to a labbook"""
class DummyContext(object):
def __init__(self, file_handle):
self.dataset_loader = None
self.files = {'uploadChunk': file_handle}
client = Client(mock_create_dataset[3], middleware=[DataloaderMiddleware()])
# Create file to upload
test_file = os.path.join(tempfile.gettempdir(), "myValidFile.dat")
est_size = 9000000
try:
os.remove(test_file)
except:
pass
with open(test_file, 'wb') as tf:
tf.write(os.urandom(est_size))
new_file_size = os.path.getsize(tf.name)
# Get upload params
chunk_size = 4194000
file_info = os.stat(test_file)
file_size = int(file_info.st_size / 1000)
total_chunks = int(math.ceil(file_info.st_size / chunk_size))
ds = InventoryManager(mock_create_dataset[0]).load_dataset('default', 'default', 'dataset1')
fsc = HostFilesystemCache(ds, 'default')
target_file = os.path.join(fsc.current_revision_dir, "myValidFile.dat")
txid = "000-unitest-transaction"
with open(test_file, 'rb') as tf:
# Check for file to exist (shouldn't yet)
assert os.path.exists(target_file) is False
for chunk_index in range(total_chunks):
# Upload a chunk
chunk = io.BytesIO()
chunk.write(tf.read(chunk_size))
chunk.seek(0)
file = FileStorage(chunk)
query = f"""
mutation addDatasetFile{{
addDatasetFile(input:{{owner:"default",
datasetName: "dataset1",
filePath: "myValidFile.dat",
transactionId: "{txid}",
chunkUploadParams:{{
uploadId: "fdsfdsfdsfdfs",
chunkSize: {chunk_size},
totalChunks: {total_chunks},
chunkIndex: {chunk_index},
fileSizeKb: {file_size},
filename: "{os.path.basename(test_file)}"
}}
}}) {{
newDatasetFileEdge {{
node{{
id
key
isDir
size
}}
}}
}}
}}
"""
r = client.execute(query, context_value=DummyContext(file))
assert 'errors' not in r
# So, these will only be populated once the last chunk is uploaded. Will be None otherwise.
assert r['data']['addDatasetFile']['newDatasetFileEdge']['node']['isDir'] is False
assert r['data']['addDatasetFile']['newDatasetFileEdge']['node']['key'] == 'myValidFile.dat'
assert r['data']['addDatasetFile']['newDatasetFileEdge']['node']['size'] == f"{new_file_size}"
# When done uploading, file should exist in the labbook
assert os.path.exists(target_file)
assert os.path.isfile(target_file)
complete_query = f"""
mutation completeQuery {{
completeDatasetUploadTransaction(input: {{
owner: "default",
datasetName: "dataset1",
transactionId: "{txid}"
}}) {{
success
}}
}}
"""
r = client.execute(complete_query, context_value=DummyContext(file))
assert 'errors' not in r
m = Manifest(ds, 'default')
status = m.status()
assert len(status.created) == 0
assert len(status.modified) == 0
assert len(status.deleted) == 0
assert 'Uploaded 1 new file(s)' in ds.git.log()[0]['message']
def test_add_file_errors(self, mock_create_dataset, snapshot):
"""Test new file error handling"""
class DummyContext(object):
def __init__(self, file_handle):
self.labbook_loader = None
self.files = {'blah': file_handle}
client = Client(mock_create_dataset[3])
query = f"""
mutation addDatasetFile{{
addDatasetFile(input:{{owner:"default",
datasetName: "dataset1",
filePath: "myValidFile.dat",
transactionId: "adsfasdfasdf",
chunkUploadParams:{{
uploadId: "fdsfdsfdsfdfs",
chunkSize: 200,
totalChunks: 2,
chunkIndex: 0,
fileSizeKb: 6777,
filename: "asdfh"
}}
}}) {{
newDatasetFileEdge {{
node{{
id
key
isDir
size
}}
}}
}}
}}
"""
# Fail because no file
r = client.execute(query, context_value=DummyContext(None))
assert 'errors' in r
```
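The chunk upload parameters built inline in `test_add_file` can be summarized as a small helper. This is a sketch of the same arithmetic used above (chunk size in bytes, total chunks rounded up, file size reported in kilobytes); the helper name is not part of the codebase:
```python
import math
import os

def chunk_upload_params(path: str, chunk_size: int = 4194000) -> dict:
    """Mirror the chunkUploadParams computed in the test above."""
    size_bytes = os.stat(path).st_size
    return {
        "chunkSize": chunk_size,
        "totalChunks": int(math.ceil(size_bytes / chunk_size)),
        "fileSizeKb": int(size_bytes / 1000),
        "filename": os.path.basename(path),
    }
```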
#### File: lmsrvlabbook/tests/test_environment_component_mutations.py
```python
import pytest
import yaml
import os
import graphql
from snapshottest import snapshot
from lmsrvlabbook.tests.fixtures import fixture_working_dir_env_repo_scoped
from gtmcore.inventory.inventory import InventoryManager
class TestAddComponentMutations(object):
def test_add_package(self, fixture_working_dir_env_repo_scoped, snapshot):
"""Test listing labbooks"""
im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
lb = im.create_labbook('default', 'default', 'catbook-package-tester',
description="LB to test package mutation")
# Add a base image
pkg_query = """
mutation myPkgMutation {
addPackageComponents (input: {
owner: "default",
labbookName: "catbook-package-tester",
packages: [{manager: "conda3", package: "python-coveralls", version: "2.9.1"}]
}) {
clientMutationId
newPackageComponentEdges {
node{
id
manager
package
version
fromBase
}
cursor
}
}
}
"""
snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(pkg_query))
def test_add_multiple_packages(self, fixture_working_dir_env_repo_scoped, snapshot):
"""Test listing labbooks"""
im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
lb = im.create_labbook('default', 'default', 'catbook-package-tester-multi',
description="LB to test package mutation")
labbook_dir = lb.root_dir
# Add a base image
pkg_query = """
mutation myPkgMutation {
addPackageComponents (input: {
owner: "default",
labbookName: "catbook-package-tester-multi",
packages: [{manager: "pip3", package: "gtmunit1", version: "0.12.4"},
{manager: "pip3", package: "gtmunit2", version: "1.14.1"}]
}) {
clientMutationId
newPackageComponentEdges {
node{
id
manager
package
version
fromBase
}
cursor
}
}
}
"""
snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(pkg_query))
# Validate the LabBook .gigantum/env/ directory
assert os.path.exists(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager')) is True
assert os.path.exists(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager', 'pip3_gtmunit1.yaml'))
assert os.path.exists(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager', 'pip3_gtmunit2.yaml'))
with open(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager', 'pip3_gtmunit1.yaml')) as pkg_yaml:
package_info_dict = yaml.safe_load(pkg_yaml)
assert package_info_dict['package'] == 'gtmunit1'
assert package_info_dict['manager'] == 'pip3'
assert package_info_dict['version'] == '0.12.4'
assert package_info_dict['from_base'] is False
with open(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager', 'pip3_gtmunit2.yaml')) as pkg_yaml:
package_info_dict = yaml.safe_load(pkg_yaml)
assert package_info_dict['package'] == 'gtmunit2'
assert package_info_dict['manager'] == 'pip3'
assert package_info_dict['version'] == '1.14.1'
assert package_info_dict['from_base'] is False
def test_add_packages_multiple_mgr_error(self, fixture_working_dir_env_repo_scoped, snapshot):
"""Test listing labbooks"""
im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
lb = im.create_labbook('default', 'default', 'catbook-package-tester-mgr-errors',
description="LB to test package mutation")
# Test with version missing
pkg_query = """
mutation myPkgMutation {
addPackageComponents (input: {
owner: "default",
labbookName: "catbook-package-tester-mgr-errors",
packages: [{manager: "pip3", package: "requests", version: "2.18.4"},
{manager: "conda3", package: "responses", version: "1.4"}]
}) {
clientMutationId
newPackageComponentEdges {
node{
id
manager
package
version
fromBase
}
cursor
}
}
}
"""
result = fixture_working_dir_env_repo_scoped[2].execute(pkg_query)
assert "errors" in result
assert result['errors'][0]['message'] == 'Only batch add packages via 1 package manager at a time.'
def test_add_package_no_version(self, fixture_working_dir_env_repo_scoped, snapshot):
"""Test adding a package but omitting the version"""
im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
lb = im.create_labbook('default', 'default', 'catbook-package-no-version',
description="LB to test package mutation")
# Add a base image
pkg_query = """
mutation myPkgMutation {
addPackageComponents (input: {
owner: "default",
labbookName: "catbook-package-no-version",
packages: [{manager: "pip3", package: "gtmunit1"}]
}) {
clientMutationId
newPackageComponentEdges {
node{
id
manager
package
version
fromBase
}
cursor
}
}
}
"""
result = fixture_working_dir_env_repo_scoped[2].execute(pkg_query)
assert "errors" in result
assert result['errors'][0]['message'] == "'version'"
def test_remove_package(self, fixture_working_dir_env_repo_scoped, snapshot):
"""Test removing a package from a labbook"""
im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
lb = im.create_labbook('default', 'default', 'catbook-package-tester-remove',
description="LB to test package mutation")
labbook_dir = lb.root_dir
# Add a pip package
pkg_query = """
mutation myPkgMutation {
addPackageComponents (input: {
owner: "default",
labbookName: "catbook-package-tester-remove",
packages: [{manager: "pip3", package: "gtmunit1", version: "0.12.4"},
{manager: "pip3", package: "gtmunit2", version: "1.14.1"}]
}) {
clientMutationId
newPackageComponentEdges {
node{
id
}
}
}
}
"""
snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(pkg_query))
# Assert that the dependency was added
assert os.path.exists(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager', 'pip3_gtmunit1.yaml'))
assert os.path.exists(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager', 'pip3_gtmunit2.yaml'))
# Remove a pip package
pkg_query = """
mutation myPkgMutation {
removePackageComponents (input: {
owner: "default",
labbookName: "catbook-package-tester-remove",
packages: ["gtmunit2"],
manager: "pip3"
}) {
clientMutationId
success
}
}
"""
snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(pkg_query))
# Assert that the dependency is gone
assert not os.path.exists(os.path.join(labbook_dir, '.gigantum', 'env',
'package_manager', 'pip3_gtmunit2.yaml'))
assert os.path.exists(os.path.join(labbook_dir, '.gigantum', 'env', 'package_manager', 'pip3_gtmunit1.yaml'))
def test_custom_docker_snippet_success(self, fixture_working_dir_env_repo_scoped):
"""Test adding a custom dependency"""
im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
lb = im.create_labbook('default', 'default', 'custom-docker-lb-unittest',
description="Testing custom docker and stuff")
client = fixture_working_dir_env_repo_scoped[2]
query = """
mutation addCustomDocker($labbook_name: String!, $owner: String!, $custom_docker: String!) {
addCustomDocker(input: {
owner: $owner,
labbookName: $labbook_name,
dockerContent: $custom_docker
}) {
updatedEnvironment {
dockerSnippet
}
}
}
"""
vars = {'labbook_name': "custom-docker-lb-unittest",
'owner': 'default',
'custom_docker': "RUN true"}
r = client.execute(query, variable_values=vars)
assert 'errors' not in r
assert r['data']['addCustomDocker']['updatedEnvironment']['dockerSnippet'] == "RUN true"
remove_query = """
mutation removeCustomDocker($labbook_name: String!, $owner: String!) {
removeCustomDocker(input: {
owner: $owner,
labbookName: $labbook_name
}) {
updatedEnvironment {
dockerSnippet
}
}
}
"""
vars = {'labbook_name': "custom-docker-lb-unittest",
'owner': 'default'}
r = client.execute(remove_query, variable_values=vars)
assert 'errors' not in r
assert r['data']['removeCustomDocker']['updatedEnvironment']['dockerSnippet'] == ""
```
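The assertions in `test_add_multiple_packages` show that each added package is persisted as a YAML file named `<manager>_<package>.yaml` under `.gigantum/env/package_manager/` with `package`, `manager`, `version`, and `from_base` fields. A sketch of reading one of these records back; the helper and the labbook root argument are illustrative only:
```python
import os
import yaml

def read_package_record(labbook_root: str, manager: str, package: str) -> dict:
    # Path layout and field names come from the assertions above.
    path = os.path.join(labbook_root, '.gigantum', 'env', 'package_manager',
                        f"{manager}_{package}.yaml")
    with open(path) as pkg_yaml:
        return yaml.safe_load(pkg_yaml)  # e.g. {'package': 'gtmunit1', 'manager': 'pip3', ...}
```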
#### File: lmsrvlabbook/tests/test_jobstatus_queries.py
```python
import pytest
import multiprocessing
import threading
import pprint
import time
import json
from lmsrvlabbook.tests.fixtures import fixture_working_dir
from graphene.test import Client
import graphene
from mock import patch
import rq
from gtmcore.dispatcher import Dispatcher, jobs
from gtmcore.configuration import Configuration
@pytest.fixture()
def temporary_worker():
"""A pytest fixture that creates a temporary directory and a config file to match. Deletes directory after test"""
def run_worker():
with rq.Connection():
qs = 'labmanager_unittests'
w = rq.Worker(qs)
w.work()
# This task is used to kill the worker. Sometimes if tests fail the worker runs forever and
    # holds up the entire process. The watchdog below gives each test a short window to run before killing the worker
# and forcing the test to fail.
def watch_proc(p):
count = 0
while count < 4:
count = count + 1
time.sleep(1)
try:
p.terminate()
except:
pass
worker_proc = multiprocessing.Process(target=run_worker)
worker_proc.start()
watchdog_thread = threading.Thread(target=watch_proc, args=(worker_proc,))
watchdog_thread.start()
dispatcher = Dispatcher('labmanager_unittests')
yield worker_proc, dispatcher
class TestLabBookServiceQueries(object):
def test_query_finished_task(self, fixture_working_dir, temporary_worker):
"""Test listing labbooks"""
w, d = temporary_worker
job_id = d.dispatch_task(jobs.test_exit_success)
time.sleep(1)
query = """
{
jobStatus(jobId: "%s") {
result
status
jobMetadata
failureMessage
startedAt
finishedAt
}
}
""" % job_id.key_str
try:
r = fixture_working_dir[2].execute(query)
assert 'errors' not in r
assert int(r['data']['jobStatus']['result']) == 0
assert r['data']['jobStatus']['status'] == 'finished'
assert r['data']['jobStatus']['startedAt'] is not None
assert r['data']['jobStatus']['failureMessage'] is None
assert r['data']['jobStatus']['finishedAt']
assert r['data']['jobStatus']['jobMetadata'] == '{}'
except:
w.terminate()
raise
w.terminate()
def test_query_failed_task(self, fixture_working_dir, temporary_worker):
"""Test listing labbooks"""
w, d = temporary_worker
job_id = d.dispatch_task(jobs.test_exit_fail)
time.sleep(1)
query = """
{
jobStatus(jobId: "%s") {
result
status
jobMetadata
failureMessage
startedAt
finishedAt
}
}
""" % job_id
try:
r = fixture_working_dir[2].execute(query)
assert 'errors' not in r
assert r['data']['jobStatus']['result'] is None
assert r['data']['jobStatus']['status'] == 'failed'
assert r['data']['jobStatus']['failureMessage'] == \
'Exception: Intentional Exception from job `test_exit_fail`'
assert r['data']['jobStatus']['startedAt'] is not None
assert r['data']['jobStatus']['finishedAt'] is not None
# Assert the following dict is empty
assert not json.loads(r['data']['jobStatus']['jobMetadata'])
except:
w.terminate()
raise
w.terminate()
def test_query_started_task(self, fixture_working_dir, temporary_worker):
"""Test listing labbooks"""
w, d = temporary_worker
job_id = d.dispatch_task(jobs.test_sleep, args=(2,))
time.sleep(1)
query = """
{
jobStatus(jobId: "%s") {
result
status
jobMetadata
failureMessage
startedAt
finishedAt
}
}
""" % job_id
try:
r = fixture_working_dir[2].execute(query)
pprint.pprint(r)
assert 'errors' not in r
assert r['data']['jobStatus']['result'] is None
assert r['data']['jobStatus']['status'] == 'started'
assert r['data']['jobStatus']['failureMessage'] is None
assert r['data']['jobStatus']['startedAt'] is not None
assert json.loads(r['data']['jobStatus']['jobMetadata'])['sample'] == 'test_sleep metadata'
except:
time.sleep(3)
w.terminate()
raise
time.sleep(3)
w.terminate()
def test_query_queued_task(self, fixture_working_dir, temporary_worker):
"""Test listing labbooks"""
w, d = temporary_worker
job_id1 = d.dispatch_task(jobs.test_sleep, args=(2,))
job_id2 = d.dispatch_task(jobs.test_sleep, args=(2,))
time.sleep(0.5)
query = """
{
jobStatus(jobId: "%s") {
result
status
jobMetadata
failureMessage
startedAt
finishedAt
}
}
""" % job_id2
try:
r = fixture_working_dir[2].execute(query)
pprint.pprint(r)
assert 'errors' not in r
assert r['data']['jobStatus']['result'] is None
assert r['data']['jobStatus']['status'] == 'queued'
assert r['data']['jobStatus']['failureMessage'] is None
assert r['data']['jobStatus']['startedAt'] is None
except:
time.sleep(5)
w.terminate()
raise
time.sleep(5)
w.terminate()
```
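These tests poll job state through the GraphQL `jobStatus` field; at the library level the same information comes from `Dispatcher.query_task`, as used in the export test earlier. A minimal polling sketch, assuming the unit-test queue name and sample jobs shown above:
```python
import time
from gtmcore.dispatcher import Dispatcher, JobKey, jobs

def wait_for_job(timeout_seconds: int = 10):
    # Queue name and sample task mirror the fixtures in this file.
    d = Dispatcher('labmanager_unittests')
    job_key = d.dispatch_task(jobs.test_exit_success)
    status = d.query_task(JobKey(job_key.key_str))
    for _ in range(timeout_seconds):
        if status.status in ('finished', 'failed'):
            break
        time.sleep(1)
        status = d.query_task(JobKey(job_key.key_str))
    return status
```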
#### File: activity/tests/test_activitydetailrecord.py
```python
import pytest
import json
from gtmcore.activity.records import ActivityDetailRecord, ActivityDetailType, ActivityAction
class TestActivityDetailRecord(object):
def test_constructor(self):
"""Test the constructor"""
adr = ActivityDetailRecord(ActivityDetailType.CODE)
assert type(adr) == ActivityDetailRecord
assert adr.type == ActivityDetailType.CODE
assert adr.key is None
assert adr.show is True
assert adr.importance == 0
assert adr.tags == []
assert adr.data == {}
adr = ActivityDetailRecord(ActivityDetailType.CODE, key="my_key", show=False, importance=23)
assert type(adr) == ActivityDetailRecord
assert adr.type == ActivityDetailType.CODE
assert adr.key == "my_key"
assert adr.show is False
assert adr.importance == 23
assert adr.tags == []
assert adr.data == {}
def test_log_str_prop(self):
"""Test the log string property"""
adr = ActivityDetailRecord(ActivityDetailType.CODE_EXECUTED, key="my_key", show=False, importance=233)
assert adr.log_str == "3,0,233,my_key,0"
adr = ActivityDetailRecord(ActivityDetailType.OUTPUT_DATA, key="my_key", show=True, importance=25)
assert adr.log_str == "1,1,25,my_key,0"
adr = ActivityDetailRecord(ActivityDetailType.OUTPUT_DATA, key="my_key", show=True, importance=25,
action=ActivityAction.EDIT)
assert adr.log_str == "1,1,25,my_key,2"
def test_from_log_str_legacy(self):
"""Test the creating from a log string"""
adr = ActivityDetailRecord.from_log_str("2,1,25,my_key")
assert type(adr) == ActivityDetailRecord
assert adr.type == ActivityDetailType.RESULT
assert adr.key == "my_key"
assert adr.show is True
assert adr.importance == 25
assert adr.tags == []
assert adr.data == {}
assert adr.action == ActivityAction.NOACTION
def test_from_log_str(self):
"""Test the creating from a log string"""
adr = ActivityDetailRecord.from_log_str("2,1,25,my_key,3")
assert type(adr) == ActivityDetailRecord
assert adr.type == ActivityDetailType.RESULT
assert adr.key == "my_key"
assert adr.show is True
assert adr.importance == 25
assert adr.tags == []
assert adr.data == {}
assert adr.action == ActivityAction.DELETE
def test_add_value(self):
"""Test adding values to the detail object"""
adr = ActivityDetailRecord(ActivityDetailType.CODE_EXECUTED, key="my_key2")
adr.add_value("text/plain", "this is some data")
adr.add_value("text/html", "<p>this is some data<\p>")
assert len(adr.data.keys()) == 2
assert adr.data["text/plain"] == "this is some data"
assert adr.data["text/html"] == "<p>this is some data<\p>"
with pytest.raises(ValueError):
adr.add_value("text/html", "<p>this is some data<\p>")
def test_data_size(self):
"""Test getting the size of details stored"""
adr = ActivityDetailRecord(ActivityDetailType.CODE_EXECUTED, key="my_key2")
assert adr.data_size == 0
adr.add_value("text/plain", "0000000000")
assert adr.data_size == 10
adr.add_value("text/html", "0000000000")
assert adr.data_size == 20
def test_to_dict(self):
"""Test converting to a dictionary"""
adr = ActivityDetailRecord(ActivityDetailType.CODE_EXECUTED, key="my_key2", show=True, importance=25)
adr.add_value("text/plain", "this is some data")
adr.add_value("text/html", "<p>this is some data<\p>")
dict_obj = adr.to_dict()
assert type(dict_obj) == dict
assert dict_obj['type'] == 3
assert dict_obj['action'] == 0
assert dict_obj['importance'] == 25
assert dict_obj['show'] == 1
assert dict_obj['data'] == {"text/plain": "this is some data",
"text/html": "<p>this is some data<\p>"}
def test_to_bytes_from_bytes(self):
"""Test converting to a byte array"""
adr = ActivityDetailRecord(ActivityDetailType.CODE_EXECUTED, key="my_key3", show=True, importance=225,
action=ActivityAction.CREATE)
adr.add_value("text/plain", "this is some data")
byte_array_no_compression = adr.to_bytes(compress=False)
assert type(byte_array_no_compression) == bytes
adr2 = ActivityDetailRecord.from_bytes(byte_array_no_compression, decompress=False)
assert type(adr2) == ActivityDetailRecord
assert adr2.type == ActivityDetailType.CODE_EXECUTED
assert adr2.action == ActivityAction.CREATE
assert adr2.key is None
assert adr2.show is True
assert adr2.importance == 225
assert adr2.tags == []
assert adr2.data == {"text/plain": "this is some data"}
def test_compression(self):
"""Test compression on large objects"""
adr = ActivityDetailRecord(ActivityDetailType.INPUT_DATA, key="my_ke3", show=True, importance=125)
adr.add_value("text/plain", "this is some data00000000000000000000000000000000000" * 1000)
byte_array_no_compression = adr.to_bytes(compress=False)
assert type(byte_array_no_compression) == bytes
adr2 = ActivityDetailRecord(ActivityDetailType.INPUT_DATA, key="my_ke3", show=True, importance=125)
adr2.add_value("text/plain", "this is some data00000000000000000000000000000000000" * 1000)
byte_array_compression = adr2.to_bytes(compress=True)
assert type(byte_array_compression) == bytes
assert len(byte_array_compression) < len(byte_array_no_compression)
adr3 = ActivityDetailRecord.from_bytes(byte_array_compression, decompress=True)
assert type(adr3) == ActivityDetailRecord
assert adr3.type == ActivityDetailType.INPUT_DATA
assert adr3.action == ActivityAction.NOACTION
assert adr3.key is None
assert adr3.show is True
assert adr3.importance == 125
assert adr3.tags == []
assert adr3.data == adr.data
def test_to_json(self):
"""Test converting to json"""
adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, key="my_key3", show=True, importance=225)
adr.add_value("text/plain", "this is some data")
adr_in_json = adr.to_json()
assert type(adr_in_json) == str
json_dict = json.loads(adr_in_json)
assert json_dict['type'] == 0
assert json_dict['importance'] == 225
assert json_dict['show'] is True
assert json_dict['data'] == {'text/plain': "this is some data"}
def test_jsonify_data(self):
"""Test converting to json"""
adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, key="my_key3fgjg", show=True, importance=45)
adr.add_value("text/plain", "this is some data to jsonify")
adr.add_value("text/markdown", "this is some data to `jsonify`")
data = adr.jsonify_data()
assert type(data) == dict
assert len(data.keys()) == 2
assert data['text/plain'] == "this is some data to jsonify"
assert data['text/markdown'] == "this is some data to `jsonify`"
```
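The `log_str` tests above imply a compact comma-separated encoding of `type,show,importance,key,action`. A short round-trip sketch using only the API exercised by these tests:
```python
from gtmcore.activity.records import (ActivityDetailRecord, ActivityDetailType,
                                      ActivityAction)

record = ActivityDetailRecord(ActivityDetailType.CODE_EXECUTED, key="example_key",
                              show=True, importance=100, action=ActivityAction.EDIT)
encoded = record.log_str                      # "3,1,100,example_key,2" per the tests above
decoded = ActivityDetailRecord.from_log_str(encoded)
assert decoded.type == ActivityDetailType.CODE_EXECUTED
assert decoded.importance == 100
assert decoded.key == "example_key"
```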
#### File: activity/tests/test_detaildb.py
```python
import pytest
import os
from gtmcore.fixtures import mock_labbook, mock_config_with_detaildb
from gtmcore.activity.detaildb import ActivityDetailDB
class TestDetailDB(object):
def test_constructor(self, mock_labbook):
"""Test the constructor"""
db = ActivityDetailDB(mock_labbook[2].root_dir, mock_labbook[2].checkout_id, 4000000)
assert type(db) == ActivityDetailDB
assert db.logfile_limit == 4000000
assert db.checkout_id == mock_labbook[2].checkout_id
def test_file_number(self, mock_config_with_detaildb):
"""Test the file_number property"""
assert mock_config_with_detaildb[0].file_number == 0
assert mock_config_with_detaildb[0].file_number == 0
def test_file_number_reload(self, mock_config_with_detaildb):
"""Test the file_number property"""
assert mock_config_with_detaildb[0].file_number == 0
mock_config_with_detaildb[0]._write_metadata_file(increment=True)
assert mock_config_with_detaildb[0].file_number == 1
# reset locally stored file_number
new_detail_db_instance = ActivityDetailDB(mock_config_with_detaildb[1].root_dir,
mock_config_with_detaildb[1].checkout_id)
assert new_detail_db_instance.file_number == 1
def test_file_number_new_checkout_context(self, mock_config_with_detaildb):
"""Test the file_number property if a new branch has been created"""
assert mock_config_with_detaildb[0].file_number == 0
mock_config_with_detaildb[0]._write_metadata_file(increment=True)
assert mock_config_with_detaildb[0].file_number == 1
# reset locally stored file_number by changing the checkout ID
mock_config_with_detaildb[0].checkout_id = "adsl;jkadksflj;"
assert mock_config_with_detaildb[0].file_number == 0
def test_increment_metadata(self, mock_config_with_detaildb):
"""Test the file_number property if a new branch has been created"""
assert mock_config_with_detaildb[0].file_number == 0
mock_config_with_detaildb[0]._write_metadata_file(increment=False)
assert mock_config_with_detaildb[0].file_number == 0
mock_config_with_detaildb[0]._write_metadata_file(increment=True)
assert mock_config_with_detaildb[0].file_number == 1
def test_generate_detail_header(self, mock_config_with_detaildb):
"""Test generating a detail header"""
assert mock_config_with_detaildb[0]._generate_detail_header(10, 20) == \
b'__g__lsn\x00\x00\x00\x00\n\x00\x00\x00\x14\x00\x00\x00'
# Increment the file number a bunch
for _ in range(49):
mock_config_with_detaildb[0]._write_metadata_file(increment=True)
assert mock_config_with_detaildb[0]._generate_detail_header(511564, 6455412) == \
b'__g__lsn1\x00\x00\x00L\xce\x07\x00t\x80b\x00'
def test_parse_detail_header(self, mock_config_with_detaildb):
"""Test parsing a detail header"""
assert mock_config_with_detaildb[0]._parse_detail_header(b'__g__lsn\x00\x00\x00\x00\n\x00\x00\x00\x14\x00\x00\x00') == \
(0, 10, 20)
assert mock_config_with_detaildb[0]._parse_detail_header(b'__g__lsn1\x00\x00\x00L\xce\x07\x00t\x80b\x00') == \
(49, 511564, 6455412)
def test_parse_detail_key(self, mock_config_with_detaildb):
"""Test generating a detail key"""
        basename, detail_header = mock_config_with_detaildb[0]._parse_detail_key('<KEY>')
assert type(basename) == str
assert len(basename) == 36
assert detail_header == b'__g__lsn\x00\x00\x00\x00\n\x00\x00\x00\x14\x00\x00\x00'
def test_file_rotate(self, mock_labbook):
"""Test rotating the file"""
db = ActivityDetailDB(mock_labbook[2].root_dir, mock_labbook[2].checkout_id, 2000)
fp = db._open_for_append_and_rotate()
fp.write(("blah").encode())
fp.close()
# assert file exists
assert os.path.join(db.root_path, db.basename + '_0') == fp.name
assert os.path.exists(os.path.join(db.root_path, db.basename + '_0')) is True
assert os.path.exists(os.path.join(db.root_path, db.basename + '_1')) is False
fp = db._open_for_append_and_rotate()
fp.write(("blah" * 3000).encode())
fp.close()
# assert same file exists
assert os.path.join(db.root_path, db.basename + '_0') == fp.name
assert os.path.exists(os.path.join(db.root_path, db.basename + '_0')) is True
assert os.path.exists(os.path.join(db.root_path, db.basename + '_1')) is False
fp = db._open_for_append_and_rotate()
# assert it rolled
assert os.path.join(db.root_path, db.basename + '_1') == fp.name
assert os.path.exists(os.path.join(db.root_path, db.basename + '_0')) is True
assert os.path.exists(os.path.join(db.root_path, db.basename + '_1')) is True
def test_put_get(self, mock_config_with_detaildb):
"""Test putting and getting a record"""
my_val = b'thisisastreamofstuff'
detail_key = mock_config_with_detaildb[0].put(my_val)
basename, detail_header = mock_config_with_detaildb[0]._parse_detail_key(detail_key)
file_num, offset, length = mock_config_with_detaildb[0]._parse_detail_header(detail_header)
assert file_num == 0
assert offset == 0
assert length == len(my_val)
return_val = mock_config_with_detaildb[0].get(detail_key)
assert return_val == my_val
def test_put_get_errors(self, mock_config_with_detaildb):
"""Test putting and getting a record with validation errors"""
with pytest.raises(ValueError):
detail_key = mock_config_with_detaildb[0].put("astringvalue")
with pytest.raises(ValueError):
detail_key = mock_config_with_detaildb[0].get(None)
with pytest.raises(ValueError):
detail_key = mock_config_with_detaildb[0].get("")
with pytest.raises(ValueError):
detail_key = mock_config_with_detaildb[0].get(b"abytekey")
```
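The raw headers asserted in `test_generate_detail_header` and `test_parse_detail_header` decode cleanly as the 8-byte `__g__lsn` magic followed by three little-endian unsigned 32-bit integers: file number, offset, and length. The sketch below reproduces the fixture bytes with `struct`; it is an inference from the test data, not the canonical `ActivityDetailDB` implementation:
```python
import struct

MAGIC = b'__g__lsn'

def pack_header(file_number: int, offset: int, length: int) -> bytes:
    return MAGIC + struct.pack('<III', file_number, offset, length)

def unpack_header(header: bytes):
    return struct.unpack('<III', header[len(MAGIC):])

assert pack_header(0, 10, 20) == b'__g__lsn\x00\x00\x00\x00\n\x00\x00\x00\x14\x00\x00\x00'
assert unpack_header(b'__g__lsn1\x00\x00\x00L\xce\x07\x00t\x80b\x00') == (49, 511564, 6455412)
```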
#### File: gtmcore/configuration/utils.py
```python
import time
import subprocess
from typing import List
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
def call_subprocess(cmd_tokens: List[str], cwd: str, check: bool = True,
shell: bool = False) -> str:
"""Execute a subprocess call and properly benchmark and log
Args:
cmd_tokens: List of command tokens, e.g., ['ls', '-la']
cwd: Current working directory
check: Raise exception if command fails
shell: Run as shell command (not recommended)
Returns:
Decoded stdout of called process after completing
Raises:
subprocess.CalledProcessError
"""
logger.debug(f"Executing `{' '.join(cmd_tokens)}` in {cwd}")
start_time = time.time()
try:
r = subprocess.run(cmd_tokens, cwd=cwd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
check=check, shell=shell)
finish_time = time.time()
elapsed_time = finish_time - start_time
logger.debug(f"Finished command `{' '.join(cmd_tokens)}` in {elapsed_time:.2f}s")
if elapsed_time > 1.0:
logger.warning(f"Successful command `{' '.join(cmd_tokens)}` took {elapsed_time:.2f}s")
return (r.stdout or b"").decode()
except subprocess.CalledProcessError as x:
fail_time = time.time() - start_time
logger.error(f"Command failed `{' '.join(cmd_tokens)}` after {fail_time:.2f}s: "
f"stderr={x.stderr}, stdout={x.stdout}")
raise
```
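A short usage example for the helper above; the command and working directory are placeholders:
```python
from gtmcore.configuration.utils import call_subprocess

# Runs the command in /tmp, raising subprocess.CalledProcessError on failure
# because check=True; the decoded stdout is returned.
stdout = call_subprocess(['git', 'status', '--short'], cwd='/tmp', check=True)
print(stdout)
```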
#### File: gtmcore/container/core.py
```python
import os
import docker
import docker.errors
import hashlib
import time
import json
from typing import Callable, Optional
from gtmcore.configuration import get_docker_client, Configuration
from gtmcore.logging import LMLogger
from gtmcore.inventory.inventory import InventoryManager, InventoryException
from gtmcore.container.utils import infer_docker_image_name, ps_search
from gtmcore.container.exceptions import ContainerBuildException
from gtmcore.dataset.cache import get_cache_manager_class
from gtmcore.container.cuda import should_launch_with_cuda_support
logger = LMLogger.get_logger()
def get_labmanager_ip() -> Optional[str]:
"""Method to get the monitored lab book container's IP address on the Docker bridge network
Returns:
str of IP address
"""
client = get_docker_client()
container = [c for c in client.containers.list()
if 'gigantum.labmanager' in c.name
and 'gmlb-' not in c.name][0]
ip = container.attrs['NetworkSettings']['Networks']['bridge']['IPAddress']
logger.info("container {} IP: {}".format(container.name, ip))
return ip
def get_container_ip(lb_key: str) -> str:
"""Return the IP address of the given labbook container"""
client = get_docker_client()
container = client.containers.get(lb_key)
return container.attrs['NetworkSettings']['Networks']['bridge']['IPAddress']
def _get_cached_image(env_dir: str, image_name: str) -> Optional[str]:
"""
    Get Docker image id for the given environment specification (if it exists).
This helps to determine if we can avoid having to rebuild the Docker image
    by hashing the environment specification and determining if it changed. Any
change in content or version will cause the checksum to be different,
necessitating a rebuild. If there's no change, however, we can avoid potentially
costly rebuilds of the image.
Args:
        env_dir: Environment directory for a LabBook
image_name: Name of the LabBook Docker image
Returns:
docker image id (Optional)
"""
# Determine if we need to rebuild by testing if the environment changed
cache_dir = '/mnt/gigantum/.labmanager/image-cache'
if not os.path.exists(cache_dir):
logger.info(f"Making environment cache at {cache_dir}")
os.makedirs(cache_dir, exist_ok=True)
env_cache_path = os.path.join(cache_dir, f"{image_name}.cache")
m = hashlib.sha256()
for root, dirs, files in os.walk(env_dir):
for f in [n for n in files if '.yaml' in n]:
m.update(os.path.join(root, f).encode())
m.update(open(os.path.join(root, f)).read().encode())
env_cksum = m.hexdigest()
if os.path.exists(env_cache_path):
old_env_cksum = open(env_cache_path).read()
else:
with open(env_cache_path, 'w') as cfile:
cfile.write(env_cksum)
return None
if env_cksum == old_env_cksum:
try:
i = get_docker_client().images.get(name=image_name)
return i.id
except docker.errors.ImageNotFound:
pass
else:
# Env checksum hash is outdated. Remove it.
os.remove(env_cache_path)
with open(env_cache_path, 'w') as cfile:
cfile.write(env_cksum)
return None
def _remove_docker_image(image_name: str) -> None:
try:
get_docker_client().images.get(name=image_name)
get_docker_client().images.remove(image_name)
except docker.errors.ImageNotFound:
logger.warning(f"Attempted to delete Docker image {image_name}, but not found")
def build_docker_image(root_dir: str, username: str, nocache: bool = False,
override_image_tag: Optional[str] = None,
feedback_callback: Optional[Callable] = None) -> str:
"""
Build a new docker image from the Dockerfile at the given directory, give this image
the name defined by the image_name argument.
Note! This method is static, it should **NOT** use any global variables or any other
reference to global state.
Also note - This will delete any existing image pertaining to the given labbook.
Thus if this call fails, there will be no docker images pertaining to that labbook.
Args:
root_dir: LabBook root directory (obtained by LabBook.root_dir)
override_image_tag: Tag of docker image; in general this should not be explicitly set.
username: Username of active user.
nocache: If True do not use docker cache.
feedback_callback: Optional method taking one argument (a string) to process each line of output
Returns:
        A string containing the short docker id of the newly built image.
Raises:
ContainerBuildException if container build fails.
"""
if not os.path.exists(root_dir):
raise ValueError(f'Expected env directory `{root_dir}` does not exist.')
env_dir = os.path.join(root_dir, '.gigantum', 'env')
lb = InventoryManager().load_labbook_from_directory(root_dir)
# Build image
owner = InventoryManager().query_owner(lb)
image_name = override_image_tag or infer_docker_image_name(labbook_name=lb.name,
owner=owner,
username=username)
reuse_image_id = _get_cached_image(env_dir, image_name)
if reuse_image_id:
logger.info(f"Reusing Docker image for {str(lb)}")
if feedback_callback:
feedback_callback(f"Using cached image {reuse_image_id}")
return reuse_image_id
try:
image_id = None
# From: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.build.BuildApiMixin.build
# This builds the image and generates output status text.
for line in docker.from_env().api.build(path=env_dir,
tag=image_name,
pull=True,
nocache=nocache,
forcerm=True):
ldict = json.loads(line)
stream = (ldict.get("stream") or "").strip()
if feedback_callback:
feedback_callback(stream)
status = (ldict.get("status") or "").strip()
if feedback_callback:
feedback_callback(status)
if 'Successfully built'.lower() in stream.lower():
# When build, final line is in form of "Successfully build 02faas3"
# There is no other (simple) way to grab the image ID
image_id = stream.split(' ')[-1]
except docker.errors.BuildError as e:
_remove_docker_image(image_name)
raise ContainerBuildException(e)
if not image_id:
_remove_docker_image(image_name)
raise ContainerBuildException(f"Cannot determine docker image on LabBook from {root_dir}")
return image_id
def start_labbook_container(labbook_root: str, config_path: str, username: str,
override_image_id: Optional[str] = None) -> str:
""" Start a Docker container from a given image_name.
Args:
labbook_root: Root dir of labbook
config_path: Path to LabBook configuration file.
override_image_id: Optional explicit docker image id (do not infer).
username: Username of active user. Do not use with override_image_id.
Returns:
        The id of the started Docker container.
Raises:
"""
if username and override_image_id:
raise ValueError('Argument username and override_image_id cannot both be set')
lb = InventoryManager(config_file=config_path).load_labbook_from_directory(labbook_root)
if not override_image_id:
owner = InventoryManager().query_owner(lb)
tag = infer_docker_image_name(lb.name, owner, username)
else:
tag = override_image_id
mnt_point = labbook_root.replace('/mnt/gigantum', os.environ['HOST_WORK_DIR'])
volumes_dict = {
mnt_point: {'bind': '/mnt/labbook', 'mode': 'cached'},
'labmanager_share_vol': {'bind': '/mnt/share', 'mode': 'rw'}
}
# Set up additional bind mounts for datasets if needed.
submodules = lb.git.list_submodules()
for submodule in submodules:
try:
namespace, dataset_name = submodule['name'].split("&")
submodule_dir = os.path.join(lb.root_dir, '.gigantum', 'datasets', namespace, dataset_name)
ds = InventoryManager().load_dataset_from_directory(submodule_dir)
ds.namespace = namespace
cm_class = get_cache_manager_class(ds.client_config)
cm = cm_class(ds, username)
ds_cache_dir = cm.current_revision_dir.replace('/mnt/gigantum', os.environ['HOST_WORK_DIR'])
volumes_dict[ds_cache_dir] = {'bind': f'/mnt/labbook/input/{ds.name}', 'mode': 'ro'}
except InventoryException:
continue
# If re-mapping permissions, be sure to configure the container
if 'LOCAL_USER_ID' in os.environ:
env_var = [f"LOCAL_USER_ID={os.environ['LOCAL_USER_ID']}"]
else:
env_var = ["WINDOWS_HOST=1"]
# Get resource limits
resource_args = dict()
memory_limit = lb.client_config.config['container']['memory']
cpu_limit = lb.client_config.config['container']['cpu']
gpu_shared_mem = lb.client_config.config['container']['gpu_shared_mem']
if memory_limit:
# If memory_limit not None, pass to Docker to limit memory allocation to container
resource_args["mem_limit"] = memory_limit
if cpu_limit:
# If cpu_limit not None, pass to Docker to limit CPU allocation to container
# "nano_cpus" is an integer in factional parts of a CPU
resource_args["nano_cpus"] = round(cpu_limit * 1e9)
docker_client = get_docker_client()
# run with nvidia-docker if we have GPU support on the Host compatible with the project
should_run_nvidia, reason = should_launch_with_cuda_support(lb.cuda_version)
if should_run_nvidia:
logger.info(f"Launching container with GPU support:{reason}")
if gpu_shared_mem:
resource_args["shm_size"] = gpu_shared_mem
container_id = docker_client.containers.run(tag, detach=True, init=True, name=tag,
environment=env_var, volumes=volumes_dict,
runtime='nvidia', **resource_args).id
else:
logger.info(f"Launching container without GPU support. {reason}")
container_id = docker_client.containers.run(tag, detach=True, init=True, name=tag,
environment=env_var, volumes=volumes_dict,
**resource_args).id
labmanager_ip = ""
try:
labmanager_ip = get_labmanager_ip() or ""
except IndexError:
logger.warning("Cannot find labmanager IP")
labmanager_ip = labmanager_ip.strip()
cmd = f"echo {labmanager_ip} > /home/giguser/labmanager_ip"
for timeout in range(20):
time.sleep(0.5)
if docker_client.containers.get(container_id).status == 'running':
r = docker_client.containers.get(container_id).exec_run(f'sh -c "{cmd}"')
logger.info(f"Response to write labmanager_ip in {tag}: {r}")
break
else:
logger.error("After 10 seconds could not write IP to labmanager container."
f" Container status = {docker_client.containers.get(container_id).status}")
return container_id
def stop_labbook_container(container_id: str) -> bool:
""" Stop a running docker container.
Args:
container_id: ID of container to stop.
Returns
True if stopped, False if it was never running.
"""
try:
client = get_docker_client()
build_container = client.containers.get(container_id)
build_container.stop(timeout=10)
build_container.remove()
return True
except docker.errors.NotFound:
# No container to stop, but no reason to throw an exception
return False
```
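A sketch of the build, start, and stop lifecycle using only the functions defined above. The labbook root, config path, and username are placeholders; in the client these values come from the inventory and the active user session:
```python
from gtmcore.container.core import (build_docker_image, start_labbook_container,
                                    stop_labbook_container)

labbook_root = '/mnt/gigantum/default/default/labbooks/my-project'  # hypothetical path
image_id = build_docker_image(labbook_root, username='default',
                              feedback_callback=print)
container_id = start_labbook_container(labbook_root,
                                       config_path='/etc/gigantum/labmanager.yaml',  # hypothetical
                                       username='default')
stop_labbook_container(container_id)
```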
#### File: gtmcore/dataset/dataset.py
```python
import os
import re
import yaml
import datetime
import json
from typing import (Dict, Optional)
from gtmcore.gitlib import GitAuthor
from gtmcore.dataset.schemas import validate_dataset_schema
from gtmcore.activity import ActivityStore, ActivityRecord, ActivityDetailType, ActivityType,\
ActivityAction, ActivityDetailRecord
from gtmcore.dataset.storage import get_storage_backend, StorageBackend
from gtmcore.inventory.repository import Repository
class Dataset(Repository):
"""Class representing a single LabBook"""
_default_activity_type = ActivityType.DATASET
_default_activity_detail_type = ActivityDetailType.DATASET
_default_activity_section = "Dataset Root"
def __init__(self, config_file: Optional[str] = None, namespace: Optional[str] = None,
author: Optional[GitAuthor] = None) -> None:
super().__init__(config_file, author)
        # TODO - Need a more formalized solution for differentiating Datasets from other repo types
self.client_config.config['git']['lfs_enabled'] = False
self.namespace = namespace
self._backend: Optional[StorageBackend] = None
def __str__(self):
if self._root_dir:
return f'<Dataset at `{self._root_dir}`>'
else:
return f'<Dataset UNINITIALIZED>'
def __eq__(self, other):
return isinstance(other, Dataset) and other.root_dir == self.root_dir
@property
def id(self) -> str:
if self._data:
return self._data["id"]
else:
raise ValueError("No ID assigned to the Dataset.")
@property
def name(self) -> str:
if self._data:
return self._data["name"]
else:
raise ValueError("No name assigned to the Dataset.")
@name.setter
def name(self, value: str) -> None:
if not value:
raise ValueError("value cannot be None or empty")
if not self._data:
self._data = {'name': value}
else:
self._data["name"] = value
self._validate_gigantum_data()
# Update data file
self._save_gigantum_data()
# Rename directory
if self._root_dir:
base_dir, _ = self._root_dir.rsplit(os.path.sep, 1)
os.rename(self._root_dir, os.path.join(base_dir, value))
else:
raise ValueError("Dataset root dir not specified. Failed to configure git.")
# Update the root directory to the new directory name
self._set_root_dir(os.path.join(base_dir, value))
@property
def creation_date(self) -> Optional[datetime.datetime]:
"""Get creation date from the gigantum.yaml file"""
if self._data:
created_at = self._data["created_on"]
d = datetime.datetime.strptime(created_at, '%Y-%m-%dT%H:%M:%S.%f')
d = d.replace(tzinfo=datetime.timezone.utc) # Make tz aware so rendering in API is consistent
d = d.replace(microsecond=0) # Make all times consistent
return d
else:
raise ValueError("No creation date set.")
@property
def build_details(self) -> str:
return self._data["build_info"]
@property
def description(self) -> str:
if self._data:
return self._data["description"]
else:
raise ValueError("No description assigned to this Dataset.")
@description.setter
def description(self, value) -> None:
if not self._data:
self._data = {'description': value}
else:
self._data["description"] = value
self._save_gigantum_data()
@property
def storage_type(self) -> str:
if self._data:
return self._data["storage_type"]
else:
raise ValueError("No storage type assigned to this Dataset.")
@storage_type.setter
def storage_type(self, value) -> None:
if not self._data:
self._data = {'storage_type': value}
else:
self._data["storage_type"] = value
self._save_gigantum_data()
def is_managed(self) -> bool:
"""Property to check if the dataset is managed"""
is_managed = self.backend.metadata.get("is_managed")
return is_managed if is_managed is not None else False
@property
def backend(self) -> StorageBackend:
"""Property to access the storage backend for this dataset"""
if not self._backend:
self._backend = get_storage_backend(self.storage_type)
self._backend.configuration = self.backend_config
return self._backend
@property
def backend_config(self) -> dict:
"""Property to load the storage.json file"""
config_file = os.path.join(self.root_dir, ".gigantum", "backend.json")
if os.path.exists(config_file):
with open(os.path.join(self.root_dir, ".gigantum", "backend.json"), 'rt') as sf:
data = json.load(sf)
else:
data = dict()
return data
@backend_config.setter
def backend_config(self, data: dict) -> None:
"""Save storage config data"""
if self._backend:
self._backend.configuration = data
# Remove defaults set at runtime that shouldn't be persisted
if "username" in data:
del data["username"]
if "gigantum_bearer_token" in data:
del data["gigantum_bearer_token"]
if "gigantum_id_token" in data:
del data["gigantum_id_token"]
config_file = os.path.join(self.root_dir, ".gigantum", "backend.json")
with open(config_file, 'wt') as sf:
json.dump(data, sf, indent=2)
self.git.add(config_file)
cm = self.git.commit("Updating backend config")
ar = ActivityRecord(ActivityType.DATASET,
message="Updated Dataset storage backend configuration",
show=True,
importance=255,
linked_commit=cm.hexsha,
tags=['config'])
adr = ActivityDetailRecord(ActivityDetailType.DATASET, show=False, importance=255,
action=ActivityAction.EDIT)
d = json.dumps(data, indent=2)
adr.add_value('text/markdown', f"Updated dataset storage backend configuration:\n\n ```{d}```")
ar.add_detail_object(adr)
ars = ActivityStore(self)
ars.create_activity_record(ar)
def _save_gigantum_data(self) -> None:
"""Method to save changes to the LabBook
Returns:
None
"""
if not self.root_dir:
raise ValueError("No root directory assigned to Dataset. Failed to get root directory.")
with open(os.path.join(self.root_dir, ".gigantum", "gigantum.yaml"), 'wt') as df:
df.write(yaml.safe_dump(self._data, default_flow_style=False))
df.flush()
def _load_gigantum_data(self) -> None:
"""Method to load the dataset YAML file to a dictionary
Returns:
None
"""
if not self.root_dir:
raise ValueError("No root directory assigned to Dataset. Failed to get root directory.")
with open(os.path.join(self.root_dir, ".gigantum", "gigantum.yaml"), 'rt') as df:
self._data = yaml.safe_load(df)
def _validate_gigantum_data(self) -> None:
"""Method to validate the Dataset data file contents
Returns:
None
"""
if not re.match("^(?!-)(?!.*--)[a-z0-9-]+(?<!-)$", self.name):
raise ValueError("Invalid `name`. Only a-z 0-9 and hyphens allowed. No leading or trailing hyphens.")
if len(self.name) > 100:
raise ValueError("Invalid `name`. Max length is 100 characters")
# Validate schema is supported by running version of the software and valid
if not validate_dataset_schema(self.schema, self.data):
errmsg = f"Schema in Dataset {str(self)} does not match indicated version {self.schema}"
raise ValueError(errmsg)
```
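The `_save_gigantum_data` / `_load_gigantum_data` pair above is a plain YAML round-trip on `.gigantum/gigantum.yaml`. The standalone sketch below replicates that pattern outside the `Dataset` class; the temporary directory and the example fields are illustrative assumptions, not the full schema.
```python
import os
import tempfile

import yaml

# Placeholder fields for illustration only; the real schema is validated elsewhere
data = {"name": "my-dataset", "description": "example", "storage_type": "example_backend"}

root_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(root_dir, ".gigantum"), exist_ok=True)
yaml_path = os.path.join(root_dir, ".gigantum", "gigantum.yaml")

# Save, mirroring _save_gigantum_data()
with open(yaml_path, "wt") as df:
    df.write(yaml.safe_dump(data, default_flow_style=False))

# Load, mirroring _load_gigantum_data()
with open(yaml_path, "rt") as df:
    loaded = yaml.safe_load(df)

assert loaded == data
print(loaded)
```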
#### File: dataset/io/manager.py
```python
import os
from typing import List, Callable
import subprocess
import glob
from natsort import natsorted
from operator import attrgetter
from gtmcore.dataset.dataset import Dataset
from gtmcore.dataset.manifest import Manifest
from gtmcore.dataset.io import PushObject, PushResult, PullResult, PullObject
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class IOManager(object):
"""Class to manage file IO with remote storage backends and glue everything together"""
def __init__(self, dataset: Dataset, manifest: Manifest) -> None:
self.dataset = dataset
self.manifest = manifest
self.push_dir = os.path.join(self.manifest.cache_mgr.cache_root, 'objects', '.push')
# Property to keep status state if needed when appending messages
self._status_msg = ""
def _commit_in_branch(self, commit_hash: str) -> bool:
"""Method to check if a commit is in the current branch, ignoring the last commit.
This is used for the purpose of only pushing objects that are part of the current branch. We ignore the last
commit because objects to push are stored in a file named with the revision at which the files were written.
This is different from the revision that contains the files (after written and untracked, changes are
committed and then an activity record is created with another commit). The last commit can be used in a
different branch where objects were written, but can't contain any objects to push in the current branch.
Args:
commit_hash(str): Commit hash to check if in branch
Returns:
bool
"""
try:
subprocess.run(['git', 'merge-base', '--is-ancestor', commit_hash, 'HEAD~1'], check=True,
cwd=self.dataset.root_dir)
return True
except subprocess.CalledProcessError:
return False
def objects_to_push(self, remove_duplicates: bool = False) -> List[PushObject]:
"""Return a list of named tuples of all objects that need to be pushed
Returns:
List[namedtuple]
"""
objects = list()
if os.path.exists(self.push_dir):
push_files = [f for f in os.listdir(self.push_dir) if os.path.isfile(os.path.join(self.push_dir, f))]
if push_files:
object_ids: List[str] = list()
for pf in push_files:
if os.path.basename(pf) == '.DS_Store':
continue
if not self._commit_in_branch(pf):
continue
with open(os.path.join(self.push_dir, pf), 'rt') as pfh:
lines = pfh.readlines()
lines = sorted(lines)
for line in lines:
line = line.strip()
dataset_path, object_path = line.split(',')
_, object_id = object_path.rsplit('/', 1)
# Handle de-duplicating objects if the backend supports it
if remove_duplicates is True:
if object_id in object_ids:
continue
object_ids.append(object_id)
objects.append(PushObject(dataset_path=dataset_path, object_path=object_path, revision=pf))
objects = natsorted(objects, key=attrgetter('dataset_path'))
return objects
def num_objects_to_push(self, remove_duplicates: bool = False) -> int:
"""Helper to get the total number of objects to push
Returns:
int
"""
return len(self.objects_to_push(remove_duplicates))
def _log_updater(self, message: str, append: bool = False) -> None:
"""A status updater method that simply logs. Used by default if a custom status updater isn't provided in some
methods that are expected to run in the background.
Args:
message(str): Message to update
Returns:
None
"""
if append:
self._status_msg = f"{self._status_msg}\n{message}"
else:
self._status_msg = message
logger.info(self._status_msg)
def push_objects(self, status_update_fn: Callable = None) -> PushResult:
"""Method to push all available objects
This method hands most of the work over to the StorageBackend implementation for the dataset. It is expected
that the StorageBackend will return a PushResult named tuple so the user can be properly notified and
everything stays consistent.
Returns:
PushResult
"""
if not status_update_fn:
status_update_fn = self._log_updater
objs: List[PushObject] = self.objects_to_push(remove_duplicates=self.dataset.backend.client_should_dedup_on_push)
try:
self.dataset.backend.prepare_push(self.dataset, objs, status_update_fn)
result = self.dataset.backend.push_objects(self.dataset, objs, status_update_fn)
logger.warning(result)
self.dataset.backend.finalize_push(self.dataset, status_update_fn)
except Exception as err:
logger.exception(err)
raise
# Remove push files that have been processed
files = glob.glob(f'{self.push_dir}/*')
for f in files:
os.remove(f)
# Collect objects that still need to be pushed due to errors and write push files
for failed_push in result.failure:
self.manifest.queue_to_push(failed_push.object_path, failed_push.dataset_path, failed_push.revision)
return result
def _gen_pull_objects(self, keys: List[str]) -> List[PullObject]:
"""
Args:
keys:
Returns:
"""
result = list()
revision = self.manifest.dataset_revision
for key in keys:
data = self.manifest.dataset_to_object_path(key)
result.append(PullObject(object_path=data, revision=revision, dataset_path=key))
return result
def pull_objects(self, keys: List[str], status_update_fn: Callable = None) -> PullResult:
"""Method to pull a single object
This method hands most of the work over to the StorageBackend implementation for the dataset. It is expected
that the StorageBackend will return a PullResult named tuple so the user can be properly notified and
everything stays consistent.
Returns:
PullResult
"""
if not status_update_fn:
status_update_fn = self._log_updater
objs: List[PullObject] = self._gen_pull_objects(keys)
# Pull the object
self.dataset.backend.prepare_pull(self.dataset, objs, status_update_fn)
result = self.dataset.backend.pull_objects(self.dataset, objs, status_update_fn)
self.dataset.backend.finalize_pull(self.dataset, status_update_fn)
# Relink the revision
self.manifest.link_revision()
# Return pull result
return result
def pull_all(self, status_update_fn: Callable = None) -> PullResult:
"""Helper to pull every object in the dataset, ignoring files that already exist and linking files if needed
Args:
status_update_fn: Callable to provide status during pull
Returns:
PullResult
"""
keys_to_pull = list()
for key in self.manifest.manifest:
# If dir, skip
if key[-1] == os.path.sep:
continue
# If object is linked to the revision already, skip
revision_path = os.path.join(self.manifest.cache_mgr.current_revision_dir, key)
if os.path.exists(revision_path):
continue
# Check if file exists in object cache and simply needs to be linked
obj_path = self.manifest.dataset_to_object_path(key)
if os.path.isfile(obj_path):
os.link(obj_path, revision_path)
continue
# Queue for downloading
keys_to_pull.append(key)
if keys_to_pull:
return self.pull_objects(keys_to_pull, status_update_fn)
else:
return PullResult(success=[], failure=[], message="Dataset already downloaded.")
```
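The core of `objects_to_push` above is parsing comma-separated `dataset_path,object_path` lines from the push files and optionally de-duplicating on the trailing object id. Below is a minimal standalone sketch of just that parsing step, assuming the two-field line format shown above; the sample paths are made up.
```python
from typing import List, NamedTuple


class PushItem(NamedTuple):
    dataset_path: str
    object_path: str


def parse_push_lines(lines: List[str], remove_duplicates: bool = False) -> List[PushItem]:
    """Parse 'dataset_path,object_path' lines, optionally de-duplicating on the object id."""
    items: List[PushItem] = []
    seen_ids: List[str] = []
    for line in sorted(l.strip() for l in lines if l.strip()):
        dataset_path, object_path = line.split(',')
        _, object_id = object_path.rsplit('/', 1)
        if remove_duplicates and object_id in seen_ids:
            continue
        seen_ids.append(object_id)
        items.append(PushItem(dataset_path, object_path))
    return items


sample = ["data/a.txt,.labmanager/objects/abc123\n",
          "data/b.txt,.labmanager/objects/abc123\n",
          "data/c.txt,.labmanager/objects/def456\n"]
print(parse_push_lines(sample, remove_duplicates=True))
# -> the duplicate object abc123 is only listed once
```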
#### File: dataset/tests/test_hash.py
```python
import pytest
import os
import time
from pathlib import Path
from hashlib import blake2b
from gtmcore.dataset.manifest.hash import SmartHash
from gtmcore.fixtures.datasets import mock_dataset_with_cache_dir, mock_dataset_with_manifest
def helper_append_file(cache_dir, revision, rel_path, content):
with open(os.path.join(cache_dir, revision, rel_path), 'at') as fh:
fh.write(content)
class TestHashing(object):
def test_init(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
assert sh.fast_hash_data == {}
@pytest.mark.asyncio
async def test_hash(self, event_loop, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
assert sh.fast_hash_data == {}
assert sh.is_cached(filename) is False
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
hash_result = await sh.hash([filename])
hash_result = hash_result[0]
assert len(hash_result) == 128
@pytest.mark.asyncio
async def test_hash_same_as_nonchunked(self, event_loop, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "asdfdsfgkdfshuhwedfgft345wfd" * 100000)
assert sh.fast_hash_data == {}
assert sh.is_cached(filename) is False
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
hash_result = await sh.hash([filename])
hash_result = hash_result[0]
h = blake2b()
with open(sh.get_abs_path(filename), 'rb') as fh:
h.update(fh.read())
assert hash_result == h.hexdigest()
@pytest.mark.asyncio
async def test_hash_same_as_nonchunked_multiple(self, event_loop, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
filename1 = "test1.txt"
helper_append_file(cache_dir, revision, filename1, "asdfdsfgkdfshuhwedfgft345wfd" * 100000)
assert sh.is_cached(filename1) is False
filename2 = "test2.txt"
helper_append_file(cache_dir, revision, filename2, "gfggfgfgfgwee" * 100000)
assert sh.is_cached(filename2) is False
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
assert sh.fast_hash_data == {}
h = blake2b()
with open(sh.get_abs_path(filename1), 'rb') as fh:
h.update(fh.read())
hash1 = h.hexdigest()
h = blake2b()
with open(sh.get_abs_path(filename2), 'rb') as fh:
h.update(fh.read())
hash2 = h.hexdigest()
hash_result = await sh.hash([filename1, filename2])
assert hash1 == hash_result[0]
assert hash2 == hash_result[1]
hash_result = await sh.hash([filename2, filename1])
assert hash2 == hash_result[0]
assert hash1 == hash_result[1]
@pytest.mark.asyncio
async def test_hash_list(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
filenames = ["test1.txt", "test2.txt", "test3.txt", "test_dir/nested.txt"]
for f in filenames:
helper_append_file(cache_dir, revision, f, "sdfadfgfdgh")
filenames.append('test_dir/') # Append the directory, since dirs can be stored in the manifest
hash_results = await sh.hash(filenames)
assert len(hash_results) == 5
@pytest.mark.asyncio
async def test_hash_big(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
helper_append_file(cache_dir, revision, 'test1.txt', "asdf " * 100000000)
helper_append_file(cache_dir, revision, 'test2.txt', "hgfd " * 100000000)
helper_append_file(cache_dir, revision, 'test3.txt', "jjhf " * 10000000)
helper_append_file(cache_dir, revision, 'test4.txt', "jjhf " * 10000000)
filenames = ['test1.txt', 'test2.txt', 'test3.txt', 'test4.txt']
hash_results = await sh.hash(filenames)
assert len(hash_results) == 4
for hr in hash_results:
assert len(hr) == 128
assert hash_results[0] != hash_results[1]
assert hash_results[0] != hash_results[2]
assert hash_results[0] != hash_results[3]
assert hash_results[1] != hash_results[2]
assert hash_results[1] != hash_results[3]
assert hash_results[2] == hash_results[3]
def test_fast_hash_save(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
hash_result1 = sh.fast_hash([filename], save=False)
assert sh.fast_hash_data == {}
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
hash_result2 = sh.fast_hash([filename])
assert hash_result1 == hash_result2
assert filename in sh.fast_hash_data
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is True
def test_has_changed_fast(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
assert sh.is_cached(filename) is False
hash_result = sh.fast_hash([filename])
hash_result = hash_result[0]
fname, fsize, mtime = hash_result.split("||")
assert fname == "test1.txt"
assert fsize == '6'
assert sh.fast_hash_data is not None
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is True
assert sh.is_cached(filename) is True
assert sh.has_changed_fast(filename) is False
time.sleep(1.1)
assert sh.has_changed_fast(filename) is False
# Change file
helper_append_file(cache_dir, revision, filename, "jgfdjfdgsjfdgsj")
assert sh.has_changed_fast(filename) is True
assert sh.has_changed_fast(filename) is True
sh.fast_hash([filename])
assert sh.has_changed_fast(filename) is False
# Touch file, so only change mtime
time.sleep(1.1)
Path(sh.get_abs_path(filename)).touch()
assert sh.has_changed_fast(filename) is True
sh.fast_hash([filename])
assert sh.has_changed_fast(filename) is False
def test_has_changed_fast_from_loaded(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
hash_result = sh.fast_hash([filename])
hash_result = hash_result[0]
fname, fsize, mtime = hash_result.split("||")
assert fname == "test1.txt"
assert fsize == '6'
assert sh.fast_hash_data is not None
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is True
assert sh.is_cached(filename) is True
assert sh.has_changed_fast(filename) is False
sh2 = SmartHash(ds.root_dir, cache_dir, revision)
assert sh2.fast_hash_data is not None
assert sh2.is_cached(filename) is True
assert sh2.has_changed_fast(filename) is False
assert sh2.fast_hash_data[filename] == hash_result
def test_fast_hash_list(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
filenames = ["test1.txt", "test2.txt", "test3.txt", "test_dir/nested.txt"]
for f in filenames:
helper_append_file(cache_dir, revision, f, "sdfadfgfdgh")
filenames.append('test_dir/') # Append the directory, since dirs can be stored in the manifest
hash_results = sh.fast_hash(filenames)
assert len(hash_results) == 5
for fname, result in zip(filenames, hash_results):
if fname == 'test_dir/':
assert len(result.split("||")) == 3
path, fsize, _ = result.split("||")
assert path == fname
assert fsize == '4096'
else:
assert len(result.split("||")) == 3
path, fsize, _ = result.split("||")
assert path == fname
assert fsize == '11'
def test_fast_hash_big(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
helper_append_file(cache_dir, revision, 'test1.txt', "asdf " * 100000000)
helper_append_file(cache_dir, revision, 'test2.txt', "hgfd " * 100000000)
helper_append_file(cache_dir, revision, 'test3.txt', "jjh " * 10000000)
helper_append_file(cache_dir, revision, 'test4.txt', "jjh " * 10000000)
filenames = ['test1.txt', 'test2.txt', 'test3.txt', 'test4.txt']
hash_results = sh.fast_hash(filenames)
fname, fsize, mtime = hash_results[0].split("||")
assert 'test1.txt' == fname
assert fsize == "500000000"
fname, fsize, mtime = hash_results[1].split("||")
assert 'test2.txt' in fname
assert fsize == "500000000"
fname, fsize, mtime = hash_results[2].split("||")
assert 'test3.txt' in fname
assert fsize == "40000000"
fname, fsize, mtime = hash_results[3].split("||")
assert 'test4.txt' in fname
assert fsize == "40000000"
assert hash_results[2] != hash_results[3]
def test_get_deleted_files(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
filenames = ["test1.txt", "test2.txt", "test3.txt", "test_dir/nested.txt"]
for f in filenames:
helper_append_file(cache_dir, revision, f, "sdfadfgfdgh")
hash_results = sh.fast_hash(filenames)
assert len(hash_results) == 4
assert len(sh.get_deleted_files(filenames)) == 0
test_new_filenames = ["test1.txt", "test_dir/nested.txt"]
deleted = sh.get_deleted_files(test_new_filenames)
assert len(deleted) == 2
assert deleted[0] == "test2.txt"
assert deleted[1] == "test3.txt"
```
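The fast-hash tests above only rely on each entry following a `path||size||mtime` layout. Below is a small standalone sketch that produces such an entry with `os.stat`; it illustrates the format the tests check and is not the real `SmartHash.fast_hash` implementation.
```python
import os
import tempfile


def fast_hash_entry(root: str, relative_path: str) -> str:
    """Build a path||size||mtime entry like the ones the tests above split apart."""
    st = os.stat(os.path.join(root, relative_path))
    return f"{relative_path}||{st.st_size}||{st.st_mtime}"


root = tempfile.mkdtemp()
with open(os.path.join(root, "test1.txt"), "wt") as fh:
    fh.write("pupper")

entry = fast_hash_entry(root, "test1.txt")
fname, fsize, _mtime = entry.split("||")
assert fname == "test1.txt"
assert fsize == "6"
print(entry)
```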
#### File: dispatcher/tests/test_dispatcher.py
```python
import getpass
import threading
import json
import time
import shutil
import pytest
import datetime
import pprint
import multiprocessing
import tempfile
import uuid
import os
import rq_scheduler
import rq
from gtmcore.imagebuilder import ImageBuilder
from gtmcore.configuration import get_docker_client
from gtmcore.environment import ComponentManager, RepositoryManager
from gtmcore.fixtures import mock_config_file
import gtmcore.fixtures
from gtmcore.dispatcher import Dispatcher
from gtmcore.labbook import LabBook
from gtmcore.inventory.inventory import InventoryManager
import gtmcore.dispatcher.jobs as bg_jobs
@pytest.fixture()
def temporary_worker():
"""A pytest fixture that creates a temporary directory and a config file to match. Deletes directory after test"""
def run_worker():
with rq.Connection():
qs = 'labmanager_unittests'
w = rq.Worker(qs)
w.work()
# This watchdog is used to kill the worker. Sometimes if tests fail the worker runs forever and
# holds up the entire process. This gives each test 15 seconds to run before killing the worker
# and forcing the test to fail.
def watch_proc(p):
count = 0
while count < 15:
count = count + 1
time.sleep(1)
try:
p.terminate()
except:
pass
worker_proc = multiprocessing.Process(target=run_worker)
worker_proc.start()
watchdog_thread = threading.Thread(target=watch_proc, args=(worker_proc,))
watchdog_thread.start()
dispatcher = Dispatcher('labmanager_unittests')
yield worker_proc, dispatcher
worker_proc.terminate()
class TestDispatcher(object):
def test_unallowed_task_not_run(self, temporary_worker):
w, d = temporary_worker
def oh_no(cats, dogs, bees):
raise RuntimeError("This should never happen!")
try:
# Only allowed tasks may be dispatched.
d.dispatch_task(oh_no, args=('x', 1, None))
except ValueError as e:
assert 'not in available' in str(e), "Attempt should result in ValueError"
else:
assert False, "Method not in registry should not have been allowed to run"
w.terminate()
def test_simple_task(self, temporary_worker):
w, d = temporary_worker
job_ref = d.dispatch_task(bg_jobs.test_exit_success)
time.sleep(1)
try:
res = d.query_task(job_ref)
assert res
assert res.status == 'finished'
assert res.result == 0
assert res.failure_message is None
assert res.finished_at is not None
finally:
w.terminate()
def test_failing_task(self, temporary_worker):
w, d = temporary_worker
job_ref = d.dispatch_task(bg_jobs.test_exit_fail)
time.sleep(1)
res = d.query_task(job_ref)
assert res
assert res.status == 'failed'
assert res.failure_message == 'Exception: Intentional Exception from job `test_exit_fail`'
w.terminate()
def test_query_failed_tasks(self, temporary_worker):
w, d = temporary_worker
job_ref = d.dispatch_task(bg_jobs.test_exit_fail)
time.sleep(1)
assert job_ref in [j.job_key for j in d.failed_jobs]
assert job_ref not in [j.job_key for j in d.finished_jobs]
t = d.query_task(job_ref)
assert t.failure_message == 'Exception: Intentional Exception from job `test_exit_fail`'
w.terminate()
def test_query_complete_tasks(self, temporary_worker):
w, d = temporary_worker
job_ref = d.dispatch_task(bg_jobs.test_exit_success)
time.sleep(1)
assert job_ref in [j.job_key for j in d.finished_jobs]
assert job_ref not in [j.job_key for j in d.failed_jobs]
def test_abort(self, temporary_worker):
w, d = temporary_worker
job_ref_1 = d.dispatch_task(bg_jobs.test_sleep, args=(3,))
time.sleep(1.2)
assert d.query_task(job_ref_1).status == 'started'
d.abort_task(job_ref_1)
time.sleep(0.1)
j = d.query_task(job_ref_1)
# There should be no result, because it was cancelled
assert j.result is None
# RQ should identify the task as failed
assert j.status == "failed"
# Now assert the worker pid is still alive (so it can be assigned something else)
worker_pid = w.pid
try:
os.kill(int(worker_pid), 0)
# Signal 0 succeeds if the worker process is still alive, which is what we expect here
except OSError:
assert False, "Worker process was killed"
def test_simple_dependent_job(self, temporary_worker):
w, d = temporary_worker
job_ref_1 = d.dispatch_task(bg_jobs.test_sleep, args=(2,))
job_ref_2 = d.dispatch_task(bg_jobs.test_exit_success, dependent_job=job_ref_1)
time.sleep(0.5)
assert d.query_task(job_ref_2).status == 'deferred'
time.sleep(3)
assert d.query_task(job_ref_1).status == 'finished'
assert d.query_task(job_ref_2).status == 'finished'
n = d.query_task(job_ref_1)
assert n.meta.get('sample') == 'test_sleep metadata'
def test_fail_dependent_job(self, temporary_worker):
w, d = temporary_worker
job_ref_1 = d.dispatch_task(bg_jobs.test_exit_fail)
job_ref_2 = d.dispatch_task(bg_jobs.test_exit_success, dependent_job=job_ref_1)
time.sleep(3)
assert d.query_task(job_ref_1).status == 'failed'
assert d.query_task(job_ref_2).status == 'deferred'
def test_simple_scheduler(self, temporary_worker, mock_config_file):
# Run a simple task that increments the integer contained in a file.
w, d = temporary_worker
path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
if os.path.exists(path):
os.remove(path)
d.schedule_task(bg_jobs.test_incr, args=(path,), repeat=3, interval=2)
time.sleep(8)
try:
with open(path) as fp:
assert json.load(fp)['amt'] == 3
except Exception as e:
raise e
finally:
pass
def test_run_only_once(self, temporary_worker, mock_config_file):
# Assert that this method only gets called once.
w, d = temporary_worker
path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
if os.path.exists(path):
os.remove(path)
future_t = datetime.datetime.utcnow() + datetime.timedelta(seconds=1)
jr = d.schedule_task(bg_jobs.test_incr, scheduled_time=future_t, args=(path,), repeat=0)
time.sleep(4)
try:
with open(path) as fp:
assert json.load(fp)['amt'] == 1
except Exception as e:
raise e
finally:
w.terminate()
pass
def test_schedule_with_repeat_is_zero(self, temporary_worker, mock_config_file):
# When repeat is zero, it should run only once.
w, d = temporary_worker
path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
if os.path.exists(path):
os.remove(path)
try:
jr = d.schedule_task(bg_jobs.test_incr, args=(path,), repeat=0, interval=4)
time.sleep(6)
n = d.unschedule_task(jr)
time.sleep(5)
with open(path) as fp:
assert json.load(fp)['amt'] in [1], "When repeat=0, the task should run only once."
finally:
w.terminate()
def test_unschedule_task(self, temporary_worker, mock_config_file):
w, d = temporary_worker
path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
if os.path.exists(path):
os.remove(path)
try:
future_t = datetime.datetime.utcnow() + datetime.timedelta(seconds=5)
jr = d.schedule_task(bg_jobs.test_incr, scheduled_time=future_t, args=(path,), repeat=4, interval=1)
time.sleep(2)
n = d.unschedule_task(jr)
assert n, "Task should have been cancelled, instead it was not found."
time.sleep(5)
assert not os.path.exists(path)
finally:
w.terminate()
def test_unschedule_midway_through(self, temporary_worker, mock_config_file):
w, d = temporary_worker
path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
if os.path.exists(path):
os.remove(path)
try:
future_t = None # i.e., start right now.
jr = d.schedule_task(bg_jobs.test_incr, scheduled_time=future_t, args=(path,), repeat=6, interval=5)
time.sleep(8)
n = d.unschedule_task(jr)
assert n, "Task should have been cancelled, instead it was not found."
time.sleep(5)
with open(path) as fp:
assert json.load(fp)['amt'] in [2]
finally:
w.terminate()
```
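The dispatcher tests above exercise a thin wrapper around RQ. For orientation, the sketch below shows the underlying RQ calls directly: enqueue a job on a named queue and poll its status. It assumes a local Redis server and an RQ worker listening on that queue; the queue name is reused from the tests above.
```python
import time

from redis import Redis
from rq import Queue

# Assumes a local Redis server and a worker started with: rq worker labmanager_unittests
q = Queue("labmanager_unittests", connection=Redis())

# Functions can be referenced by dotted path so the worker process can import them
job = q.enqueue("math.sqrt", 16.0)

for _ in range(10):
    if job.get_status() == "finished":
        print(job.result)  # -> 4.0
        break
    time.sleep(0.5)
```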
#### File: gtmcore/dispatcher/worker.py
```python
import multiprocessing
import argparse
import os
from gtmcore.logging import LMLogger
from rq import Connection, Queue, Worker
logger = LMLogger.get_logger()
def start_rq_worker(queue_name: str) -> None:
try:
with Connection():
q = Queue(name=queue_name)
logger.info("Starting RQ worker for queue `{}` in pid {}".format(queue_name, os.getpid()))
Worker(q).work()
except Exception as e:
logger.exception("Worker in pid {} failed with exception {}".format(os.getpid(), e))
raise
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run any number of RQ workers')
parser.add_argument('count', type=int, help='Number of workers')
try:
args = parser.parse_args()
except Exception as e:
logger.exception(e)
raise
logger.info("Starting {} RQ workers...".format(args.count))
procs = []
for i in range(0, args.count):
p = multiprocessing.Process(target=start_rq_worker, args=('labmanager_jobs',))
p.start()
procs.append(p)
logger.info("Started {} worker processes".format(len(procs)))
for p in procs:
p.join()
```
#### File: gtmcore/environment/apt.py
```python
from typing import (List, Dict, Optional)
from gtmcore.environment.packagemanager import PackageManager, PackageResult
from gtmcore.container.container import ContainerOperations
from gtmcore.labbook import LabBook
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class AptPackageManager(PackageManager):
"""Class to implement the apt package manager
Note: apt is somewhat limited in its ability to access old versions of packages
"""
def search(self, search_str: str, labbook: LabBook, username: str) -> List[str]:
"""Method to search a package manager for packages based on a string. The string can be a partial string.
Args:
search_str: The string to search on
labbook: Subject LabBook
username: username of current user
Returns:
list(str): The list of package names that match the search string
"""
result = ContainerOperations.run_command(f"apt-cache search {search_str}", labbook, username,
fallback_image=self.fallback_image(labbook))
packages = []
if result:
lines = result.decode('utf-8').split('\n')
for l in lines:
if l:
packages.append(l.split(" - ")[0])
return packages
def list_versions(self, package_name: str, labbook: LabBook, username: str) -> List[str]:
"""Method to list all available versions of a package based on the package name
Args:
package_name: Name of the package to query
labbook: Subject LabBook
username: Username of current user
Returns:
list(str): Version strings
"""
result = ContainerOperations.run_command(f"apt-cache madison {package_name}", labbook, username,
override_image_tag=self.fallback_image(labbook))
package_versions: List[str] = []
if result:
lines = result.decode('utf-8').split('\n')
for l in lines:
if l:
parts = l.split(" | ")
version = parts[1].strip()
if version not in package_versions:
package_versions.append(version)
else:
raise ValueError(f"Package {package_name} not found in apt.")
return package_versions
def latest_version(self, package_name: str, labbook: LabBook, username: str) -> str:
"""Method to get the latest version string for a package
Args:
package_name: Name of the package to query
labbook: Subject LabBook
username: username of current user
Returns:
str: latest version string
"""
versions = self.list_versions(package_name, labbook, username)
if versions:
return versions[0]
else:
raise ValueError("Could not retrieve version list for provided package name")
def latest_versions(self, package_names: List[str], labbook: LabBook, username: str) -> List[str]:
"""Method to get the latest version string for a list of packages
Args:
package_names: list of names of the packages to query
labbook: Subject LabBook
username: username of current user
Returns:
list: latest version strings
"""
return [self.latest_version(pkg, labbook, username) for pkg in package_names]
def list_installed_packages(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
"""Method to get a list of all packages that are currently installed
Note, this will return results for the computer/container in which it is executed. To get the properties of
a LabBook container, a docker exec command would be needed from the Gigantum application container.
return format is a list of dicts with the format {name: <package name>, version: <version string>}
Returns:
list
"""
result = ContainerOperations.run_command("apt list --installed", labbook, username,
fallback_image=self.fallback_image(labbook))
packages = []
if result:
lines = result.decode('utf-8').split('\n')
for line in lines:
if line is not None and line != "Listing..." and "/" in line:
parts = line.split(" ")
package_name, _ = parts[0].split("/")
version = parts[1].strip()
packages.append({'name': package_name, 'version': version})
return packages
def list_available_updates(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
"""Method to get a list of all installed packages that could be updated and the new version string
Note, this will return results for the computer/container in which it is executed. To get the properties of
a LabBook container, a docker exec command would be needed from the Gigantum application container.
return format is a list of dicts with the format
{name: <package name>, version: <currently installed version string>, latest_version: <latest version string>}
Returns:
list
"""
result = ContainerOperations.run_command("apt list --upgradable", labbook, username,
fallback_image=self.fallback_image(labbook))
packages = []
if result:
lines = result.decode('utf-8').split('\n')
for line in lines:
if line is not None and line != "Listing..." and "/" in line:
package_name, version_info_t = line.split("/")
version_info = version_info_t.split(' ')
packages.append({'name': package_name, 'latest_version': version_info[1],
'version': version_info[5][:-1]})
return packages
def validate_packages(self, package_list: List[Dict[str, str]], labbook: LabBook, username: str) \
-> List[PackageResult]:
"""Method to validate a list of packages, and if needed fill in any missing versions
Should check both the provided package name and version. If the version is omitted, it should be generated
from the latest version.
Args:
package_list: A list of dictionaries of packages to validate
labbook: The labbook instance
username: The username for the logged in user
Returns:
namedtuple: namedtuple indicating if the package and version are valid
"""
result = list()
for package in package_list:
pkg_result = PackageResult(package=package['package'], version=package['version'], error=True)
try:
version_list = self.list_versions(package['package'], labbook, username)
except ValueError:
result.append(pkg_result)
continue
if not version_list:
# If here, no versions found for the package...so invalid
result.append(pkg_result)
else:
if package['version']:
if package['version'] in version_list:
# Both package name and version are valid
pkg_result = pkg_result._replace(error=False)
result.append(pkg_result)
else:
# The package version is not in the list, so invalid
result.append(pkg_result)
else:
# You need to look up the version and then add
try:
pkg_result = pkg_result._replace(version=self.latest_version(package['package'],
labbook,
username))
pkg_result = pkg_result._replace(error=False)
result.append(pkg_result)
except ValueError:
result.append(pkg_result)
return result
def generate_docker_install_snippet(self, packages: List[Dict[str, str]], single_line: bool = False) -> List[str]:
"""Method to generate a docker snippet to install 1 or more packages
Args:
packages(list(dict)): A list of package names and versions to install
single_line(bool): If true, collapse the installs into a single RUN command
Returns:
list
"""
package_strings = [f"{x['name']}" for x in packages]
if single_line:
return [f"RUN apt-get -y --no-install-recommends install {' '.join(package_strings)}"]
else:
docker_strings = [f"RUN apt-get -y --no-install-recommends install {x}" for x in package_strings]
return docker_strings
```
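`list_versions` above parses `apt-cache madison` output by splitting each line on `" | "` and collecting the version column. Here is a standalone sketch of that parsing step on captured sample output; the sample lines are illustrative, since real madison output depends on the configured apt sources.
```python
from typing import List

# Sample of `apt-cache madison curl` style output (illustrative only)
sample_output = (
    "curl | 7.58.0-2ubuntu3.8 | http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages\n"
    "curl | 7.58.0-2ubuntu3 | http://archive.ubuntu.com/ubuntu bionic/main amd64 Packages\n"
)


def parse_madison(output: str) -> List[str]:
    """Collect the version column from madison-style output, preserving order and de-duplicating."""
    versions: List[str] = []
    for line in output.split("\n"):
        if not line:
            continue
        version = line.split(" | ")[1].strip()
        if version not in versions:
            versions.append(version)
    return versions


print(parse_madison(sample_output))
# -> ['7.58.0-2ubuntu3.8', '7.58.0-2ubuntu3']
```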
#### File: gtmcore/environment/packagemanager.py
```python
import abc
from typing import (List, Dict, Optional)
from collections import namedtuple
from gtmcore.labbook import LabBook
import gtmcore.environment
# A namedtuple for the result of package validation
PackageResult = namedtuple('PackageResult', ['package', 'version', 'error'])
class PackageManager(metaclass=abc.ABCMeta):
"""Class to implement the standard interface for all available Package Managers
"""
@staticmethod
def fallback_image(labbook: LabBook) -> str:
""" Generate the image name of the LabManager if the docker image for
the given labbook cannot be found. """
cm = getattr(gtmcore.environment, 'ComponentManager')(labbook)
base = cm.base_fields
return f"{base['image']['namespace']}" \
f"/{base['image']['repository']}" \
f":{base['image']['tag']}"
@abc.abstractmethod
def search(self, search_str: str, labbook: LabBook, username: str) -> List[str]:
"""Method to search a package manager for packages based on a string. The string can be a partial string.
Args:
search_str: The string to search on
labbook: Subject LabBook
username: username of current user
Returns:
list(str): The list of package names that match the search string
"""
raise NotImplementedError
@abc.abstractmethod
def list_versions(self, package_name: str, labbook: LabBook, username: str) -> List[str]:
"""Method to list all available versions of a package based on the package name with the latest package first
Args:
package_name: Name of the package to query
labbook: Subject LabBook
username: username of current user
Returns:
list(str): Version strings
"""
raise NotImplementedError
@abc.abstractmethod
def latest_version(self, package_name: str, labbook: LabBook, username: str) -> str:
"""Method to get the latest version string for a package
Args:
package_name: Name of the package to query
labbook: Subject LabBook
username: username of current user
Returns:
str: latest version string
"""
raise NotImplementedError
@abc.abstractmethod
def latest_versions(self, package_names: List[str], labbook: LabBook, username: str) -> List[str]:
"""Method to get the latest version string for a list of packages
Args:
package_names(list): list of names of the packages to query
labbook: Subject LabBook
username: username of current user
Returns:
list: latest version strings
"""
raise NotImplementedError
@abc.abstractmethod
def list_installed_packages(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
"""Method to get a list of all packages that are currently installed
Note, this will return results for the computer/container in which it is executed. To get the properties of
a LabBook container, a docker exec command would be needed from the Gigantum application container.
return format is a list of dicts with the format {name: <package name>, version: <version string>}
Returns:
list
"""
raise NotImplementedError
@abc.abstractmethod
def list_available_updates(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
"""Method to get a list of all installed packages that could be updated and the new version string
Note, this will return results for the computer/container in which it is executed. To get the properties of
a LabBook container, a docker exec command would be needed from the Gigantum application container.
return format is a list of dicts with the format
{name: <package name>, version: <currently installed version string>, latest_version: <latest version string>}
Returns:
list
"""
raise NotImplementedError
@abc.abstractmethod
def validate_packages(self, package_list: List[Dict[str, str]], labbook: LabBook, username: str) \
-> List[PackageResult]:
"""Method to validate a list of packages, and if needed fill in any missing versions
Should check both the provided package name and version. If the version is omitted, it should be generated
from the latest version.
Args:
package_list(list): A list of dictionaries of packages to validate
labbook(str): The labbook instance
username(str): The username for the logged in user
Returns:
namedtuple: namedtuple indicating if the package and version are valid
"""
raise NotImplementedError
@abc.abstractmethod
def generate_docker_install_snippet(self, packages: List[Dict[str, str]], single_line: bool = False) -> List[str]:
"""Method to generate a docker snippet to install 1 or more packages
Args:
packages(list(dict)): A list of package names and versions to install
single_line(bool): If true, collapse the installs into a single RUN command
Returns:
list
"""
raise NotImplementedError
```
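The `validate_packages` implementations build on the immutability of the `PackageResult` namedtuple, using `_replace` to fill in a resolved version or clear the error flag. A small standalone illustration of that pattern follows; the package name and version are made up.
```python
from collections import namedtuple

PackageResult = namedtuple('PackageResult', ['package', 'version', 'error'])

# Start pessimistically, as the validators above do
pkg_result = PackageResult(package='numpy', version=None, error=True)

# When a version lookup succeeds, fill it in and clear the error flag
pkg_result = pkg_result._replace(version='1.15.4')
pkg_result = pkg_result._replace(error=False)

print(pkg_result)
# -> PackageResult(package='numpy', version='1.15.4', error=False)
```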
#### File: environment/tests/test_environment_repository.py
```python
import pytest
from gtmcore.environment import BaseRepository
from gtmcore.fixtures import (mock_config_with_repo, ENV_UNIT_TEST_REPO, ENV_UNIT_TEST_BASE, ENV_UNIT_TEST_REV)
class TestEnvironmentRepository(object):
def test_get_list_index_base_image(self, mock_config_with_repo):
"""Test accessing the list version of the index"""
repo = BaseRepository(mock_config_with_repo[0])
data = repo.get_base_list()
assert type(data) == list
assert len(data) == 5
assert any(n.get('id') == ENV_UNIT_TEST_BASE for n in data)
assert any(n.get('repository') == ENV_UNIT_TEST_REPO for n in data)
def test_get_component_index_base(self, mock_config_with_repo):
"""Test accessing the detail version of the index"""
repo = BaseRepository(mock_config_with_repo[0])
data = repo.get_base_versions(ENV_UNIT_TEST_REPO,
ENV_UNIT_TEST_BASE)
assert type(data) == list
assert len(data) >= 1
assert data[-1][1]['id'] == ENV_UNIT_TEST_BASE
assert data[-1][1]['repository'] == ENV_UNIT_TEST_REPO
def test_get_component_version_base(self, mock_config_with_repo):
"""Test accessing the a single version of the index"""
repo = BaseRepository(mock_config_with_repo[0])
data = repo.get_base(ENV_UNIT_TEST_REPO,
ENV_UNIT_TEST_BASE,
ENV_UNIT_TEST_REV)
assert type(data) == dict
assert data['id'] == ENV_UNIT_TEST_BASE
assert data['revision'] == ENV_UNIT_TEST_REV
assert 'image' in data
assert len(data['package_managers']) == 2
assert data['repository'] == ENV_UNIT_TEST_REPO
def test_get_component_version_base_does_not_exist(self, mock_config_with_repo):
"""Test accessing the a single version of the index that does not exist"""
repo = BaseRepository(mock_config_with_repo[0])
with pytest.raises(ValueError):
repo.get_base('gig-dev_environment-componentsXXX',
'quickstart-jupyterlab', '0.1')
with pytest.raises(ValueError):
repo.get_base(ENV_UNIT_TEST_REPO,
'quickstart-jupyterlab', '3')
with pytest.raises(ValueError):
repo.get_base(ENV_UNIT_TEST_REPO,
'quickstart-jupyterlabXXX', 0)
with pytest.raises(ValueError):
repo.get_base(ENV_UNIT_TEST_REPO,
'quickstart-jupyterlab', 99)
```
#### File: gtmcore/environment/utils.py
```python
from gtmcore.environment.packagemanager import PackageManager
from gtmcore.environment.pip import PipPackageManager
from gtmcore.environment.conda import Conda2PackageManager, Conda3PackageManager
from gtmcore.environment.apt import AptPackageManager
def get_package_manager(manager: str) -> PackageManager:
"""Helper class to instantiate a package manager based on manager string"""
if manager in ["pip2", "pip3", "pip"]:
return PipPackageManager()
elif manager == "conda2":
return Conda2PackageManager()
elif manager == "conda3":
return Conda3PackageManager()
elif manager in ["apt", "apt-get"]:
return AptPackageManager()
else:
raise ValueError(f"Unsupported package manager `{manager}`")
```
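A short usage sketch for the factory above. It requires the `gtmcore` package to be importable; the manager strings are the ones accepted by `get_package_manager` itself.
```python
# Requires the gtmcore package on the import path; purely an illustration of the
# manager strings get_package_manager() accepts.
from gtmcore.environment.utils import get_package_manager

for name in ("pip", "pip3", "conda2", "conda3", "apt", "apt-get"):
    mgr = get_package_manager(name)
    print(f"{name} -> {type(mgr).__name__}")
# pip/pip2/pip3 map to PipPackageManager; apt and apt-get map to AptPackageManager
```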
#### File: files/tests/test_labbook_fileops.py
```python
import pytest
import tempfile
import os
import pprint
from gtmcore.labbook import LabBook
from gtmcore.files import FileOperations as FO
from gtmcore.fixtures import mock_config_file, mock_labbook, remote_labbook_repo, sample_src_file
class TestLabbookFileOperations(object):
def test_insert_file_success_1(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
new_file_data = FO.insert_file(lb, "code", sample_src_file)
base_name = os.path.basename(sample_src_file)
assert os.path.exists(os.path.join(lb.root_dir, 'code', base_name))
assert new_file_data['key'] == f'{base_name}'
assert new_file_data['is_dir'] is False
assert new_file_data['is_favorite'] is False
def test_insert_file_upload_id(self, mock_labbook):
lb = mock_labbook[2]
test_file = os.path.join(tempfile.gettempdir(), "asdfasdf-testfile.txt")
with open(test_file, 'wt') as sample_f:
# Fill sample file with some deterministic crap
sample_f.write("n4%nm4%M435A EF87kn*C" * 40)
# This is basically checking for rename
new_file_data = FO.insert_file(lb, "code", test_file, "testfile.txt")
assert os.path.exists(os.path.join(lb.root_dir, 'code', 'testfile.txt'))
assert new_file_data['key'] == 'testfile.txt'
assert new_file_data['is_dir'] is False
def test_insert_file_success_2(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
FO.makedir(lb, "output/testdir")
new_file_data = FO.insert_file(lb, "output", sample_src_file, "testdir")
base_name = os.path.basename(new_file_data['key'])
assert os.path.exists(os.path.join(lb.root_dir, 'output', 'testdir', base_name))
assert new_file_data['key'] == f'testdir/{base_name}'
assert new_file_data['is_dir'] is False
def test_insert_and_make_intermediary_directories(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
FO.insert_file(lb, "code", sample_src_file, "/super/random/dir/inside.file")
p = os.path.join(lb.root_dir, 'code', "super/random/dir/inside.file")
assert os.path.isfile(p)
def test_insert_file_fail_due_to_gitignore(self, mock_labbook):
lb = mock_labbook[2]
git_hash_1 = lb.git.commit_hash
lines = [l.strip() for l in open(os.path.join(lb.root_dir, '.gitignore')).readlines()]
assert any(['.DS_Store' in l for l in lines])
# Note: .DS_Store is listed in the .gitignore file.
test_file = os.path.join(tempfile.gettempdir(), ".DS_Store")
with open(test_file, 'wt') as sample_f:
# Fill sample file with some deterministic crap
sample_f.write("This file should not be allowed to be inserted into labbook. " * 40)
git_hash_2 = lb.git.commit_hash
with pytest.raises(Exception):
r = lb.insert_file('input', src_file=sample_f.name, dst_dir='')
# Make sure no commits were made
assert git_hash_1 == git_hash_2
# Make sure the inserted file that doesn't match wasn't added.
assert '.DS_Store' not in os.listdir(os.path.join(lb.root_dir, 'input'))
def test_remove_file_success(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
new_file_data = FO.insert_file(lb, "code", sample_src_file)
base_name = os.path.basename(new_file_data['key'])
assert os.path.exists(os.path.join(lb.root_dir, 'code', base_name))
FO.delete_files(lb, 'code', [base_name])
assert not os.path.exists(os.path.join(lb.root_dir, 'code', base_name))
def test_remove_file_fail(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
FO.insert_file(lb, "code", sample_src_file)
new_file_path = os.path.join('blah', 'invalid.txt')
with pytest.raises(ValueError):
FO.delete_files(lb, 'code', [new_file_path])
def test_remove_file_fail_old_prototype(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
new_file_data = FO.insert_file(lb, "code", sample_src_file)
base_name = os.path.basename(new_file_data['key'])
assert os.path.exists(os.path.join(lb.root_dir, 'code', base_name))
with pytest.raises(ValueError):
FO.delete_files(lb, 'code', base_name)
def test_remove_dir(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
FO.makedir(lb, "output/testdir")
new_file_path = FO.insert_file(lb, "output", sample_src_file, "testdir")
base_name = os.path.basename(new_file_path['key'])
assert os.path.exists(os.path.join(lb.root_dir, 'output', 'testdir', base_name))
# Note! Now that remove() uses force=True, no special action is needed for directories.
# Delete the directory
FO.delete_files(lb, "output", ["testdir"])
assert not os.path.exists(os.path.join(lb.root_dir, 'output', 'testdir', base_name))
assert not os.path.exists(os.path.join(lb.root_dir, 'output', 'testdir'))
def test_remove_empty_dir(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
FO.makedir(lb, "output/testdir")
new_file_path = FO.insert_file(lb, "output", sample_src_file, "testdir")
base_name = os.path.basename(new_file_path['key'])
assert os.path.exists(os.path.join(lb.root_dir, 'output', 'testdir', base_name))
# Delete the directory
FO.delete_files(lb, "output", ["testdir"])
assert not os.path.exists(os.path.join(lb.root_dir, 'output', 'testdir', base_name))
assert not os.path.exists(os.path.join(lb.root_dir, 'output', 'testdir'))
def test_remove_many_files(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
test_files = [f"testfile{x}.txt" for x in range(15)]
for test_file in test_files:
with open(os.path.join(lb.root_dir, 'code', test_file), 'wt') as sample_f:
sample_f.write("blah")
assert os.path.exists(os.path.join(lb.root_dir, 'code', test_file))
lb.git.add_all()
lb.git.commit("making test data")
FO.delete_files(lb, "code", test_files)
for test_file in test_files:
assert not os.path.exists(os.path.join(lb.root_dir, 'code', test_file))
def test_move_file_as_rename_in_same_dir(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
# insert file
new_file_data = FO.insert_file(lb, "code", sample_src_file, '')
base_name = os.path.basename(new_file_data['key'])
assert os.path.exists(os.path.join(lb.root_dir, 'code', base_name))
assert new_file_data['key'] == base_name
# move to rename
moved_rel_path = os.path.join(f'{base_name}.MOVED')
r = FO.move_file(lb, 'code', new_file_data['key'], moved_rel_path)
assert len(r) == 1
assert not os.path.exists(os.path.join(lb.root_dir, 'code', base_name))
assert os.path.exists(os.path.join(lb.root_dir, 'code', f'{base_name}.MOVED'))
assert os.path.isfile(os.path.join(lb.root_dir, 'code', f'{base_name}.MOVED'))
def test_move_single_file(self, mock_labbook, mock_config_file, sample_src_file):
lb = mock_labbook[2]
f = FO.insert_file(lb, 'code', sample_src_file)['key']
FO.makedir(lb, 'code/target_dir')
results = FO.move_file(lb, 'code', f, 'target_dir')
assert len(results) == 1
pprint.pprint(results)
assert results[0]['is_dir'] == False
assert results[0]['key'] == 'target_dir/' + os.path.basename(sample_src_file)
def test_move_single_file_to_section_top(self, mock_labbook, mock_config_file, sample_src_file):
lb = mock_labbook[2]
FO.makedir(lb, 'code/inner_dir')
f = FO.insert_file(lb, 'code', sample_src_file, 'inner_dir')['key']
# Move file to top of code section
results = FO.move_file(lb, 'code', f, dst_rel_path='')
# Results should be returned for "code" -- just the file that was moved to the top of the section
assert len(results) == 1
assert results[0]['is_dir'] == False
assert results[0]['key'] == os.path.basename(f)
def test_move_empty_directory(self, mock_labbook, mock_config_file, sample_src_file):
lb = mock_labbook[2]
FO.makedir(lb, 'code/stable_dir')
FO.makedir(lb, 'code/empty_dir')
# We'll move "empty_dir" into "stable_dir" - there should only be one element in returned list
res = FO.move_file(lb, 'code', 'empty_dir', 'stable_dir')
assert len(res) == 1
assert res[0]['is_dir'] is True
assert res[0]['key'] == 'stable_dir/empty_dir/'
def test_move_loaded_directory_with_one_file(self, mock_labbook, mock_config_file, sample_src_file):
lb = mock_labbook[2]
new_file_data = FO.insert_file(lb, "code", sample_src_file)
base_name = os.path.basename(new_file_data['key'])
assert os.path.exists(os.path.join(lb.root_dir, 'code', base_name))
# make new subdir
os.makedirs(os.path.join(lb.root_dir, 'code', 'subdir'))
# .. and then put a file in it
mv_file_res = FO.move_file(lb, "code", base_name, os.path.join('subdir', base_name))
# Should be 1, since only the info for the moved file is returned
assert len(mv_file_res) == 1
assert mv_file_res[0]['key'] == f'subdir/{base_name}'
assert mv_file_res[0]['is_dir'] == False
# Move "subdir" into "target_dir", there should be two activity records
FO.makedir(lb, "code/target_dir", create_activity_record=True)
mv_dir_res = FO.move_file(lb, "code", 'subdir', 'target_dir')
assert len(mv_dir_res) == 2
assert mv_dir_res[0]['key'] == 'target_dir/subdir/'
assert mv_dir_res[0]['is_dir'] is True
assert mv_dir_res[1]['key'] == f'target_dir/subdir/{base_name}'
assert mv_dir_res[1]['is_dir'] is False
assert not os.path.exists(os.path.join(lb.root_dir, 'code', 'subdir'))
assert os.path.exists(os.path.join(lb.root_dir, 'code', 'target_dir/subdir'))
def test_move_loaded_directory_with_full_tree(self, mock_labbook, mock_config_file, sample_src_file):
lb = mock_labbook[2]
FO.makedir(lb, 'code/level_1/level_2A', create_activity_record=True)
FO.makedir(lb, 'code/level_1/level_2B', create_activity_record=True)
FO.makedir(lb, 'code/target_dir', create_activity_record=True)
FO.makedir(lb, 'code/target_dir/existing_dir_counted_anyway', create_activity_record=True)
FO.makedir(lb, 'code/this-dir-must-be-ignored', create_activity_record=True)
FO.insert_file(lb, 'code', sample_src_file, dst_path='level_1/level_2B')
# Move "level_1" into target_dir
results = FO.move_file(lb, 'code', 'level_1', 'target_dir')
assert len(results) == 4
def test_makedir_simple(self, mock_labbook):
# Note that "score" refers to the count of .gitkeep files.
lb = mock_labbook[2]
long_dir = "code/non/existant/dir/should/now/be/made"
dirs = ["code/cat_dir", "code/dog_dir", "code/mouse_dir/", "code/mouse_dir/new_dir", long_dir]
for d in dirs:
FO.makedir(lb, d)
assert os.path.isdir(os.path.join(lb.root_dir, d))
assert os.path.isfile(os.path.join(lb.root_dir, d, '.gitkeep'))
score = 0
for root, dirs, files in os.walk(os.path.join(lb.root_dir, 'code', 'non')):
for f in files:
if f == '.gitkeep':
score += 1
# Ensure that count of .gitkeep files equals the number of subdirs, excluding the code dir.
assert score == len(LabBook.make_path_relative(long_dir).split(os.sep)) - 1
def test_makedir_record(self, mock_labbook):
# Note that "score" refers to the count of .gitkeep files.
lb = mock_labbook[2]
assert os.path.exists(os.path.join(lb.root_dir, 'code', 'test')) is False
FO.makedir(lb, "code/test", create_activity_record=True)
assert os.path.exists(os.path.join(lb.root_dir, 'code', 'test')) is True
assert lb.is_repo_clean is True
FO.makedir(lb, "code/test2", create_activity_record=False)
assert os.path.exists(os.path.join(lb.root_dir, 'code', 'test2')) is True
assert lb.is_repo_clean is False
def test_walkdir(self, mock_labbook):
lb = mock_labbook[2]
dirs = ["code/cat_dir", "code/dog_dir", "code/mouse_dir/", "code/mouse_dir/new_dir", "code/.hidden_dir"]
for d in dirs:
FO.makedir(lb, d)
for d in ['.hidden_dir/', '', 'dog_dir', 'mouse_dir/new_dir/']:
open('/tmp/myfile.c', 'w').write('data')
FO.insert_file(lb, 'code', '/tmp/myfile.c', d)
dir_walks_hidden = FO.walkdir(lb, 'code', show_hidden=True)
assert any([os.path.basename('/tmp/myfile.c') in d['key'] for d in dir_walks_hidden])
assert not any(['.git' in d['key'].split(os.path.sep) for d in dir_walks_hidden])
assert not any(['.gigantum' in d['key'] for d in dir_walks_hidden])
assert all([d['key'][0] != '/' for d in dir_walks_hidden])
# Spot check some entries
assert len(dir_walks_hidden) == 15
assert dir_walks_hidden[0]['key'] == '.hidden_dir/'
assert dir_walks_hidden[0]['is_dir'] is True
assert dir_walks_hidden[3]['key'] == 'mouse_dir/'
assert dir_walks_hidden[3]['is_dir'] is True
assert dir_walks_hidden[6]['key'] == '.hidden_dir/.gitkeep'
assert dir_walks_hidden[6]['is_dir'] is False
assert dir_walks_hidden[13]['key'] == 'mouse_dir/new_dir/.gitkeep'
assert dir_walks_hidden[13]['is_dir'] is False
        # Without show_hidden, the hidden directory (and the file placed in it) should be excluded.
dir_walks = FO.walkdir(lb, 'code')
# Spot check some entries
assert len(dir_walks) == 7
assert dir_walks[0]['key'] == 'cat_dir/'
assert dir_walks[0]['is_dir'] is True
assert dir_walks[1]['key'] == 'dog_dir/'
assert dir_walks[1]['is_dir'] is True
assert dir_walks[2]['key'] == 'mouse_dir/'
assert dir_walks[2]['is_dir'] is True
assert dir_walks[3]['is_dir'] is False
assert dir_walks[4]['is_dir'] is False
assert dir_walks[5]['is_dir'] is True
assert dir_walks[5]['key'] == 'mouse_dir/new_dir/'
assert dir_walks[6]['is_dir'] is False
def test_listdir(self, mock_labbook, sample_src_file):
def write_test_file(base, name):
with open(os.path.join(base, name), 'wt') as f:
f.write("Blah blah")
lb = mock_labbook[2]
dirs = ["code/new_dir", ".hidden_dir"]
for d in dirs:
FO.makedir(lb, d)
write_test_file(lb.root_dir, 'test1.txt')
write_test_file(lb.root_dir, 'test2.txt')
write_test_file(lb.root_dir, '.hidden.txt')
write_test_file(lb.root_dir, 'code/test_subdir1.txt')
write_test_file(lb.root_dir, 'code/test_subdir2.txt')
write_test_file(lb.root_dir, 'code/new_dir/tester.txt')
# List just the code dir
data = FO.listdir(lb, "code", base_path='')
assert len(data) == 3
assert data[0]['key'] == 'new_dir/'
assert data[1]['key'] == 'test_subdir1.txt'
assert data[2]['key'] == 'test_subdir2.txt'
data = FO.listdir(lb, "input", base_path='')
assert len(data) == 0
# List just the code/subdir dir
data = FO.listdir(lb, "code", base_path='new_dir')
assert len(data) == 1
assert data[0]['key'] == 'new_dir/tester.txt'
def test_listdir_expect_error(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
with pytest.raises(ValueError):
FO.listdir(lb, "code", base_path='blah')
def test_walkdir_with_favorites(self, mock_labbook, sample_src_file):
lb = mock_labbook[2]
dirs = ["code/cat_dir", "code/dog_dir"]
for d in dirs:
FO.makedir(lb, d)
sfile = '/tmp/testwalkdirwithfavorites.file'
for d in ['', 'dog_dir', 'cat_dir']:
open(sfile, 'w').write('xxx')
FO.insert_file(lb, 'code', sfile, d)
sample_filename = os.path.basename(sfile)
        # Walk the code section before any favorites have been created.
dir_walks = FO.walkdir(lb, 'code')
# Spot check some entries
assert len(dir_walks) == 5
assert dir_walks[0]['key'] == 'cat_dir/'
assert dir_walks[0]['is_dir'] is True
assert dir_walks[0]['is_favorite'] is False
assert dir_walks[1]['key'] == 'dog_dir/'
assert dir_walks[1]['is_dir'] is True
assert dir_walks[1]['is_favorite'] is False
assert dir_walks[2]['is_favorite'] is False
assert dir_walks[2]['is_dir'] is False
assert dir_walks[3]['is_favorite'] is False
assert dir_walks[3]['is_dir'] is False
assert dir_walks[4]['is_favorite'] is False
assert dir_walks[4]['is_dir'] is False
lb.create_favorite("code", sample_filename, description="Fav 1")
lb.create_favorite("code", f"dog_dir/{sample_filename}", description="Fav 2")
lb.create_favorite("code", f"cat_dir/", description="Fav 3", is_dir=True)
dir_walks = FO.walkdir(lb, 'code')
# Spot check some entries
assert len(dir_walks) == 5
assert dir_walks[0]['key'] == 'cat_dir/'
assert dir_walks[0]['is_dir'] is True
assert dir_walks[0]['is_favorite'] is True
assert dir_walks[1]['key'] == 'dog_dir/'
assert dir_walks[1]['is_dir'] is True
assert dir_walks[1]['is_favorite'] is False
assert dir_walks[2]['is_favorite'] is True
assert dir_walks[2]['is_dir'] is False
assert dir_walks[3]['is_favorite'] is False
assert dir_walks[3]['is_dir'] is False
assert dir_walks[4]['is_favorite'] is True
assert dir_walks[4]['is_dir'] is False
```
#### File: gtmcore/gitlib/git_fs_shim.py
```python
from gtmcore.gitlib.git_fs import GitFilesystem
from gtmcore.logging import LMLogger
import subprocess
logger = LMLogger.get_logger()
class GitFilesystemShimmed(GitFilesystem):
def add(self, filename):
"""Add a file to a commit
Args:
filename(str): Filename to add.
Returns:
None
"""
logger.info("Adding file {} to Git repository in {}".format(filename, self.working_directory))
try:
r = subprocess.run(['git', 'add', f'{filename}'], stderr=subprocess.PIPE, stdout=subprocess.PIPE,
check=True, cwd=self.working_directory)
except subprocess.CalledProcessError as x:
logger.error(f'{x.stdout}, {x.stderr}')
raise
```
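A minimal usage sketch of the shimmed backend above. The repository path is hypothetical, and the shape of the config dict (a `working_directory` key) is an assumption borrowed from the plain `GitFilesystem` backend rather than something this file confirms:
```python
# Sketch only: assumes /tmp/example-repo already holds a git repository and that
# the constructor accepts the same config dict as GitFilesystem.
from gtmcore.gitlib.git_fs_shim import GitFilesystemShimmed

git = GitFilesystemShimmed({"backend": "filesystem-shim",
                            "working_directory": "/tmp/example-repo"})
git.add("notes.txt")  # shells out to `git add notes.txt` inside the working directory
```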
#### File: gtmcore/gitlib/git.py
```python
import abc
import importlib
from typing import Dict, List, Optional, Tuple
# Dictionary of supported implementations.
# Key is the value to put in the config_dict["backend"].
# Value is a list with the first entry being the module and the second the class
SUPPORTED_GIT_INTERFACES = {'filesystem': ["gtmcore.gitlib.git_fs", "GitFilesystem"],
'filesystem-shim': ["gtmcore.gitlib.git_fs_shim", "GitFilesystemShimmed"]}
def get_git_interface(config_dict):
"""Factory method that instantiates a GitInterface implementation based on provided configuration information
Note: `backend` is a required key in config_dict that specifies the gitlib backend implementation to use.
Supported Implementations:
- "filesystem" - Provides an interface that works on any repo on the filesystem
Args:
config_dict(dict): Dictionary of configuration information
Returns:
GitRepoInterface
"""
if "backend" not in config_dict:
raise ValueError("You must specify the `backend` parameter to instantiate a GitInterface implementation")
if config_dict["backend"] not in SUPPORTED_GIT_INTERFACES:
raise ValueError("Unsupported `backend` parameter {}. Valid backends: {}".format(config_dict["backend"],
",".join(SUPPORTED_GIT_INTERFACES.keys())))
# If you are here OK to import class
backend_class = getattr(importlib.import_module(SUPPORTED_GIT_INTERFACES[config_dict["backend"]][0]),
SUPPORTED_GIT_INTERFACES[config_dict["backend"]][1])
# Instantiate with the config dict and return to the user
return backend_class(config_dict)
class GitAuthor(object):
"""Simple Class to store user information for author/committer"""
def __init__(self, name, email):
"""
Args:
name(str): User's first and last name
email(str): User's email address
"""
self.name = name
self.email = email
def __str__(self):
return "{} - {}".format(self.name, self.email)
class GitRepoInterface(metaclass=abc.ABCMeta):
def __init__(self, config_dict, author=None, committer=None):
"""Constructor
config_dict should contain any custom params needed for the backend. For example, the working directory
for a local backend or a service URL for a web service based backend.
Args:
config_dict(dict): Configuration details for the interface
author(GitAuthor): User info for the author, if omitted, assume the "system"
committer(GitAuthor): User info for the committer. If omitted, set to the author
"""
self.config = config_dict
self.author = None
self.committer = None
self.working_directory = None
self.update_author(author=author, committer=committer)
def set_working_directory(self, directory):
"""Method to change the current working directory of the repository
Args:
directory(str): Absolute path to the working dir
Returns:
None
"""
raise NotImplemented
def update_author(self, author, committer=None):
"""Method to get the current branch name
Args:
author(GitAuthor): User info for the author, if omitted, assume the "system"
committer(GitAuthor): User info for the committer. If omitted, set to the author
Returns:
None
"""
if author:
if type(author) != GitAuthor:
raise ValueError("Must provide a GitAuthor instance to specify the author")
self.author = author
else:
self.author = GitAuthor("Gigantum AutoCommit", "<EMAIL>")
if committer:
if type(committer) != GitAuthor:
raise ValueError("Must provide a GitAuthor instance to specify the committer")
self.committer = committer
else:
self.committer = GitAuthor("Gigantum AutoCommit", "<EMAIL>")
@property
def commit_hash(self):
"""Get the current commit hash
Returns:
str
"""
raise NotImplemented
@property
def commit_hash_short(self):
"""Get the current commit hash, limit to 8 characters
Returns:
str
"""
raise NotImplemented
@property
def committed_on(self):
"""Get the datetime the commit occurred
Returns:
datetime.datetime
"""
raise NotImplemented
@property
def git_path(self):
"""Get the full git path of the active branch
Returns:
str
"""
raise NotImplemented
@abc.abstractmethod
def get_current_branch_name(self):
"""Method to get the current branch name
Returns:
str
"""
raise NotImplemented
# CREATE METHODS
@abc.abstractmethod
def initialize(self, bare=False):
"""Initialize a new repo
Args:
bare(bool): If True, use the --bare option
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def clone(self, source, directory: Optional[str] = None):
"""Clone a repo
Args:
source (str): Git ssh or https string to clone
Returns:
None
"""
raise NotImplemented
# CREATE METHODS
# LOCAL CHANGE METHODS
@abc.abstractmethod
def status(self) -> Dict[str, List[Tuple[str, str]]]:
"""Get the status of a repo
Should return a dictionary of lists of tuples of the following format:
{
"staged_new": [(filename, status), ...],
"unstaged": [(filename, status), ...],
"untracked": [filename, ...]
}
status is the status of the file (new, modified, deleted)
Returns:
(dict(list))
"""
raise NotImplemented
@abc.abstractmethod
def add(self, filename):
"""Add a file to a commit
Args:
filename(str): Filename to add.
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def add_all(self, relative_directory=None):
"""Add all changes/files using the `git add -A` command
Args:
relative_directory(str): Relative directory (from the root_dir) to add everything
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def remove(self, filename, force=False, keep_file=True):
"""Remove a file from tracking
Args:
filename(str): Filename to remove.
force(bool): Force removal
keep_file(bool): If true, don't delete the file (e.g. use the --cached flag)
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def diff_unstaged(self, filename=None, ignore_white_space=True):
"""Method to return the diff for unstaged files, optionally for a specific file
Returns a dictionary of the format:
{
"#": [(<line_string>, <change_string>), ...],
...
}
Args:
filename(str): Optional filename to filter diff. If omitted all files will be diffed
ignore_white_space (bool): If True, ignore whitespace during diff. True if omitted
Returns:
dict
"""
raise NotImplemented
@abc.abstractmethod
def diff_staged(self, filename=None, ignore_white_space=True):
"""Method to return the diff for staged files, optionally for a specific file
Returns a dictionary of the format:
{
"#": [(<line_string>, <change_string>), ...],
...
}
Args:
filename(str): Optional filename to filter diff. If omitted all files will be diffed
ignore_white_space (bool): If True, ignore whitespace during diff. True if omitted
Returns:
dict
"""
raise NotImplemented
@abc.abstractmethod
def diff_commits(self, commit_a='HEAD~1', commit_b='HEAD', ignore_white_space=True):
"""Method to return the diff between two commits
Returns a dictionary of the format:
{
"#": [(<line_string>, <change_string>), ...],
...
}
Args:
commit_a(str): Commit hash for the first commit
commit_b(str): Commit hash for the second commit
ignore_white_space (bool): If True, ignore whitespace during diff. True if omitted
Returns:
dict
"""
raise NotImplemented
@abc.abstractmethod
def commit(self, message, author=None, committer=None):
"""Method to perform a commit operation
Commit operation should use self.author and self.committer. If author/committer provided
the implementation should update self.author and self.committer
Args:
message(str): Commit message
author(GitAuthor): User info for the author, if omitted, assume the "system"
committer(GitAuthor): User info for the committer. If omitted, set to the author
Returns:
git.Commit -- hash of new commit
"""
raise NotImplemented
# LOCAL CHANGE METHODS
# HISTORY METHODS
@abc.abstractmethod
def log(self, path_info=None, max_count=10, filename=None, skip=None, since=None, author=None):
"""Method to get the commit history, optionally for a single file, with pagination support
Returns an ordered list of dictionaries, one entry per commit. Dictionary format:
{
"commit": <commit hash (str)>,
"author": {"name": <name (str)>, "email": <email (str)>},
"committer": {"name": <name (str)>, "email": <email (str)>},
"committed_on": <commit datetime (datetime.datetime)>,
"message: <commit message (str)>
}
Args:
path_info(str): Optional path info to filter (e.g., hash1, hash2..hash1, master)
filename(str): Optional filename to filter on
max_count(int): Optional number of commit records to return
skip(int): Optional number of commit records to skip (supports building pagination)
since(datetime.datetime): Optional *date* to limit on
author(str): Optional filter based on author name
Returns:
(list(dict))
"""
raise NotImplemented
@abc.abstractmethod
def log_entry(self, commit):
"""Method to get single commit records
Returns a single dictionary in format:
{
"commit": <commit hash (str)>,
"author": {"name": <name (str)>, "email": <email (str)>},
"committer": {"name": <name (str)>, "email": <email (str)>},
"committed_on": <commit datetime (datetime.datetime)>,
"message: <commit message (str)>
}
Args:
commit: <commit hash (str)>
Returns:
(dict)
"""
raise NotImplemented
@abc.abstractmethod
def blame(self, filename):
"""Method to get the revision and author for each line of a file
Returns an ordered list of dictionaries, one entry per change. Dictionary format:
{
"commit": <commit (str)>,
"author": {"name": <name (str)>, "email": <email (str)>},
"committed_on": <datetime (datetime)>,
"message": <commit message (str)>
"content": <content block (str)>
}
Args:
filename(str): Filename to query
Returns:
list(dict)
"""
raise NotImplemented
# HISTORY METHODS
# BRANCH METHODS
@abc.abstractmethod
def create_branch(self, name):
"""Method to create a new branch from the current HEAD
Args:
name(str): Name of the branch
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def publish_branch(self, branch_name, remote_name="origin"):
"""Method to track a remote branch, check it out, and push
Args:
branch_name(str): Name of the branch
remote_name(str): Name of the remote
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def list_branches(self) -> Dict[str, List[str]]:
"""Method to list branches. Should return a dictionary of the format:
{
"local": [<name>, ...]
"remote": [<name>, ...]
}
where local are branches currently available locally
Returns:
dict
"""
raise NotImplemented
@abc.abstractmethod
def delete_branch(self, name, remote=False, force=False):
"""Method to delete a branch
Args:
name(str): Name of the branch to delete
remote(bool): If True, delete a remote branch
force(bool): If True, force delete
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def rename_branch(self, old_name, new_name):
"""Method to rename a branch
Args:
old_name(str): The old branch name
new_name(str): The new branch name
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def checkout(self, branch_name: str):
"""Method to switch to a different branch
Args:
branch_name(str): Name of the branch to switch to
Returns:
None
"""
raise NotImplemented
# BRANCH METHODS
# TAG METHODS
@abc.abstractmethod
def create_tag(self, name, message):
"""Method to create a tag
Args:
name(str): Name of the tag
message(str): Message with the tag
Returns:
None
"""
raise NotImplemented
def list_tags(self):
"""Method to list tags
Returns:
(list(dict)): list of dicts with `name` and `message` fields
"""
raise NotImplemented
# TAG METHODS
# REMOTE METHODS
@abc.abstractmethod
def list_remotes(self):
"""Method to list remote information
Returns a list of dictionaries with the format:
{
"name": <remote name>,
"url": <remote location>,
}
Returns:
list(dict)
"""
raise NotImplemented
@abc.abstractmethod
def add_remote(self, name, url, kwargs=None):
"""Method to add a new remote
Args:
name(str): Name of the remote
url(str): Connection string to the remote
kwargs(dict): Dictionary of kwargs to send to the git remote add command
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def remove_remote(self, name):
"""Method to remove a remote
Args:
name(str): Name of the remote
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def fetch(self, refspec=None, remote="origin"):
"""Method to download objects and refs from a remote
Args:
refspec(str): string describing the mapping between remote ref and local ref
remote(str): name of remote, default to `origin`
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def pull(self, refspec=None, remote="origin"):
"""Method fetch and integrate a remote
Args:
refspec(str): string describing the mapping between remote ref and local ref
remote(str): name of remote, default to `origin`
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def push(self, remote_name="origin", refspec=None, tags=False):
"""Method update remote refs along with associated objects
Args:
remote_name(str): Name of the remote repository
tags(bool): If true, push tags
Returns:
"""
raise NotImplemented
# REMOTE METHODS
# MERGE METHODS
@abc.abstractmethod
def merge(self, branch_name):
"""Method to join a future branch history with the current branch
Args:
branch_name(str): Name of the FUTURE branch to merge into the current PAST branch
Returns:
None
"""
raise NotImplemented
# MERGE METHODS
# UNDO METHODS
@abc.abstractmethod
def discard_changes(self, filename=None):
"""Discard all changes, or changes in a single file.
Args:
filename(str): Optional filename. If omitted, all changes are discarded
Returns:
None
"""
raise NotImplemented
# UNDO METHODS
# SUBMODULE METHODS
@abc.abstractmethod
def add_submodule(self, name, relative_path, repository, branch=None):
"""Method to add a submodule at the provided relative path to the repo root and commit the change
Args:
name(str): Name for the submodule
relative_path(str): Relative path from the repo root where the submodule should go
repository(str): URL to the remote repository
branch(str): If not None, the branch that should be used
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def list_submodules(self):
"""Method to list submodules
Should return a list of dicts with the format:
{
"name": <name of the submodule>,
"url": <url of repo>,
"branch": <name of the branch>
}
Returns:
list(dict)
"""
raise NotImplemented
@abc.abstractmethod
def update_submodules(self, init=True):
"""Method to update submodules and optionally init if needed
Args:
init(bool): Flag indicating if submodules should be initialized
Returns:
None
"""
raise NotImplemented
@abc.abstractmethod
def remove_submodules(self, submodule_name):
"""Method to remove submodule reference and delete the files
submodule_path:
submodule_name(str): Name of the submodule
Returns:
None
"""
raise NotImplemented
```
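A short sketch of the `get_git_interface` factory documented above. Only the `backend` key is guaranteed by the docstring; the `working_directory` key and the author details are assumptions for illustration:
```python
# Hedged sketch of the factory; the repo path and author are made up.
from gtmcore.gitlib.git import get_git_interface, GitAuthor

git = get_git_interface({"backend": "filesystem",
                         "working_directory": "/tmp/example-repo"})
git.update_author(author=GitAuthor("Jane Doe", "jane@example.com"))
print(git.author)  # -> Jane Doe - jane@example.com
```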
#### File: gtmcore/imagebuilder/imagebuilder.py
```python
import datetime
import functools
import glob
import os
import yaml
from gtmcore.environment.componentmanager import ComponentManager
from typing import (Any, Dict, List)
from gtmcore.labbook import LabBook
from gtmcore.logging import LMLogger
from gtmcore.environment.utils import get_package_manager
from gtmcore.mitmproxy.mitmproxy import CURRENT_MITMPROXY_TAG
logger = LMLogger.get_logger()
class ImageBuilder(object):
"""Class to ingest indexes describing base images, environments, and dependencies into Dockerfiles. """
def __init__(self, labbook: LabBook) -> None:
"""Create a new image builder given the path to labbook.
Args:
labbook: Subject LabBook
"""
self.labbook = labbook
if not os.path.exists(self.labbook.root_dir):
raise IOError("Labbook directory {} does not exist.".format(self.labbook.root_dir))
self._validate_labbook_tree()
def _get_yaml_files(self, directory: str) -> List[str]:
"""Method to get all YAML files in a directory
Args:
directory(str): Directory to search
Returns:
list
"""
return [x for x in glob.glob("{}{}*.yaml".format(directory, os.path.sep))]
def _validate_labbook_tree(self) -> None:
"""Throw exception if labbook directory structure not in expected format. """
subdirs = [['.gigantum'],
['.gigantum', 'env'],
['.gigantum', 'env', 'base'],
['.gigantum', 'env', 'custom'],
['.gigantum', 'env', 'package_manager']]
for subdir in subdirs:
if not os.path.exists(os.path.join(self.labbook.root_dir, *subdir)):
raise ValueError("Labbook directory missing subdir `{}'".format(subdir))
def _extra_base_images(self) -> List[str]:
"""Add other needed images via multi-stage build"""
docker_lines = []
cm = ComponentManager(self.labbook)
if 'rstudio' in cm.base_fields['development_tools']:
docker_lines.append("FROM gigantum/mitmproxy_proxy:" + CURRENT_MITMPROXY_TAG)
return docker_lines
def _import_baseimage_fields(self) -> Dict[str, Any]:
"""Load fields from base_image yaml file into a convenient dict. """
root_dir = os.path.join(self.labbook.root_dir, '.gigantum', 'env', 'base')
base_images = self._get_yaml_files(root_dir)
logger.debug("Searching {} for base image file".format(root_dir))
if len(base_images) != 1:
raise ValueError(f"There should only be one base image in {root_dir}, found {len(base_images)}")
logger.info("Using {} as base image file for labbook at {}.".format(base_images[0], self.labbook.root_dir))
with open(base_images[0]) as base_image_file:
fields = yaml.safe_load(base_image_file)
return fields
def _load_baseimage(self) -> List[str]:
"""Search expected directory structure to find the base image. Only one should exist. """
fields = self._import_baseimage_fields()
generation_ts = str(datetime.datetime.now())
docker_owner_ns = fields['image']['namespace']
docker_repo = fields['image']['repository']
docker_tag = fields['image']['tag']
docker_lines: List[str] = list()
docker_lines.append("# Dockerfile generated on {}".format(generation_ts))
docker_lines.append("# Name: {}".format(fields["name"]))
docker_lines.append("# Description: {}".format(fields["description"]))
docker_lines.append("")
# Must remove '_' if its in docker hub namespace.
prefix = '' if '_' in docker_owner_ns else f'{docker_owner_ns}/'
docker_lines.append("FROM {}{}:{}".format(prefix, docker_repo, docker_tag))
return docker_lines
def _load_packages(self) -> List[str]:
"""Load packages from yaml files in expected location in directory tree. """
root_dir = os.path.join(self.labbook.root_dir, '.gigantum', 'env', 'package_manager')
package_files = [os.path.join(root_dir, n) for n in os.listdir(root_dir) if 'yaml' in n]
docker_lines = ['## Adding individual packages']
apt_updated = False
for package in sorted(package_files):
pkg_fields: Dict[str, Any] = {}
with open(package) as package_content:
pkg_fields.update(yaml.safe_load(package_content))
# Generate the appropriate docker command for the given package info
pkg_info = {"name": str(pkg_fields['package']),
"version": str(pkg_fields.get('version'))}
if not pkg_fields.get('from_base'):
if pkg_fields['manager'] == 'apt' and not apt_updated:
docker_lines.append('RUN apt-get -y update')
apt_updated = True
docker_lines.extend(
get_package_manager(pkg_fields['manager']).generate_docker_install_snippet([pkg_info]))
return docker_lines
def _load_docker_snippets(self) -> List[str]:
docker_lines = ['# Custom docker snippets']
root_dir = os.path.join(self.labbook.root_dir, '.gigantum', 'env', 'docker')
if not os.path.exists(root_dir):
logger.warning(f"No `docker` subdirectory for environment in labbook")
return []
for snippet_file in [f for f in os.listdir(root_dir) if '.yaml' in f]:
docker_data = yaml.safe_load(open(os.path.join(root_dir, snippet_file)))
            docker_lines.append(f'# Custom Docker: {docker_data["name"]} - {len(docker_data["content"])} '
                                f'line(s) - (Created {docker_data["timestamp_utc"]})')
docker_lines.extend(docker_data['content'])
return docker_lines
def _post_image_hook(self) -> List[str]:
"""Contents that must be after baseimages but before development environments. """
docker_lines = ["# Post-image creation hooks",
'COPY entrypoint.sh /usr/local/bin/entrypoint.sh',
'RUN chmod u+x /usr/local/bin/entrypoint.sh',
'']
return docker_lines
def _entrypoint_hooks(self):
""" Contents of docker setup that must be at end of Dockerfile. """
env_vars = "ENV LB_HOME=/mnt/labbook"
env_vars = f"{env_vars} LB_CODE=/mnt/labbook/code"
env_vars = f"{env_vars} LB_INPUT=/mnt/labbook/input"
env_vars = f"{env_vars} LB_OUTPUT=/mnt/labbook/output"
env_vars = f"{env_vars} PROJECT_ROOT=/mnt/labbook"
env_vars = f"{env_vars} PROJECT_CODE=/mnt/labbook/code"
env_vars = f"{env_vars} PROJECT_INPUT=/mnt/labbook/input"
env_vars = f"{env_vars} PROJECT_OUTPUT=/mnt/labbook/output"
return [
'## Entrypoint hooks',
env_vars,
"# Run Environment",
'ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]',
'WORKDIR /mnt/labbook',
'',
'# Use this command to make the container run indefinitely',
'CMD ["tail", "-f", "/dev/null"]',
'']
def assemble_dockerfile(self, write: bool = True) -> str:
"""Create the content of a Dockerfile per the fields in the indexed data.
Returns:
str - Content of Dockerfile in single string using os.linesep as line separator.
"""
assembly_pipeline = [self._extra_base_images,
self._load_baseimage,
self._load_packages,
self._load_docker_snippets,
self._post_image_hook,
self._entrypoint_hooks]
# flat map the results of executing the pipeline.
try:
docker_lines: List[str] = functools.reduce(lambda a, b: a + b, [f() for f in assembly_pipeline], [])
except KeyError as e:
logger.error('Component file missing key: {}'.format(e))
raise
except Exception as e:
logger.error(e)
raise
dockerfile_name = os.path.join(self.labbook.root_dir, ".gigantum", "env", "Dockerfile")
if write:
logger.info("Writing Dockerfile to {}".format(dockerfile_name))
with open(dockerfile_name, "w") as dockerfile:
dockerfile.write('\n'.join(docker_lines))
else:
logger.info("Dockerfile NOT being written; write=False; {}".format(dockerfile_name))
return os.linesep.join(docker_lines)
```
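A hedged sketch of driving `assemble_dockerfile`; `lb` stands in for an already-constructed `LabBook` whose `.gigantum/env` tree satisfies `_validate_labbook_tree()`, since building one is outside the scope of this file:
```python
# Sketch: `lb` is assumed to be an existing, valid LabBook instance.
from gtmcore.imagebuilder.imagebuilder import ImageBuilder

builder = ImageBuilder(lb)
dockerfile_text = builder.assemble_dockerfile(write=False)  # assemble without touching disk
print(dockerfile_text.splitlines()[0])  # e.g. "# Dockerfile generated on ..."
```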
#### File: gtmcore/logging/timefunc.py
```python
import time
from .logger import LMLogger
logger = LMLogger.get_logger()
def timefunc(method):
""" Decorator to be used to time functions. The usage is:
@timefunc
def function(....)
    where `def function(...)` is an ordinary function definition.
    This is a debugging and logging utility only and should not
    be left around in production code.
Used for performance instrumentation.
"""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
logger.info('function: %r %2.2f ms' % (method.__name__, (te - ts) * 1000))
return result
return timed
```
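A concrete example of the decorator described above; `slow_square` is a made-up function used only to show the logged timing line:
```python
import time
from gtmcore.logging.timefunc import timefunc  # assumes gtmcore is importable

@timefunc
def slow_square(x):
    time.sleep(0.1)  # simulate some work
    return x * x

slow_square(4)  # logs roughly: function: 'slow_square' 100.xx ms, then returns 16
```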
#### File: workflows/tests/test_gitlab.py
```python
import pytest
import responses
from gtmcore.workflows.gitlab import GitLabManager, ProjectPermissions, GitLabException
@pytest.fixture()
def gitlab_mngr_fixture():
"""A pytest fixture that returns a GitLabRepositoryManager instance"""
yield GitLabManager("repo.gigantum.io", "usersrv.gigantum.io", "fakeaccesstoken")
@pytest.fixture()
def property_mocks_fixture():
"""A pytest fixture that returns a GitLabRepositoryManager instance"""
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook',
json=[{
"id": 26,
"description": "",
}],
status=200)
yield
class TestGitLabManager(object):
@responses.activate
def test_user_token(self, gitlab_mngr_fixture):
"""test the user_token property"""
# Setup responses mock for this test
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
assert gitlab_mngr_fixture._gitlab_token is None
# Get token
token = gitlab_mngr_fixture.user_token
assert token == '<PASSWORD>'
assert gitlab_mngr_fixture._gitlab_token == '<PASSWORD>'
# Assert token is returned and set on second call and does not make a request
responses.add(responses.GET, 'https://usersrv.gigantum.io/key', status=400)
assert token == gitlab_mngr_fixture.user_token
@responses.activate
def test_user_token_error(self, gitlab_mngr_fixture):
"""test the user_token property"""
# Setup responses mock for this test
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'message': 'it failed'}, status=400)
# Make sure error is raised when getting the key fails and returns !=200
with pytest.raises(GitLabException):
_ = gitlab_mngr_fixture.user_token
def test_repository_id(self):
"""test the repository_id property"""
assert GitLabManager.get_repository_id("tester", "test-lb-1") == "tester%2Ftest-lb-1"
@responses.activate
def test_exists_true(self, property_mocks_fixture, gitlab_mngr_fixture):
"""test the exists method for a repo that should exist"""
assert gitlab_mngr_fixture.repository_exists("testuser", "test-labbook") is True
@responses.activate
def test_exists_false(self, gitlab_mngr_fixture):
"""test the exists method for a repo that should not exist"""
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fderp',
json=[{
"message": "404 Project Not Found"
}],
status=404)
assert gitlab_mngr_fixture.repository_exists("testuser", "derp") is False
@responses.activate
def test_create(self, gitlab_mngr_fixture, property_mocks_fixture):
"""test the create method"""
# Setup responses mock for this test
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects',
json={
"id": 27,
"description": "",
},
status=201)
responses.add(responses.POST, 'https://usersrv.gigantum.io/webhook/testuser/new-labbook',
json={
"success": True
},
status=201)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"message": "404 Project Not Found"
}],
status=404)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"id": 27,
"description": "",
}],
status=200)
gitlab_mngr_fixture.create_labbook("testuser", "new-labbook", visibility="private")
assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is True
@responses.activate
def test_create_errors(self, gitlab_mngr_fixture, property_mocks_fixture):
"""test the create method"""
# Should fail because the repo "already exists"
with pytest.raises(ValueError):
gitlab_mngr_fixture.create_labbook("testuser", "test-labbook", visibility="private")
# Should fail because the call to gitlab failed
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects',
json={
"id": 27,
"description": "",
},
status=400)
with pytest.raises(ValueError):
gitlab_mngr_fixture.create_labbook("testuser", "test-labbook", visibility="private")
@responses.activate
def test_get_collaborators(self, gitlab_mngr_fixture, property_mocks_fixture):
"""Test the get_collaborators method"""
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": ProjectPermissions.OWNER.value,
"expires_at": None
},
{
"id": 30,
"name": "<NAME>",
"username": "jd",
"access_level": ProjectPermissions.READ_ONLY.value,
"expires_at": None
}
],
status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
status=400)
collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
assert len(collaborators) == 2
assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
assert collaborators[1] == (30, 'jd', ProjectPermissions.READ_ONLY)
# Verify it fails on error to gitlab (should get second mock on second call)
with pytest.raises(ValueError):
gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
@responses.activate
def test_add_collaborator(self, gitlab_mngr_fixture, property_mocks_fixture):
"""Test the add_collaborator method"""
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "New Person",
"username": "person100",
"state": "active",
}
],
status=200)
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
json={
"id": 100,
"name": "New Person",
"username": "person100",
"state": "active",
},
status=201)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": 40,
"expires_at": None
},
{
"id": 100,
"name": "New Person",
"username": "person100",
"access_level": 30,
"expires_at": None
}
],
status=200)
gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "person100",
ProjectPermissions.READ_WRITE)
collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
assert len(collaborators) == 2
assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
assert collaborators[1] == (100, 'person100', ProjectPermissions.READ_WRITE)
@responses.activate
def test_add_collaborator_errors(self, gitlab_mngr_fixture, property_mocks_fixture):
"""Test the add_collaborator method exception handling"""
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "New Person",
"username": "person100",
"state": "active",
}
],
status=400)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "New Person",
"username": "person100",
"state": "active",
}
],
status=201)
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
json={
"id": 100,
"name": "New Person",
"username": "person100",
"state": "active",
},
status=400)
with pytest.raises(ValueError):
_ = gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "person100", ProjectPermissions.OWNER)
with pytest.raises(ValueError):
_ = gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "person100", ProjectPermissions.READ_ONLY)
@responses.activate
def test_delete_collaborator(self, gitlab_mngr_fixture, property_mocks_fixture):
"""Test the delete_collaborator method"""
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "New Person",
"username": "person100",
"state": "active",
}
],
status=200)
responses.add(responses.DELETE, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members/100',
status=204)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": 40,
"expires_at": None
}
],
status=200)
gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')
collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
assert len(collaborators) == 1
assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
@responses.activate
def test_delete_collaborator_error(self, gitlab_mngr_fixture, property_mocks_fixture):
"""Test the delete_collaborator method exception handling"""
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/users?username=person100',
json=[
{
"id": 100,
"name": "<NAME>",
"username": "person100",
"state": "active",
}
],
status=200)
responses.add(responses.DELETE, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members/100',
status=204)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
json=[
{
"id": 29,
"name": "<NAME>",
"username": "janed",
"access_level": 40,
"expires_at": None
}
],
status=400)
# What is this test even for?
# gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')
# with pytest.raises(TestGitLabManager):
# gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')
@responses.activate
def test_error_on_missing_repo(self, gitlab_mngr_fixture):
"""Test the exception handling on a repo when it doesn't exist"""
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook',
json=[{
"message": "404 Project Not Found"
}],
status=404)
with pytest.raises(ValueError):
gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
with pytest.raises(ValueError):
gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "test", ProjectPermissions.READ_ONLY)
with pytest.raises(ValueError):
gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 100)
@responses.activate
def test_configure_git_credentials(self, gitlab_mngr_fixture):
"""test the configure_git_credentials method"""
host = "test.gigantum.io"
username = "testuser"
# Setup responses mock for this test
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
# Check that creds are empty
token = gitlab_mngr_fixture._check_if_git_credentials_configured(host, username)
assert token is None
# Set creds
gitlab_mngr_fixture.configure_git_credentials(host, username)
# Check that creds are configured
token = gitlab_mngr_fixture._check_if_git_credentials_configured(host, username)
assert token == "<PASSWORD>"
# Set creds
gitlab_mngr_fixture.clear_git_credentials(host)
# Check that creds are configured
token = gitlab_mngr_fixture._check_if_git_credentials_configured(host, username)
assert token is None
@responses.activate
def test_delete(self, gitlab_mngr_fixture, property_mocks_fixture):
"""test the create method"""
# Setup responses mock for this test
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"id": 27,
"description": "",
}],
status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"id": 27,
"description": "",
}],
status=200)
responses.add(responses.DELETE, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json={
"message": "202 Accepted"
},
status=202)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"message": "404 Project Not Found"
}],
status=404)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"message": "404 Project Not Found"
}],
status=404)
responses.add(responses.DELETE, 'https://usersrv.gigantum.io/webhook/testuser/new-labbook',
json={},
status=204)
assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is True
gitlab_mngr_fixture.remove_repository("testuser", "new-labbook")
assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is False
with pytest.raises(ValueError):
gitlab_mngr_fixture.remove_repository("testuser", "new-labbook")
```
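The tests above lean on one behaviour of the `responses` library: when several mocks are registered for the same URL, they are served in registration order (see `test_get_collaborators`). A standalone illustration with a made-up URL:
```python
import requests
import responses

@responses.activate
def demo():
    responses.add(responses.GET, 'https://example.test/key', json={'key': 'a'}, status=200)
    responses.add(responses.GET, 'https://example.test/key', json={'key': 'b'}, status=400)
    assert requests.get('https://example.test/key').json() == {'key': 'a'}  # first mock
    assert requests.get('https://example.test/key').status_code == 400      # second mock

demo()
```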
#### File: gigantum-client/testing/Test_02_ImportExport.py
```python
import time
import uuid
import pprint
import json
from colors import color
from misc import (gqlquery as run_query, endpt_post, USERNAME,
make_random_file, container_under_test, drop_file,
cleanup_random_files)
createLabbookQuery = '''
mutation CreateLabbook($name: String!) {
createLabbook(input: {
name: $name,
description: "Created via test harness",
repository: "gigantum_environment-components",
componentId: "python3-minimal",
revision: 4
}) {
labbook {
id
name
}
}
}
'''
labbookQuery = '''
query GetLabbook($owner: String!, $name: String!) {
labbook(name: $name, owner: $owner) {
id
owner
name
description
sizeBytes
backgroundJobs {
jobKey
status
result
jobMetadata
failureMessage
}
}
}
'''
exportLabbookQuery = '''
mutation export($owner: String!, $name: String!) {
exportLabbook(input: {
owner: $owner,
labbookName: $name
}) {
jobKey
}
}
'''
def export_labbook(endpoint, variables) -> float:
    # Export labbook mutation
    v = variables
    d = run_query(endpoint, 'Export Labbook', exportLabbookQuery, v)
    job_key = d['data']['exportLabbook']['jobKey']
waiting = True
t0 = time.time()
while waiting:
d = run_query(endpoint, 'Query Export Status', labbookQuery,
variables)
bgjobs = d['data']['labbook']['backgroundJobs']
for j in bgjobs:
md = json.loads(j['jobMetadata'])
if md.get('method') == 'export_labbook':
if j['status'] in ['failed', 'finished']:
tfin = time.time()
pub_time = tfin-t0
print(f'Exported project {d["data"]["labbook"]["owner"]}'
f'/{d["data"]["labbook"]["name"]} '
f'(size {d["data"]["labbook"]["sizeBytes"]}b) '
f'in {pub_time:.2f}s')
waiting = False
return pub_time
time.sleep(1)
def check_limit(desc, time_allowed, time_executed):
if time_executed > time_allowed:
failt = color('OVERTIME', 'orange')
print(f'[{failt}] {desc} (max {time_allowed:.2f}s; took {time_executed:.2f}s)')
else:
passt = color('PASS', 'green')
print(f'[{passt}] {desc} (max {time_allowed:.2f}s; took {time_executed:.2f}s)')
if __name__ == '__main__':
lbname = f'cli-{uuid.uuid4().hex[:4]}'
print(f'Using labbook name: {lbname}')
endpoint = endpt_post
container_id = container_under_test()
run_query(endpoint, 'Create Labbook', createLabbookQuery,
{'name': lbname})
drop_file(container_id, make_random_file(1000000), USERNAME, USERNAME, lbname, 'code')
drop_file(container_id, make_random_file(1000000), USERNAME, USERNAME, lbname, 'input')
t = export_labbook(endpoint, variables={'name': lbname, 'owner': USERNAME})
check_limit("Export 2MB LB", 5.0, t)
print(f'## Export {lbname} (50 MB file in code and input)')
drop_file(container_id, make_random_file(50000000), USERNAME, USERNAME, lbname, 'code')
drop_file(container_id, make_random_file(50000000), USERNAME, USERNAME, lbname, 'input')
t = export_labbook(endpoint, variables={'name': lbname, 'owner': USERNAME})
check_limit("Export 100MB LB", 15.0, t)
    print(f'## Export {lbname} (1GB file in code and input)')
drop_file(container_id, make_random_file(1000000000), USERNAME, USERNAME, lbname, 'code')
drop_file(container_id, make_random_file(1000000000), USERNAME, USERNAME, lbname, 'input')
t = export_labbook(endpoint, variables={'name': lbname, 'owner': USERNAME})
check_limit("Export 2GB LB", 40.0, t)
cleanup_random_files()
``` |
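`check_limit` above only compares two numbers and prints a verdict, so it can be exercised on its own with made-up timings (treating the script above as an importable module is a simplification):
```python
from Test_02_ImportExport import check_limit  # hypothetical import of the script above

check_limit("Export 2MB LB", time_allowed=5.0, time_executed=1.3)  # prints [PASS] ...
check_limit("Export 2MB LB", time_allowed=5.0, time_executed=7.9)  # prints [OVERTIME] ...
```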
{
"source": "jjw-DL/mmdetection3d_Noted",
"score": 3
} |
#### File: core/utils/gaussian.py
```python
import numpy as np
import torch
def gaussian_2d(shape, sigma=1):
"""Generate gaussian map.
Args:
shape (list[int]): Shape of the map.
sigma (float): Sigma to generate gaussian map.
Defaults to 1.
Returns:
np.ndarray: Generated gaussian map.
"""
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_heatmap_gaussian(heatmap, center, radius, k=1):
"""Get gaussian masked heatmap.
Args:
heatmap (torch.Tensor): Heatmap to be masked.
center (torch.Tensor): Center coord of the heatmap.
radius (int): Radius of gausian.
K (int): Multiple of masked_gaussian. Defaults to 1.
Returns:
torch.Tensor: Masked heatmap.
"""
diameter = 2 * radius + 1 # eg:radius=2
gaussian = gaussian_2d((diameter, diameter), sigma=diameter / 6) # 根据半径生成gaussian map eg:(5,5)
x, y = int(center[0]), int(center[1]) # 获取中心坐标
height, width = heatmap.shape[0:2] # 获取heatmap的高和宽
# 对gaussian map上下左右进行截断防止越界
left, right = min(x, radius), min(width - x, radius + 1) # eg:2 和 3
top, bottom = min(y, radius), min(height - y, radius + 1)# eg:2 和 3
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] # 截取heatmap的mask
masked_gaussian = torch.from_numpy(
gaussian[radius - top:radius + bottom,
radius - left:radius + right]).to(heatmap.device,
torch.float32) # 截取gaussian map并转化为tensor 0 - 5
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap) # 将gaussian赋值到mask对应位置,masked_heatmap与heatmap在内存上是一样的
return heatmap
def gaussian_radius(det_size, min_overlap=0.5):
"""Get radius of gaussian.
Args:
det_size (tuple[torch.Tensor]): Size of the detection result.
min_overlap (float): Gaussian_overlap. Defaults to 0.5.
Returns:
torch.Tensor: Computed radius.
"""
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = torch.sqrt(b1**2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = torch.sqrt(b2**2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = torch.sqrt(b3**2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
``` |
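A small sketch exercising the helpers above; the import path is inferred from the file location and may differ in practice:
```python
import torch
from mmdet3d.core.utils.gaussian import draw_heatmap_gaussian, gaussian_radius  # path assumed

heatmap = torch.zeros((32, 32))                    # empty heatmap
det_size = (torch.tensor(4.0), torch.tensor(6.0))  # (height, width) of a detection
radius = max(int(gaussian_radius(det_size, min_overlap=0.5)), 2)  # clamp to a minimum of 2
draw_heatmap_gaussian(heatmap, torch.tensor([10, 5]), radius)     # center at x=10, y=5
assert heatmap[5, 10] == 1.0                       # Gaussian peak sits at the center
```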
{
"source": "jjwhitham/latitude_challenge",
"score": 3
} |
#### File: latitude_challenge/delimited_writer/fixed_width_writer.py
```python
import random
import os
# For generating cp1252 characters
from encodings.cp1252 import decoding_table
class MockFixedWidthFileWriter:
""" A helper class to generate mock fixed width data for
testing delimited_writer.py
"""
def __init__(self, encoding_props, fixed_width_newline):
self.encoding_props = encoding_props
self.fixed_width_newline = fixed_width_newline
def generate_fixed_width_data(self):
""" Using the decoding table for cp1252, picks a random character for each column.
Fills the column with a random multiple of this character, between 1 and the
column length, padding to the right with spaces.
"""
data = []
# Generate 10 lines of data
num_lines = 10
for line in range(num_lines):
line_data = []
offsets = self.encoding_props.offsets
for offset in offsets:
repeats = random.randint(1, offset)
column_data = ""
# Choose a random char from decoding table, skipping the
# first 33 chars, as they are hardware control chars/non-visible
rand_char = random.randint(33, 255)
# Replace control char (127), undefined chars ('\ufffe')
# & no-break space (160) with "!"
if rand_char in [127, 129, 141, 143, 144, 157, 160]:
rand_char = 33
column_data += decoding_table[rand_char] * repeats
line_data.append(column_data.ljust(offset))
data.append(line_data)
return data
def write_fixed_width_file(self, fixed_width_data):
""" Writes fixed width file with specified newline=fixed_width_newline.
f.write("\n") to achieve this in a platform-independent way,
as per Python docs:
https://docs.python.org/3/library/os.html#os.linesep
"""
fixed_width_filename = self.encoding_props.fixed_width_filename
fixed_width_newline = self.fixed_width_newline
fixed_width_encoding = self.encoding_props.fixed_width_encoding
with open(
fixed_width_filename,
"w",
encoding=fixed_width_encoding,
newline=fixed_width_newline,
) as f:
for line in fixed_width_data:
for field in line:
f.write(field)
f.write("\n")
``` |
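A hedged usage sketch; the real project presumably builds `encoding_props` from its spec, so a `SimpleNamespace` carrying the three attributes this class reads stands in for it here, and the import path is assumed from the file location:
```python
from types import SimpleNamespace
from delimited_writer.fixed_width_writer import MockFixedWidthFileWriter  # path assumed

props = SimpleNamespace(offsets=[5, 12, 3],  # column widths
                        fixed_width_filename="mock_fixed_width.txt",
                        fixed_width_encoding="cp1252")
writer = MockFixedWidthFileWriter(props, fixed_width_newline="\r\n")
rows = writer.generate_fixed_width_data()    # 10 rows of space-padded columns
writer.write_fixed_width_file(rows)          # each line is 5+12+3 chars plus CRLF
```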
{
"source": "jjwilke/sst-macro",
"score": 3
} |
#### File: bin/tools/ref.py
```python
import os
import sys
files = """
"""
def check_files(fxn):
configStatus = ""
try:
configStatus = open("../config.status").read()
except:
sys.exit("could not find valid config.status file")
import re
match = re.compile("srcdir=(.*)").search(configStatus)
if not match:
sys.exit("could not located srcdir in config.status")
srcdir = match.groups()[0].strip().strip('"').strip("'")
refdir = os.path.join(srcdir, "tests", "reference")
for f in files.strip().splitlines():
path = f.strip().split()[0]
fname = os.path.split(path)[-1].replace("chk","tmp")
ref = os.path.join(refdir, fname.replace("tmp","ref"))
fxn(fname, ref)
``` |
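A hypothetical callback showing how `check_files` is meant to be driven: it hands each generated/reference filename pair to whatever function you pass in:
```python
import filecmp

def compare(tmp_name, ref_path):
    # Hypothetical callback: report whether the generated file matches its reference.
    same = filecmp.cmp(tmp_name, ref_path, shallow=False)
    print(f"{tmp_name}: {'OK' if same else 'DIFFERS from ' + ref_path}")

# check_files(compare)  # a no-op until the `files` string above is populated
```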
{
"source": "JJWilliams27/Cricket_RainAffectedMatchAnalysis",
"score": 3
} |
#### File: JJWilliams27/Cricket_RainAffectedMatchAnalysis/scrape_cricinfo.py
```python
import pandas as pd
import pdb
import requests
from espncricinfo.player import Player
from espncricinfo.match import Match
from espncricinfo.series import Series
# Get cricket world cup
#cwc19 = Series('8039')
# Get all matches
#matches = Series.get_events_for_season(cwc19,2019)
# Construct original table
#groupstage = pd.read_csv('cwc19_final_table.csv')
# Get County Championship
#cchamp1 = Series('8052')
#cchamp2 = Series('8204')
# Get county champsionship seasons
print('Getting Match IDs')
#matches = Series.get_events_for_season(cchamp1,2018)
#matches.append(Series.get_events_for_season(cchamp2,2018))
m=Match(1166949)
i1,i2,i3,i4 = m.getbattingdataframe()
pdb.set_trace()
req = requests.get(m.json_url)
url = m.espn_api_url
def get_json(url):
r = requests.get(url)
if r.status_code == 404:
raise "Not Found"
else:
return r.json()
test = get_json(url)
#notes
#gameInfo
#debuts
#rosters
#matchcards
#news
#article
#videos
#leaders
#header
pdb.set_trace()
``` |
{
"source": "JJWilliams27/Reddit_NLP",
"score": 3
} |
#### File: JJWilliams27/Reddit_NLP/plot_time_series_subplot.py
```python
import os
import pandas as pd
import numpy as np
import datetime as dt
import csv
import matplotlib.pyplot as plt
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# All Posts
# Read CSV
df = pd.read_csv('ClimateSkepticsAllPosts.csv',index_col=0,parse_dates=True)
def get_yearmonth(timestamp):
month = timestamp[5:7]
year = timestamp[:4]
monthyear = str(year) + '/' + str(month)
return monthyear
df['YearMonth'] = df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
df_grp = df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DateFrame
dateTime = pd.DataFrame()
dateTime['YearMonth'] = Y_M
dateTime['Posts'] = Num_Posts
dateTime.to_csv('ClimateSkeptics_Posts_per_Month.csv')
datetime_list = []
for i in list(range(0,len(dateTime))):
month = int(dateTime['YearMonth'][i][5:7])
year = int(dateTime['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
dateTime['Date'] = datetime_list
# All Submissions
# Read CSV
path=os.getcwd()
fullpath=path+'/Outputs/CS_FULL/LDA_Dataframes/topic_timeseries_10.csv'
df = pd.read_csv(fullpath,index_col=0,parse_dates=True)
df['YearMonth'] = df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
df_grp = df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DateFrame
dateTime2 = pd.DataFrame()
dateTime2['YearMonth'] = Y_M
dateTime2['Posts'] = Num_Posts
dateTime2.to_csv('ClimateSkeptics_Submissions_per_Month.csv')
datetime_list = []
for i in list(range(0,len(dateTime2))):
month = int(dateTime2['YearMonth'][i][5:7])
year = int(dateTime2['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
dateTime2['Date'] = datetime_list
# Get Subscribers
subs = pd.read_csv('climateskeptics_subscribers.csv')
subs.columns = ['timestamp','Subscribers']
datetime_list = []
for i in list(range(0,len(subs))):
day = subs['timestamp'][i][:2]
month = subs['timestamp'][i][3:5]
year = subs['timestamp'][i][6:10]
datetime_list.append(dt.date(int(year),int(month),int(day)))
subs['Date'] = datetime_list
# NOW DO SPECIFIC SEARCHES
# CLIMATEGATE
cgate_posts = pd.read_csv('CS_CGate_posts.csv')
cgate_posts = cgate_posts.drop(['title','url','comms_num'],axis=1)
cgate_coms = pd.read_csv('CS_CGate_comments.csv')
cgate_df = pd.concat([cgate_posts,cgate_coms])
cgate_df['YearMonth'] = cgate_df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
cgate_df.drop_duplicates(subset ="id", keep = 'first', inplace = True) # Remove duplicates based on ID
cgate_df_grp = cgate_df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in cgate_df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DateFrame
CG_dateTime = pd.DataFrame()
CG_dateTime['YearMonth'] = Y_M
CG_dateTime['Posts'] = Num_Posts
datetime_list = []
for i in list(range(0,len(CG_dateTime))):
month = int(CG_dateTime['YearMonth'][i][5:7])
year = int(CG_dateTime['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
CG_dateTime['Date'] = datetime_list
# IPCC AR4
ipcc_posts = pd.read_csv('CS_IPCC_posts.csv')
ipcc_posts = ipcc_posts.drop(['title','url','comms_num'],axis=1)
ipcc_coms = pd.read_csv('CS_IPCC_comments.csv')
ipcc_df = pd.concat([ipcc_posts,ipcc_coms])
ipcc_df['YearMonth'] = ipcc_df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
ipcc_df.drop_duplicates(subset ="id", keep = 'first', inplace = True) # Remove duplicates based on ID
ipcc_df_grp = ipcc_df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in ipcc_df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DateFrame
IPCC_dateTime = pd.DataFrame()
IPCC_dateTime['YearMonth'] = Y_M
IPCC_dateTime['Posts'] = Num_Posts
datetime_list = []
for i in list(range(0,len(IPCC_dateTime))):
month = int(IPCC_dateTime['YearMonth'][i][5:7])
year = int(IPCC_dateTime['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
IPCC_dateTime['Date'] = datetime_list
# Paris COP21
cop21_posts = pd.read_csv('CS_COP21_posts.csv')
cop21_posts = cop21_posts.drop(['title','url','comms_num'],axis=1)
cop21_coms = pd.read_csv('CS_COP21_comments.csv')
cop21_df = pd.concat([cop21_posts,cop21_coms])
cop21_df.drop_duplicates(subset ="id", keep = 'first', inplace = True) # Remove duplicates based on ID
cop21_df['YearMonth'] = cop21_df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
cop21_df_grp = cop21_df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in cop21_df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DateFrame
COP21_dateTime = pd.DataFrame()
COP21_dateTime['YearMonth'] = Y_M
COP21_dateTime['Posts'] = Num_Posts
datetime_list = []
for i in list(range(0,len(COP21_dateTime))):
month = int(COP21_dateTime['YearMonth'][i][5:7])
year = int(COP21_dateTime['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
COP21_dateTime['Date'] = datetime_list
# Cooling/Snow/Freeze/Cold
cold_posts = pd.read_csv('CS_cooling_posts.csv')
cold_posts = cold_posts.drop(['title','url','comms_num'],axis=1)
cold_coms = pd.read_csv('CS_cooling_comments.csv')
cold_df = pd.concat([cold_posts,cold_coms])
cold_df.drop_duplicates(subset ="id", keep = 'first', inplace = True) # Remove duplicates based on ID
cold_df['YearMonth'] = cold_df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
cold_df_grp = cold_df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in cold_df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DateFrame
cold_dateTime = pd.DataFrame()
cold_dateTime['YearMonth'] = Y_M
cold_dateTime['Posts'] = Num_Posts
datetime_list = []
for i in list(range(0,len(cold_dateTime))):
month = int(cold_dateTime['YearMonth'][i][5:7])
year = int(cold_dateTime['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
cold_dateTime['Date'] = datetime_list
# Plot
fig = plt.figure()
ax = plt.subplot(221)
ax.text(0.02, 0.95, 'A', transform=ax.transAxes,fontsize=16, fontweight='bold', va='top')
p1 = dateTime.plot(x='Date',y='Posts',linewidth=3,legend=False,fontsize=10,color='coral',ax=ax)
p1.set_ylabel("Number of Posts", fontsize=14)
p1.set_xlabel("Date", fontsize=14)
ax2 = plt.subplot(222)
ax2.text(0.02, 0.95, 'B', transform=ax2.transAxes,fontsize=16, fontweight='bold', va='top')
p2 = dateTime2.plot(x='Date',y='Posts',linewidth=3,legend=False,fontsize=10,color='crimson',ax=ax2)
p2.set_ylabel("Number of Submissions", fontsize=14)
p2.set_xlabel("Date", fontsize=14)
ax3 = plt.subplot(223)
ax3.text(0.02, 0.95, 'C', transform=ax3.transAxes,fontsize=16, fontweight='bold', va='top')
p3 = subs.plot(x='Date',y='Subscribers',linewidth=3,legend=False,fontsize=10,color='lightskyblue',ax=ax3)
p3.set_xlim(min(dateTime['Date']), max(dateTime['Date']))
p3.set_xlabel("Date",fontsize=14)
p3.set_ylabel("Subscribers",fontsize=14)
ax4 = plt.subplot(224)
ax4.text(0.02, 0.95, 'D', transform=ax4.transAxes,fontsize=16, fontweight='bold', va='top')
p4 = CG_dateTime.plot(x='Date',y='Posts',legend=False,fontsize=10,color='blue',ax=ax4,label='"Climategate"')
IPCC_dateTime.plot(x='Date',y='Posts',legend=False,fontsize=10,color='red',ax=ax4,label='"IPCC"')
COP21_dateTime.plot(x='Date',y='Posts',legend=False,fontsize=10,color='magenta',ax=ax4,label='"Paris"')
cold_dateTime.plot(x='Date',y='Posts',legend=False,fontsize=10,color='cyan',ax=ax4,label='"Cooling"')
ax4.set_xlabel("Date", fontsize=14)
ax4.set_ylabel("Number of Submissions", fontsize=14)
ax4.legend(loc='upper right')
#fig.tight_layout()
plt.show()
```
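The script above repeats the same bin-by-month-and-count pattern for every keyword dataset; a compact helper doing the equivalent could look like the sketch below (the `timestamp` column format is assumed to be `YYYY-MM-DD ...`, as in the CSVs used above):
```python
import pandas as pd

def monthly_counts(df):
    # Equivalent of the repeated groupby('YearMonth') blocks above.
    ym = df['timestamp'].str[:7].str.replace('-', '/', regex=False)  # "YYYY/MM"
    out = ym.value_counts().sort_index().reset_index()
    out.columns = ['YearMonth', 'Posts']
    out['Date'] = pd.to_datetime(out['YearMonth'], format='%Y/%m')   # first of each month
    return out
```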
#### File: JJWilliams27/Reddit_NLP/scrape_reddit.py
```python
import praw
from psaw import PushshiftAPI
import pandas as pd
import datetime as dt
import os
# Options
save_posts = 1
save_comments = 1
get_top_submissions = 0
get_all_submissions = 1
get_comments_for_timeseries = 0
get_submissions_for_timeseries = 0
# All Posts
start_epoch = int(dt.datetime(2008, 1, 1).timestamp()) # Set start point for post extraction
number_of_submissions = None # Set number of posts (None = all posts)
# Create Functions
def get_date(created):
return dt.datetime.fromtimestamp(created)
# Set up Reddit API
reddit = praw.Reddit(client_id='INSERT_CLIENT_ID_HERE', \
client_secret='INSERT_CLIENT_SECRET_HERE', \
user_agent='INSERT_USER_AGENT_HERE', \
username='INSERT_USERNAME_HERE', \
password='<PASSWORD>')
api = PushshiftAPI(reddit) # Use Pushshift API to get around 1000 submission limit imposed by praw
# Access Climate Skepticism Subreddit
subreddit = reddit.subreddit('ClimateSkeptics')
# Loop through top submissions and append to output dataframe
if get_top_submissions == 1:
# Create Output Dictionary
topics_dict = { "title":[], \
"score":[], \
"id":[], "url":[], \
"comms_num": [], \
"created": [], \
"body":[]}
# Access Top x posts
print("Retrieving Submissions")
top_subreddit = subreddit.top(limit=500)
print("Appending Submissions to Dataframe")
count = 0
for submission in top_subreddit:
print(count)
path = os.getcwd()
conversedict = {}
dirname = path + '/Comments'
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Remove limit on comment extraction
submission.comments.replace_more(limit=None)
topics_dict["title"].append(submission.title)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["url"].append(submission.url)
topics_dict["comms_num"].append(submission.num_comments)
topics_dict["created"].append(submission.created)
topics_dict["body"].append(submission.selftext)
temp_array = []
for comment in submission.comments.list():
temp_array.append(comment)
if comment.id not in conversedict:
comment.created = get_date(comment.created)
conversedict[comment.id] = [comment.body,comment.ups,comment.created,{}] # Original = [comment.body,{}]
if comment.parent() != submission.id:
parent = str(comment.parent())
conversedict[parent][3][comment.id] = [comment.ups, comment.body, comment.created]
#conversedict[comment.id] = [comment.body,{}]
#if comment.parent() != submission.id:
# parent = str(comment.parent())
# pdb.set_trace()
# conversedict[parent][1][comment.id] = [comment.ups, comment.body]
converse_df = pd.DataFrame(conversedict)
count = count+1
if save_comments == 1:
converse_df.to_csv('%s' %(outname), index=False)
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert Date to Timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
# Export as CSV
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%sTop500Posts_Test.csv' %(subreddit), index=False)
if get_all_submissions == 1:
years=[2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019]
total_posts = []
for year in years:
print('Getting Submissions for %s' %(year))
start_epoch = int(dt.datetime(year, 1, 1).timestamp()) # Set start point for post extraction
end_epoch = int(dt.datetime(year,12,31).timestamp()) # Set end point
# Create Output Dictionary
topics_dict = { "title":[], \
"score":[], \
"id":[], "url":[], \
"comms_num": [], \
"created": [], \
"body":[]}
# Access Top x posts
print("Retrieving Submissions")
all_subreddit = list(api.search_submissions(before=end_epoch,after=start_epoch,subreddit=subreddit,filter=['url','author','title','subreddit'],limit=number_of_submissions))
total_posts.append(len(all_subreddit))
print("Appending Submissions to Dataframe")
count = 1
num = len(all_subreddit)
for submission in all_subreddit:
print(str(count) + '/' + str(num))
path = os.getcwd()
dirname = path + '/Comments'
conversedict = {}
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Remove limit on comment extraction
topics_dict["title"].append(submission.title)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["url"].append(submission.url)
topics_dict["comms_num"].append(submission.num_comments)
topics_dict["created"].append(submission.created)
topics_dict["body"].append(submission.selftext)
temp_array = []
for comment in submission.comments.list():
temp_array.append(comment)
if comment.id not in conversedict:
try:
conversedict[comment.id] = [comment.body,comment.ups,comment.created,{}] # Original = [comment.body,{}]
if comment.parent() != submission.id:
parent = str(comment.parent())
conversedict[parent][3][comment.id] = [comment.ups, comment.body, comment.created]
#conversedict[comment.id] = [comment.body,{}]
#if comment.parent() != submission.id:
# parent = str(comment.parent())
# pdb.set_trace()
# conversedict[parent][1][comment.id] = [comment.ups, comment.body]
except:
pass # Skip if no comments
converse_df = pd.DataFrame(conversedict)
count = count+1
if save_comments == 1:
converse_df.to_csv('%s' %(outname), index=False)
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert Date to Timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%sAllPosts' %(subreddit) + str(year) + '.csv', index=False)
if get_comments_for_timeseries == 1:
# Create Output Dictionary
topics_dict = { "created":[], \
"score":[], \
"id":[], \
"body": []}
searches = ['IPCC','AR4','AR5'] # Kirilenko et al 2015 use climate change and global warming as search terms
for search in searches:
# Access Top x posts
print("Retrieving Submissions")
all_subreddit_comments = list(api.search_comments(q=search,after=start_epoch,subreddit=subreddit,filter=['url','author','title','subreddit'],limit=number_of_submissions))
print("Appending Comments to Dataframe")
count = 0
num = len(all_subreddit_comments)
for submission in all_subreddit_comments:
print(str(count) + '/' + str(num))
path = os.getcwd()
dirname = path + '/Comments'
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Remove limit on comment extraction
topics_dict["created"].append(submission.created)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["body"].append(submission.body)
count = count+1
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert Date to Timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
# Export as CSV
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%s_IPCC_Comments.csv' %(subreddit), index=False)
if get_submissions_for_timeseries == 1:
# Create Output Dictionary
topics_dict = { "created":[], \
"score":[], \
"id":[], "url":[], \
"comms_num": [], \
"title": [], \
"body":[]}
searches = ['IPCC','AR4','AR5'] # Kirilenko et al 2015 use climate change and global warming as search terms
for search in searches:
# Access Top x posts
print("Retrieving Submissions")
all_subreddit = list(api.search_submissions(q=search,after=start_epoch,subreddit=subreddit,filter=['url','author','title','subreddit'],limit=number_of_submissions))
print("Appending Submissions to Dataframe")
count = 0
num = len(all_subreddit)
for submission in all_subreddit:
print(str(count) + '/' + str(num))
path = os.getcwd()
dirname = path + '/Comments'
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Remove limit on comment extraction
topics_dict["created"].append(submission.created)
topics_dict["title"].append(submission.title)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["url"].append(submission.url)
topics_dict["comms_num"].append(submission.num_comments)
topics_dict["body"].append(submission.selftext)
count = count+1
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert Date to Timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
# Export as CSV
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%s_IPCC_Posts.csv' %(subreddit), index=False)
``` |
{
"source": "jjwithanr/todocal",
"score": 3
} |
#### File: jjwithanr/todocal/todocal.py
```python
import tkinter as tk
from tkinter import ttk, messagebox, font
from datetime import *
import sqlite3 as sq
import GoogleCal
class Scheduler(tk.Frame):
def __init__(self, root, task):
tk.Frame.__init__(self, root)
self.root = root
self.task = task
self.selected = []
self._draw()
def show_tasks(self):
self.Tasks.delete(0,'end')
for i in self.task: self.Tasks.insert('end', i)
def get_selected(self):
self.selected = [self.task[i] for i in self.Tasks.curselection()]
self.selectWindow.destroy()
self.schedule_event()
def open_task_selection(self) -> list:
# Create new window
self.selectWindow = tk.Toplevel(self.root)
self.selectWindow.title("Choose tasks to schedule")
self.selectWindow.geometry("400x300")
icon = tk.PhotoImage(file="todo-icon.png")
self.selectWindow.iconphoto(False, icon)
# Window widgets
scrollbar = tk.Scrollbar(self.selectWindow)
scrollbar.pack(side="right", fill = "both")
self.Tasks = tk.Listbox(self.selectWindow, height=11, width=30, font=font.Font(size=15), selectmode=tk.EXTENDED)
self.Tasks.pack(fill=tk.BOTH, expand=True)
self.Tasks.config(yscrollcommand = scrollbar.set)
scrollbar.config(command=self.Tasks.yview)
# ! add warning for no selected tasks?
self.close_bio_btn = tk.Button(self.selectWindow, text="Confirm", width=25, command=self.get_selected)
self.close_bio_btn.pack(side=tk.TOP, pady=5)
self.show_tasks()
return self.selected
def schedule_event(self):
# ? move spinbox to selectedWindow?
# ? add footer saying what Google account signed into.
# ! sign-in button and schedule button?
def getAvailability() -> dict:
# Get time from spinboxes and convert to string
s = self.start_hour.get() + ":" + self.start_min.get() + self.start_clock12hr.get()
e = self.end_hour.get() + ":" + self.end_min.get() + self.end_clock12hr.get()
if not (s and e):
messagebox.showerror("Error", "Invalid time range")
return {}
# Convert string into datetime time objects
start = datetime.strptime(s, "%I:%M%p").time()
end = datetime.strptime(e, "%I:%M%p").time()
if start >= end:
messagebox.showerror("Error","Invalid time range")
return {1:1}
else:
# * insert setting for time duration here
return GoogleCal.schedule_time(start, end, time_duaration=7)
if not self.task:
# Must have a task to schedule task
messagebox.showinfo('Cannot schedule', 'Add a task')
else:
# Check busyness and find available date
scheduled_times = getAvailability()
if scheduled_times == {1:1}: return 0
elif scheduled_times:
# All tasks are in the description
# NOTE: can use settings for single select task scheduling? this is default ...
GoogleCal.create_event(scheduled_times["start"], scheduled_times["end"], description="\n".join(self.selected) if self.selected else "\n".join(self.task))
started = scheduled_times["start"].strftime("%I:%M%p")
ended = scheduled_times["end"].strftime("%I:%M%p")
date = scheduled_times["end"].strftime("%b %d")
messagebox.showinfo("Sucess", "Created event from " + started + " to " + ended + " on " + date)
else:
messagebox.showinfo("Failure", "You are unavailable during this time")
def _draw(self):
scheduleFrame = tk.Frame(master=self.root)
scheduleFrame.pack(fill=tk.BOTH, side=tk.BOTTOM, expand=False)
calendarLabel = ttk.Label(scheduleFrame, text='Schedule Time:')
calendarLabel.pack(side=tk.LEFT, padx=5)
# Spinboxes to pick time
self.start_hour = ttk.Spinbox(master=scheduleFrame, from_=1,to=12, wrap=True, width=3, state="readonly")
self.start_hour.set(4)
self.start_hour.pack(side=tk.LEFT, padx=5)
self.start_min = ttk.Spinbox(master=scheduleFrame, from_=0,to=59, wrap=True, width=3, state="readonly")
self.start_min.set(0)
self.start_min.pack(side=tk.LEFT, padx=5)
self.start_clock12hr = ttk.Spinbox(master=scheduleFrame, values=("AM", "PM"), wrap=True, width=3)
self.start_clock12hr.set("PM")
self.start_clock12hr.pack(side=tk.LEFT, padx=5)
bufferLabel = tk.Label(scheduleFrame, text="to")
bufferLabel.pack(side=tk.LEFT, padx=5)
self.end_hour = ttk.Spinbox(master=scheduleFrame, from_=1,to=12, wrap=True, width=3, state="readonly")
self.end_hour.set(6)
self.end_hour.pack(side=tk.LEFT, padx=5)
self.end_min = ttk.Spinbox(master=scheduleFrame, from_=0,to=59, wrap=True, width=3, state="readonly")
self.end_min.set(0)
self.end_min.pack(side=tk.LEFT, padx=5)
self.end_clock12hr = ttk.Spinbox(master=scheduleFrame, values=("AM", "PM"), wrap=True, width=3)
self.end_clock12hr.set("PM")
self.end_clock12hr.pack(side=tk.LEFT, padx=5)
# Callback to create event for desired time
scheduleBtn = ttk.Button(scheduleFrame, text='Confirm', width=10, command=self.open_task_selection)
scheduleBtn.pack(side=tk.LEFT, padx=5)
class View(tk.Frame):
def __init__(self, root):
tk.Frame.__init__(self, root)
self.root = root
self._draw()
def _draw(self):
scrollbar = tk.Scrollbar(self.root)
scrollbar.pack(side="right", fill = "both")
viewFrame = tk.Frame(master=self.root)
viewFrame.pack(fill=tk.BOTH, side=tk.LEFT, expand=True, padx=10, pady=10)
self.viewTasks = tk.Listbox(viewFrame, height=11, width = 30, font=font.Font(size=15), selectmode=tk.SINGLE)
self.viewTasks.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
self.viewTasks.config(yscrollcommand = scrollbar.set)
scrollbar.config(command=self.viewTasks.yview)
self.viewTasks.config(selectmode=tk.SINGLE)
# ? Add a scheduled tasks view? shows which ones are already assigned with its date next to it?
class MainApp(tk.Frame):
def __init__(self, root):
tk.Frame.__init__(self, root)
self.root = root
self._init_database()
self._draw()
def addTask(self):
word = self.entry.get()
if len(word)==0:
messagebox.showinfo('Empty Entry', 'Enter task name')
else:
self.task.append(word)
self.cur.execute('insert into tasks values (?)', (word,))
self.listUpdate()
self.entry.delete(0,'end')
def listUpdate(self):
self.clearList()
for i in self.task:
self.view.viewTasks.insert('end', i)
def delOne(self):
try:
val = self.view.viewTasks.get(self.view.viewTasks.curselection())
if val in self.task:
self.oldTasks.append(val)
self.task.remove(val)
self.listUpdate()
self.cur.execute('delete from tasks where title = ?', (val,))
except:
messagebox.showinfo('Cannot Delete', 'No Task Item Selected')
def deleteAll(self):
mb = messagebox.askyesno('Delete All','Are you sure?')
if mb == True:
while(len(self.task) != 0):
deleted = self.task.pop()
self.oldTasks.append(deleted)
self.cur.execute('delete from tasks')
self.listUpdate()
def undoTask(self):
try:
word = self.oldTasks.pop()
self.task.append(word)
self.cur.execute('insert into tasks values (?)', (word,))
self.listUpdate()
except:
messagebox.showerror("Error", "Nothing to undo")
def clearList(self):
self.view.viewTasks.delete(0,'end')
def bye(self):
self.root.destroy()
def retrieveDB(self):
while(len(self.task) != 0):
self.task.pop()
for row in self.cur.execute('select title from tasks'):
self.task.append(row[0])
def _init_database(self):
self.conn = sq.connect('todo.db')
self.cur = self.conn.cursor()
self.cur.execute('create table if not exists tasks (title text)')
self.task = []
self.oldTasks = []
self.oldIndex = 0
def new_settings(self):
self.settingsWindow = tk.Toplevel(self.root)
self.settingsWindow.title("Scheduler Settings")
self.settingsWindow.geometry("400x200")
# Configure time length
# self.new_bio_entry = tk.Entry(self.settingsWindow, width=50)
# self.new_bio_entry.pack()
# ! convert this to # of weeks / week label
        self.start_hour = ttk.Spinbox(master=self.settingsWindow, from_=1,to=4, wrap=True, width=5, state="readonly")
        self.start_hour.set(1)
        self.start_hour.pack(padx=5)
        self.start_min = ttk.Spinbox(master=self.settingsWindow, from_=0,to=59, wrap=True, width=3, state="readonly")
        self.start_min.set(0)
        self.start_min.pack(side=tk.LEFT, padx=5)
# Confirm button
self.change_bio_btn = tk.Button(self.settingsWindow, text="Update bio", width=25, command=self.save_bio)
self.change_bio_btn.pack(pady=5)
def _draw(self):
menu_bar = tk.Menu(self.root)
self.root['menu'] = menu_bar
menu_settings = tk.Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(menu=menu_settings, label='Settings')
# Change from 1 week to up to 1 month
# ! time settings
# Change from all tasks in description to single select task and make that the title
menu_settings.add_command(label="Schedule Preferences", command = lambda x: x)
# Add buttons
buttonFrame = tk.Frame(master=self.root, width=50)
buttonFrame.pack(fill=tk.BOTH, side=tk.LEFT, padx=10)
todoLabel = ttk.Label(buttonFrame, text = 'To-Do List')
todoLabel.pack(pady=7)
taskLabel = ttk.Label(buttonFrame, text='Enter task title: ')
taskLabel.pack(pady=5)
self.entry = ttk.Entry(buttonFrame, width=21)
self.entry.pack(pady=5)
addBtn = ttk.Button(buttonFrame, text='Add task', width=20, command=self.addTask)
addBtn.pack(pady=5)
delBtn = ttk.Button(buttonFrame, text='Delete', width=20, command=self.delOne)
delBtn.pack(pady=5)
clearBtn = ttk.Button(buttonFrame, text='Delete all', width=20, command=self.deleteAll)
clearBtn.pack(pady=5)
undoBtn = ttk.Button(buttonFrame, text='Undo delete', width=20, command=self.undoTask)
undoBtn.pack(pady=5)
exitBtn = ttk.Button(buttonFrame, text='Exit', width=20, command=self.bye)
exitBtn.pack(pady=5)
self.scheduler = Scheduler(self.root, self.task)
self.view = View(self.root)
self.retrieveDB()
self.listUpdate()
self.root.mainloop()
self.conn.commit()
self.cur.close()
if __name__ == "__main__":
root = tk.Tk()
root.title('ToDo Cal')
icon = tk.PhotoImage(file="todo-icon.png")
root.iconphoto(False, icon)
root.geometry("650x280")
root.configure(bg="#d6d6d6")
root.minsize(650, 280)
MainApp(root)
``` |
{
"source": "JJWLewis/tiny_python_projects",
"score": 4
} |
#### File: tiny_python_projects/02_crowsnest/crowsnest.py
```python
import argparse
def get_args():
"""Get the comand-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('word', metavar='word',
default='octopus', help='Name of thing seen')
return parser.parse_args()
def main():
"""Sentence Constructor"""
thing_seen = get_args().word
if start_vowel(thing_seen):
article = 'An' if thing_seen[0].isupper() else 'an'
else:
article = 'A' if thing_seen[0].isupper() else 'a'
print(f'Ahoy, Captain, {article} {thing_seen} off the larboard bow!')
def start_vowel(word):
"""Checks if word starts with a vowel. Returns True if so"""
return word[0].lower() in ['a', 'e', 'i', 'o', 'u']
if __name__ == "__main__":
main()
``` |
{
"source": "j-jwolf/language-speed-test",
"score": 3
} |
#### File: language-speed-test/src/test.py
```python
import sys
import time
import os
def read(fn):
with open(fn) as f:
data = f.readlines()
return data
def write(fn, data):
f = open(fn, "w")
for line in data:
f.write(line)
f.close()
return
def consoleOut(fn):
command = "IF EXIST "+fn+" ( rm "+fn+" )"
res = os.system(command)
    if(res != 0): print("PROCESS RETURNED "+str(res))
return
def main():
start = time.time()
fn = sys.argv[1]
destination = sys.argv[2]
testCount = int(sys.argv[3])
destroyAfter = int(sys.argv[4])
print("new file name: ", destination, end="\n")
print("Starting test, reading file ", fn, " ", testCount, " times\n-----------------------", end="\n")
for i in range(testCount):
data = read(fn)
write(destination, data)
if(destroyAfter):
consoleOut(destination)
end = time.time()
print("Time elapsed: ", (end-start))
return
main()
``` |
{
"source": "j-jwolf/weather",
"score": 3
} |
#### File: src/pyfiles/getcoords.py
```python
import os
import sys
import json
def main():
fn = "loc.txt"
with open(fn) as file: content = file.readlines()
rawData = ""
for line in content: rawData += line
jsonList = json.loads(rawData)
coords = jsonList["loc"]
print(coords)
return
main()
``` |
{
"source": "jjwong/codefights",
"score": 2
} |
#### File: python/2_slithering_in_strings/convert_tabs.py
```python
import re
def convertTabs(code, x):
    return re.sub('\t', ' ' * x, code)
```
#### File: python/2_slithering_in_strings/feedback_review.py
```python
import textwrap
def feedbackReview(feedback, size):
return textwrap.wrap(feedback, width=size)
```
#### File: python/3_lurking_in_lists/print_list.py
```python
def printList(lst):
return "This is your list: " + str(lst)
```
#### File: python/3_lurking_in_lists/remove_tasks.py
```python
def removeTasks(k, toDo):
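    # e.g. removeTasks(3, [1, 2, 3, 4, 5, 6, 7]) deletes every 3rd item -> [1, 2, 4, 5, 7]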
del toDo[k-1::k]
return toDo
``` |
{
"source": "jjx323/VIfIP",
"score": 3
} |
#### File: jjx323/VIfIP/addNoise.py
```python
import numpy as np
import matplotlib.pyplot as plt
def addGaussianNoise(d, para, testMode='n'):
# This function add noises as follows:
# d_i = d_i with probability 1-para['rate']
# d_i = d_i + \epsilon \xi_i with probability para['rate']
# where \xi_i follow the standard normal distribution,
# para['rate'] is the corruption percentage,
# para['noise_level'] is the noise level,
#
# The output:
# d: the noisy data;
# sig: is the covariance of the added noises.
#
# Ref: <NAME>, A variational Bayesian method to inverse problems with implusive noise,
# Journal of Computational Physics, 231, 2012, 423-435 (Page 428, Section 4).
if testMode == 'y':
np.random.seed(1)
noise_level = para['noise_level']
len_d = len(d)
r = para['rate']
noise = np.random.normal(0, 1, len_d)
select = (np.random.uniform(0, 1, len_d) < r)
d = d + noise_level*np.max(np.abs(d))*noise*select
sig = noise_level*np.max(np.abs(d))
return d, sig
def addImplusiveNoise(d, para, testMode='n'):
# This function add noise as follows:
# d_i = d_i with probability 1-para['rate']
# d_i = d_i + para['noise_levle']*U[-1, 1] with probability para['rate']
# Here, U[-1, 1] stands for uniform probability distribution between -1 and 1
# para['rate'] is the corruption percentage,
# para['noise_level'] is the noise level,
#
# The output:
# d: the noisy data
if testMode == 'y':
np.random.seed(1)
noise_level = para['noise_level']
len_d = len(d)
r = para['rate']
select = (np.random.uniform(0, 1, len_d) < r)
noise = np.random.uniform(-1, 1, len_d)
d = d + noise_level*noise*select
return d
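if __name__ == "__main__":
    # Minimal usage sketch (parameter values below are illustrative, not from the source):
    # corrupt roughly 30% of a clean signal with 5%-level Gaussian noise.
    d_clean = np.sin(np.linspace(0, 2*np.pi, 100))
    para = {'noise_level': 0.05, 'rate': 0.3}
    d_noisy, sig = addGaussianNoise(d_clean, para, testMode='y')
    print("covariance of added noise:", sig)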
``` |
{
"source": "jjxsg/cricNotifier",
"score": 3
} |
#### File: cricNotifier/utils/cli.py
```python
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description="cricNotifier")
parser.add_argument('-t', '--timeout',
help='duration of system notification')
parser.add_argument('-i', '--interval',
help='duration between each notification')
parser.add_argument('-nl', '--nologs',
action='store_true',
help='disable console logs',
default=False
)
args = parser.parse_args()
return args, parser
``` |
{
"source": "jjxs/xuanke",
"score": 2
} |
#### File: xuanke/xuanke/urls.py
```python
from django.contrib import admin
from django.urls import path
from xuanzuo.views import WechatLoginView, SeatApiView, SeatListApiView
# def trigger_error(request):
# division_by_zero = 1 / 0
urlpatterns = [
path('admin/', admin.site.urls),
path('member/login', WechatLoginView.as_view()),
path('seat', SeatApiView.as_view()),
path('seatList', SeatListApiView.as_view()),
# path('sentry-debug/', trigger_error),
]
``` |
{
"source": "jjxu217/pytorch-sso",
"score": 3
} |
#### File: classification/models/alexnet.py
```python
import torch.nn as nn
import torch.nn.functional as F
from torchsso.utils.accumulator import TensorAccumulator
__all__ = ['alexnet', 'alexnet_mcdropout']
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super().__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5)
self.conv2 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
self.conv3 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.fc = nn.Linear(256, num_classes)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class AlexNetMCDropout(AlexNet):
mc_dropout = True
def __init__(self, num_classes=10, dropout_ratio=0.5, val_mc=10):
super(AlexNetMCDropout, self).__init__(num_classes)
self.dropout_ratio = dropout_ratio
self.val_mc = val_mc
def forward(self, x):
dropout_ratio = self.dropout_ratio
x = F.relu(F.dropout(self.conv1(x), p=dropout_ratio))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(F.dropout(self.conv2(x), p=dropout_ratio))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(F.dropout(self.conv3(x), p=dropout_ratio))
x = F.relu(F.dropout(self.conv4(x), p=dropout_ratio))
x = F.relu(F.dropout(self.conv5(x), p=dropout_ratio))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def prediction(self, x):
acc_prob = TensorAccumulator()
m = self.val_mc
for _ in range(m):
output = self.forward(x)
prob = F.softmax(output, dim=1)
acc_prob.update(prob, scale=1/m)
prob = acc_prob.get()
return prob
def alexnet(**kwargs):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
"""
model = AlexNet(**kwargs)
return model
def alexnet_mcdropout(**kwargs):
model = AlexNetMCDropout(**kwargs)
return model
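if __name__ == "__main__":
    # Minimal usage sketch (assumes 3x32x32 inputs such as CIFAR-10; sizes are illustrative):
    # prediction() averages the softmax over `val_mc` stochastic forward passes.
    import torch
    model = alexnet_mcdropout(num_classes=10, val_mc=5)
    probs = model.prediction(torch.randn(2, 3, 32, 32))
    print(probs.shape)  # expected: torch.Size([2, 10])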
```
#### File: torchsso/autograd/samplegrad.py
```python
from contextlib import contextmanager
import torch
import torch.nn as nn
import torch.nn.functional as F
@contextmanager
def save_sample_grads(model: nn.Module):
handles = []
for module in model.children():
params = list(module.parameters())
params = [p for p in params if p.requires_grad]
if len(params) == 0:
continue
handles.append(module.register_forward_hook(_forward_postprocess))
handles.append(module.register_backward_hook(_backward_postprocess))
yield
for handle in handles:
handle.remove()
def _forward_postprocess(module: nn.Module, input: torch.Tensor, output: torch.Tensor):
data_input = input[0].clone().detach()
if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
bnorm = module
f = bnorm.num_features
if isinstance(module, nn.BatchNorm1d):
shape = (1, f)
elif isinstance(module, nn.BatchNorm2d):
shape = (1, f, 1, 1)
else:
shape = (1, f, 1, 1, 1)
# restore normalized input
data_input_norm = (output - bnorm.bias.view(shape)).div(bnorm.weight.view(shape))
data_input = data_input_norm
setattr(module, 'data_input', data_input)
def _backward_postprocess(module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor):
grad_output = grad_output[0].clone().detach()
data_input = getattr(module, 'data_input', None)
assert data_input is not None, 'backward is called before forward.'
assert data_input.size(0) == grad_output.size(0)
args = [module, data_input, grad_output]
if isinstance(module, nn.Linear):
grad_linear(*args)
elif isinstance(module, nn.Conv2d):
grad_conv2d(*args)
elif isinstance(module, nn.BatchNorm1d):
grad_batchnorm1d(*args)
elif isinstance(module, nn.BatchNorm2d):
grad_batchnorm2d(*args)
else:
raise ValueError(f'Unsupported module class: {module.__class__}.')
def grad_linear(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.Linear)
linear = module
assert data_input.ndimension() == 2 # n x f_in
assert grad_output.ndimension() == 2 # n x f_out
if linear.weight.requires_grad:
grads = torch.einsum('bi,bj->bij', grad_output, data_input) # n x f_out x f_in
setattr(linear.weight, 'grads', grads) # n x f_out x f_in
if hasattr(linear, 'bias') and linear.bias.requires_grad:
setattr(linear.bias, 'grads', grad_output) # n x f_out
def grad_conv2d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.Conv2d)
conv2d = module
assert data_input.ndimension() == 4 # n x c_in x h_in x w_in
assert grad_output.ndimension() == 4 # n x c_out x h_out x w_out
if conv2d.weight.requires_grad:
# n x (c_in)(k_h)(k_w) x (h_out)(w_out)
input2d = F.unfold(data_input,
kernel_size=conv2d.kernel_size, stride=conv2d.stride,
padding=conv2d.padding, dilation=conv2d.dilation)
# n x c_out x h_out x w_out
n, c_out, h, w = grad_output.size()
# n x c_out x (h_out)(w_out)
grad_output2d = grad_output.view(n, c_out, -1)
c_out, c_in, k_h, k_w = conv2d.weight.size()
grads_2d = torch.einsum('bik,bjk->bij', grad_output2d, input2d) # n x c_out x (c_in)(k_h)(k_w)
setattr(conv2d.weight, 'grads', grads_2d.view(n, c_out, c_in, k_h, k_w)) # n x c_out x c_in x k_h x k_w
if hasattr(conv2d, 'bias') and conv2d.bias.requires_grad:
setattr(conv2d.bias, 'grads', grad_output.sum(dim=(2, 3))) # n x c_out
def grad_batchnorm1d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.BatchNorm1d)
batchnorm1d = module
assert data_input.ndimension() == 2 # n x f
assert grad_output.ndimension() == 2 # n x f
assert batchnorm1d.affine
if batchnorm1d.weight.requires_grad:
grads = data_input.mul(grad_output) # n x f
setattr(batchnorm1d.weight, 'grads', grads)
if batchnorm1d.bias.requires_grad:
setattr(batchnorm1d.bias, 'grads', grad_output) # n x f
def grad_batchnorm2d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.BatchNorm2d)
batchnorm2d = module
assert data_input.ndimension() == 4 # n x c x h x w
assert grad_output.ndimension() == 4 # n x c x h x w
assert batchnorm2d.affine
if batchnorm2d.weight.requires_grad:
grads = data_input.mul(grad_output).sum(dim=(2, 3)) # n x c
setattr(batchnorm2d.weight, 'grads', grads)
if batchnorm2d.bias.requires_grad:
setattr(batchnorm2d.bias, 'grads', grad_output.sum(dim=(2, 3))) # n x c
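if __name__ == "__main__":
    # Minimal usage sketch (names are illustrative): collect per-sample gradients
    # for a tiny linear model; each parameter then carries a `grads` tensor with
    # one gradient per example in the batch.
    torch.manual_seed(0)
    model = nn.Sequential(nn.Linear(4, 3))
    x, y = torch.randn(8, 4), torch.randint(0, 3, (8,))
    with save_sample_grads(model):
        F.cross_entropy(model(x), y).backward()
    print(model[0].weight.grads.shape)  # expected: torch.Size([8, 3, 4])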
```
#### File: curv/cov/batchnorm.py
```python
from torchsso import Curvature, DiagCurvature
class CovBatchNorm1d(Curvature):
def update_in_backward(self, grad_output_data):
pass
class DiagCovBatchNorm1d(DiagCurvature):
def update_in_backward(self, grad_output):
data_input = getattr(self._module, 'data_input', None) # n x f
assert data_input is not None
in_in = data_input.mul(data_input) # n x f
grad_grad = grad_output.mul(grad_output) # n x f
data_w = in_in.mul(grad_grad).mean(dim=0) # f x 1
self._data = [data_w]
if self.bias:
data_b = grad_grad.mean(dim=0) # f x 1
self._data.append(data_b)
class CovBatchNorm2d(Curvature):
def update_in_backward(self, grad_output):
pass
class DiagCovBatchNorm2d(DiagCurvature):
def update_in_backward(self, grad_out):
data_input = getattr(self._module, 'data_input', None) # n x c x h x w
assert data_input is not None
in_in = data_input.mul(data_input).sum(dim=(2, 3)) # n x c
grad_grad = grad_out.mul(grad_out).sum(dim=(2, 3)) # n x c
data_w = in_in.mul(grad_grad).mean(dim=0) # c x 1
self._data = [data_w]
if self.bias:
data_b = grad_grad.mean(dim=0) # c x 1
self._data.append(data_b)
```
#### File: curv/cov/linear.py
```python
import torch
from torchsso import Curvature, DiagCurvature, KronCurvature
class CovLinear(Curvature):
def update_in_backward(self, grad_output):
data_input = getattr(self._module, 'data_input', None) # n x f_in
assert data_input is not None
n = data_input.shape[0]
if self.bias:
ones = torch.ones((n, 1), device=data_input.device, dtype=data_input.dtype)
data_input = torch.cat((data_input, ones), 1) # n x (f_in+1)
grad = torch.einsum('bi,bj->bij', grad_output, data_input) # n x f_out x f_in
grad = grad.reshape((n, -1)) # n x (f_out)(f_in)
data = torch.einsum('bi,bj->ij', grad, grad)
self._data = [data]
def precondition_grad(self, params):
pass
class DiagCovLinear(DiagCurvature):
def update_in_backward(self, grad_output):
data_input = getattr(self._module, 'data_input', None) # n x f_in
assert data_input is not None
n = data_input.shape[0]
in_in = data_input.mul(data_input) # n x f_in
grad_grad = grad_output.mul(grad_output) # n x f_out
data_w = torch.einsum('ki,kj->ij', grad_grad,
in_in).div(n) # f_out x f_in
self._data = [data_w]
if self.bias:
data_b = grad_grad.mean(dim=0) # f_out x 1
self._data.append(data_b)
class KronCovLinear(KronCurvature):
def update_in_forward(self, input_data):
n = input_data.shape[0] # n x f_in
if self.bias:
ones = input_data.new_ones((n, 1))
# shape: n x (f_in+1)
input_data = torch.cat((input_data, ones), 1)
# f_in x f_in or (f_in+1) x (f_in+1)
A = torch.einsum('ki,kj->ij', input_data, input_data).div(n)
self._A = A
def update_in_backward(self, grad_output):
n = grad_output.shape[0] # n x f_out
# f_out x f_out
G = torch.einsum(
'ki,kj->ij', grad_output, grad_output).div(n)
self._G = G
def precondition_grad(self, params):
A_inv, G_inv = self.inv
# todo check params == list?
if self.bias:
grad = torch.cat(
(params[0].grad, params[1].grad.view(-1, 1)), 1)
preconditioned_grad = G_inv.mm(grad).mm(A_inv)
params[0].grad.copy_(preconditioned_grad[:, :-1])
params[1].grad.copy_(preconditioned_grad[:, -1])
else:
grad = params[0].grad
preconditioned_grad = G_inv.mm(grad).mm(A_inv)
params[0].grad.copy_(preconditioned_grad)
def sample_params(self, params, mean, std_scale):
A_ic, G_ic = self.std
if self.bias:
m = torch.cat(
(mean[0], mean[1].view(-1, 1)), 1)
param = m.add(std_scale, G_ic.mm(
torch.randn_like(m)).mm(A_ic))
params[0].data.copy_(param[:, 0:-1])
params[1].data.copy_(param[:, -1])
else:
m = mean[0]
param = mean.add(std_scale, G_ic.mm(
torch.randn_like(m)).mm(A_ic))
params[0].data = param
def _get_shape(self):
linear = self._module
w = getattr(linear, 'weight')
f_out, f_in = w.shape
G_shape = (f_out, f_out)
if self.bias:
A_shape = (f_in + 1, f_in + 1)
else:
A_shape = (f_in, f_in)
return A_shape, G_shape
```
#### File: curv/fisher/batchnorm.py
```python
import torch
from torchsso import DiagCovBatchNorm2d, Fisher
class DiagFisherBatchNorm2d(DiagCovBatchNorm2d, Fisher):
def __init__(self, *args, **kwargs):
DiagCovBatchNorm2d.__init__(self, *args, **kwargs)
Fisher.__init__(self)
def update_in_backward(self, grad_out):
if self.do_backward:
assert self.prob is not None
data_input = getattr(self._module, 'data_input', None) # n x c x h x w
assert data_input is not None
n = grad_out.shape[0] # n x c x h x w
pg = torch.mul(grad_out, self.prob.reshape(n, 1, 1, 1))
grad_grad = pg.mul(grad_out).sum(dim=(2, 3)) # n x c
in_in = data_input.mul(data_input).sum(dim=(2, 3)) # n x c
data_w = in_in.mul(grad_grad).mean(dim=0) # c x 1
self._data = [data_w]
if self.bias:
data_b = grad_grad.mean(dim=0) # c x 1
self._data.append(data_b)
self.accumulate_cov(self._data)
else:
self._data = self.finalize()
```
#### File: curv/fisher/__init__.py
```python
import torch
import torch.nn.functional as F
from torchsso.utils import TensorAccumulator
class Fisher(object):
def __init__(self):
self.prob = None
self._do_backward = True
self._acc_cov = TensorAccumulator()
@property
def do_backward(self):
return self._do_backward
def turn_on_backward(self):
self._do_backward = True
def turn_off_backward(self):
self._do_backward = False
def accumulate_cov(self, cov):
self._acc_cov.update(cov)
def finalize(self):
return self._acc_cov.get()
def update_as_presoftmax(self, prob):
raise NotImplementedError('This method supports only torchsso.KronFisherLinear.')
def get_closure_for_fisher(optimizer, model, data, target, approx_type=None, num_mc=1):
_APPROX_TYPE_MC = 'mc'
def turn_off_param_grad():
for group in optimizer.param_groups:
group['curv'].turn_on_backward()
for param in group['params']:
param.requires_grad = False
def turn_on_param_grad():
for group in optimizer.param_groups:
group['curv'].turn_off_backward()
for param in group['params']:
param.requires_grad = True
def closure():
for group in optimizer.param_groups:
assert isinstance(group['curv'], Fisher), f"Invalid Curvature type: {type(group['curv'])}."
optimizer.zero_grad()
output = model(data)
prob = F.softmax(output, dim=1)
is_sampling = approx_type is None or approx_type == _APPROX_TYPE_MC
if is_sampling:
turn_off_param_grad()
if approx_type == _APPROX_TYPE_MC:
dist = torch.distributions.Categorical(prob)
_target = dist.sample((num_mc,))
for group in optimizer.param_groups:
group['curv'].prob = torch.ones_like(prob[:, 0]).div(num_mc)
for i in range(num_mc):
loss = F.cross_entropy(output, _target[i])
loss.backward(retain_graph=True)
else:
for i in range(model.num_classes):
for group in optimizer.param_groups:
group['curv'].prob = prob[:, i]
loss = F.cross_entropy(output, torch.ones_like(target).mul(i))
loss.backward(retain_graph=True)
turn_on_param_grad()
else:
raise ValueError('Invalid approx type: {}'.format(approx_type))
loss = F.cross_entropy(output, target)
loss.backward()
return loss, output
return closure
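# Usage sketch (hypothetical; assumes `optimizer` is a second-order optimizer whose
# param groups carry a Fisher-type curvature, as asserted inside the closure):
#   closure = get_closure_for_fisher(optimizer, model, data, target, approx_type='mc', num_mc=1)
#   loss, output = closure()  # accumulates curvature statistics and sets the gradients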
```
#### File: curv/hessian/conv.py
```python
from torchsso import KronCovConv2d, KronHessian
class KronHessianConv2d(KronCovConv2d, KronHessian):
def __init__(self, module, ema_decay=1., damping=0, post_curv=None, recursive_approx=False):
KronHessian.__init__(self, module, ema_decay, damping, post_curv, recursive_approx)
def update_in_backward(self, grad_output):
KronHessian.update_in_backward(self, grad_output)
```
#### File: utils/chainer_communicators/__init__.py
```python
import numpy as np
def create_communicator(communicator_name='pure_nccl',
mpi_comm=None,
rsv_comm_dtype=np.float32,
agv_comm_dtype=np.float32,
use_hiercoll=False,
dims=None,
):
if mpi_comm is None:
import mpi4py.MPI
mpi_comm = mpi4py.MPI.COMM_WORLD
if communicator_name != 'pure_nccl' and rsv_comm_dtype != np.float32:
raise ValueError(
'rsv_comm_dtype is only available at \'pure_nccl\' communicator')
if communicator_name != 'pure_nccl' and agv_comm_dtype != np.float32:
raise ValueError(
'agv_comm_dtype is only available at \'pure_nccl\' communicator')
if communicator_name != 'pure_nccl' and dims is not None:
raise ValueError(
'dims is only available at \'pure_nccl\' communicator')
if communicator_name == 'pure_nccl':
from torchsso.utils.chainer_communicators.pure_nccl_communicator \
import PureNCCLCommunicator
return PureNCCLCommunicator(mpi_comm,
rsv_comm_dtype=rsv_comm_dtype,
agv_comm_dtype=agv_comm_dtype,
use_hiercoll=use_hiercoll,
dims=dims
)
else:
raise ValueError(
'Unrecognized communicator_name: {}'.format(communicator_name))
```
#### File: torchsso/utils/cholesky_cupy.py
```python
try:
import cupy
from torchsso.utils.cupy import to_cupy, from_cupy
except:
# print("No cupy detected")
pass
def cholesky(m, upper=True):
m_cp = to_cupy(m)
m_chl_cp = cupy.linalg.decomposition.cholesky(m_cp)
if upper:
m_chl_cp = m_chl_cp.transpose()
return from_cupy(m_chl_cp)
```
#### File: torchsso/utils/logger.py
```python
import os
import time
import json
import shutil
# Select the best-resolution timer function
try:
_get_time = time.perf_counter
except AttributeError:
if os.name == 'nt':
_get_time = time.clock
else:
_get_time = time.time
class Logger(object):
def __init__(self, out, logname):
self.out = out
self.logname = logname
self._log = []
self._start_at = None
if not os.path.isdir(self.out):
os.makedirs(self.out)
def start(self):
self._start_at = _get_time()
@property
def elapsed_time(self):
if self._start_at is None:
raise RuntimeError('training has not been started yet')
return _get_time() - self._start_at
def write(self, log):
self._log.append(log)
tmp_path = os.path.join(self.out, 'tmp')
with open(tmp_path, 'w') as f:
json.dump(self._log, f, indent=4)
path = os.path.join(self.out, self.logname)
shutil.move(tmp_path, path)
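if __name__ == "__main__":
    # Minimal usage sketch (paths and keys below are illustrative):
    logger = Logger(out='example_log', logname='log.json')
    logger.start()
    logger.write({'epoch': 1, 'loss': 0.5, 'elapsed': logger.elapsed_time})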
``` |
{
"source": "JJYDXFS/ChoutuXiuXiu",
"score": 3
} |
#### File: ChoutuXiuXiu/py_modules/FaceLoc.py
```python
import cv2
import face_recognition
import json
def myFaceLoc(BASE_DIR, img_path, timestamp):
'''
人脸定位模块
@param
BASE_DIR: 服务器存储文件全局路径
img_path: 待检测的图片路径
timestamp: 时间戳,用于生成图片命名
@return
返回图片相对前端的路径
'''
image = cv2.imread(img_path)
face_locations_noCNN=face_recognition.face_locations(image)
face_num2=len(face_locations_noCNN)
for i in range(0,face_num2):
top = face_locations_noCNN[i][0]
right = face_locations_noCNN[i][1]
bottom = face_locations_noCNN[i][2]
left = face_locations_noCNN[i][3]
start = (left, top)
end = (right, bottom)
color = (0,255,255)
thickness = 2
cv2.rectangle(image, start, end, color, thickness)
outname = BASE_DIR+"\\results\\face_"+timestamp+".jpg"
cv2.imwrite(outname,image)
return json.dumps({'result_list':['results\\face_'+timestamp+'.jpg']},ensure_ascii=False)
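# Usage sketch (paths are illustrative; requires an existing image and a writable
# <BASE_DIR>\results folder):
#   myFaceLoc(r"C:\server", r"C:\server\uploads\photo.jpg", "20200101000000")
# returns a JSON string listing the annotated image path relative to the front end.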
```
#### File: ChoutuXiuXiu/py_modules/FaceRe.py
```python
import cv2
import face_recognition
import numpy as np
import json
def myFaceRe(BASE_DIR, imageData, index, timestamp, save_path):
'''
人脸识别模块
@refer
https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_webcam_faster.py
@param
BASE_DIR: 服务器存储文件全局路径
imageData: 图像数据,单图检测传入图片路径,视频检测传入图片数据
index: index=-1时对应单图检测,index为正整数时对应视频检测的图片下标
timestamp: 时间戳,用于定义新建目录名
save_path: 图片保存路径,仅在index≠-1时有效
@return
单图检测模式下返回图片相对前端的路径
视频检测模式下不返回有意义值
'''
if index == -1: unknown_img = cv2.imread(imageData)
else: unknown_img = imageData
face_locations = face_recognition.face_locations(unknown_img)
face_encodings = face_recognition.face_encodings(unknown_img, face_locations)
# 导入人脸数据
name_list = ['<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','Duke of Edinburgh','IU','song wei','The Queen Mother','King George V']
known_face_encodings = np.load(BASE_DIR+'\\data\\face_encoding.npy')
face_names = []
# if len(face_encodings) == 0 :
# print("没有识别到人脸")
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# 直接使用最短距离
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = name_list[best_match_index]
face_names.append(name)
# 标记结果
for (top, right, bottom, left), name in zip(face_locations, face_names):
cv2.rectangle(unknown_img, (left, top), (right, bottom), (0, 0, 200), 2)
cv2.rectangle(unknown_img, (left, bottom - 35), (right, bottom), (0, 0, 200), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(unknown_img, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
if index == -1: # 对于单张识别
outname = BASE_DIR + "\\results\\face_"+timestamp+".jpg"
cv2.imwrite(outname,unknown_img)
return json.dumps({'result_list':['results\\face_'+timestamp+'.jpg']},ensure_ascii=False)
else: # 对于视频识别,按给定保存路径存,并命名
outname = save_path+"\\image{}.jpg".format(index)
cv2.imwrite(outname,unknown_img)
return
``` |
{
"source": "jjyr/akamodel",
"score": 3
} |
#### File: akamodel/akamodel/metadata.py
```python
from sqlalchemy import MetaData as RawMetaData
class MetaData(object):
def __init__(self, raw_metadata=None):
self._model_dict = dict()
self._raw_metadata = raw_metadata or RawMetaData()
def register(self, model):
self._model_dict[model.__name__] = model
def get_model_by_name(self, name):
return self._model_dict[name]
```
#### File: jjyr/akamodel/setup.py
```python
from __future__ import unicode_literals
import logging
import subprocess
import sys
from setuptools import Command
from setuptools import find_packages, setup
LOG = logging.getLogger(__name__)
description = "An ActiveRecord-like ORM in python world"
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(['tests'])
sys.exit(errno)
setup(name='akamodel',
version='0.0.0',
license='Apache-2.0',
description=description,
long_description=open('README.md', encoding='utf-8').read(),
author='<NAME>',
author_email='<EMAIL>',
cmdclass={'test': TestCommand},
platforms=['unix', 'linux', 'osx'],
packages=find_packages(),
install_requires=[
'sqlalchemy'
],
entry_points={
'console_scripts': [
],
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
],
)
``` |
{
"source": "jjYukgm/SSL_multi_decoder",
"score": 2
} |
#### File: jjYukgm/SSL_multi_decoder/analysis_plot.py
```python
import sys
import os
import numpy as np
# plot
from laplotter import LossAccPlotter, MultiPlotter
from collections import OrderedDict
import time
import pdb
# pdb.set_trace()
# pdb.set_trace = lambda: None
# import matplotlib
# matplotlib.use('Agg')
def str2flo(str):
if 'nan' in str:
flo = 0.
else:
flo = float(str)
return flo
def calculate_remaining(t1, t2, i, total):
progress = (i + 0.) / total
elapsed_time = t2 - t1
if progress > 0:
remaining_time = elapsed_time * (1/progress) - elapsed_time
else:
remaining_time = 0
# return progress, remaining_time
psec = int(remaining_time % 60)
pmin = int(remaining_time // 60)
time_str = '[{:8.2%}], remain: {:2d}:{:2d} '.format(progress, pmin, psec)
time_str = '| ' + time_str
return time_str
def check_val(data, suffix, b_size=None):
dict = locals()
ema = None
tri = None
# plot_data = {'X': [], 'Y': [], 'legend': []}
if not b_size:
b_size = 100
batch_per_epoch = int((50000 + 100 - 1) / b_size) # in data_loader: unlabel_size = tr_size
# batch_per_epoch /= 500 # vis_period
## load loss data
log_path = os.path.join('{}_log'.format(data), '{}.FM+VI.{}.txt'.format(data, suffix))
log_file2 = open(log_path, "r")
st_time = time.time()
fi_len = 0
for li in log_file2:
li_or = li.split(" | ")
if len(li_or) == 1:
continue
if ema is None and "ema" in li_or[3]:
ema = True
if tri is None and "tri" in li_or[4]:
tri = True
fi_len += 1
# print("file len: {}".format(fi_len))
if not ema:
ema = False
if not tri:
tri = False
dict["batch_per_epoch"] = batch_per_epoch
dict["log_path"] = log_path
dict["fi_len"] = fi_len
dict["ema"] = ema
dict["tri"] = tri
return dict
def plot_loss_err(data, suffix, fi_len, ema, batch_per_epoch, **kwargs):
## load loss data
log_path = kwargs["log_path"]
log_file2 = open(log_path, "r")
st_time = time.time()
# plot settings
save_to_filepath = os.path.join("{}_log".format(data), "{}_plot_loss_err.png".format(suffix))
plotter = LossAccPlotter(title="{} loss over time".format(suffix),
save_to_filepath=save_to_filepath,
show_regressions=True,
show_averages=True,
show_loss_plot=True,
show_err_plot=True,
show_ema_plot=ema,
show_plot_window=False,
x_label="Epoch")
i = 0
for li in log_file2:
li_or = li.split(" | ")
if len(li_or) == 1:
continue
iter = li_or[0].split("\t")[0][1:]
loss_train = str2flo(li_or[0].split(":")[1].split(",")[0])
err_train = str2flo(li_or[0].split(",")[1])
loss_val = str2flo(li_or[1].split(":")[1].split(",")[0])
err_val = str2flo(li_or[1].split(",")[1])
ema_err_train = ema_err_val = None
if ema:
ema_err_train = li_or[3].split(":")[1].split(",")[0]
ema_err_val = li_or[3].split(",")[1]
if "None" not in ema_err_train:
ema_err_train = str2flo(ema_err_train)
ema_err_val = str2flo(ema_err_val)
else:
ema_err_train = ema_err_val = None
float_epoch = str2flo(iter) / batch_per_epoch
plotter.add_values(float_epoch,
loss_train=loss_train, loss_val=loss_val,
err_train=err_train, err_val=err_val,
ema_err_train=ema_err_train, ema_err_val=ema_err_val,
redraw=False)
i += 1
time_str = "{}\r".format(calculate_remaining(st_time, time.time(), i, fi_len))
sys.stdout.write(time_str)
sys.stdout.flush()
sys.stdout.write("\n")
log_file2.close()
plotter.redraw() # save as image
# plotter.block()
def plot_losses(data, suffix, fi_len, batch_per_epoch, **kwargs):
# plot_data = {'X': [], 'Y': [], 'legend': []}
other_loss = OrderedDict()
# plot settings
save_to_filepath = os.path.join("{}_log".format(data), "{}_plot_losses.png".format(suffix))
plotter = LossAccPlotter(title="{} loss over time".format(suffix),
save_to_filepath=save_to_filepath,
show_regressions=False,
show_averages=False,
show_other_loss=True,
show_log_loss=True,
show_loss_plot=True,
show_err_plot=True,
show_plot_window=False,
epo_max=1000,
x_label="Epoch")
## load loss data
log_path = kwargs["log_path"]
log_file2 = open(log_path, "r")
st_time = time.time()
i = 0
for li in log_file2:
li_or = li.split(" | ")
if len(li_or) == 1:
continue
iter = li_or[0].split("\t")[0][1:]
loss_train = str2flo(li_or[0].split(":")[1].split(",")[0])
err_train = str2flo(li_or[0].split(",")[1])
loss_val = str2flo(li_or[1].split(":")[1].split(",")[0])
err_val = str2flo(li_or[1].split(",")[1])
for li2 in li_or:
if "loss" not in li2:
continue
# pdb.set_trace()
key = li2.split(": ")[0]
value = str2flo(li2.split(": ")[1])
if key == 'vi loss':
value *= 1e-2
other_loss[key] = value
float_epoch = str2flo(iter) / batch_per_epoch
plotter.add_values(float_epoch,
loss_train=loss_train, loss_val=loss_val,
err_train=err_train, err_val=err_val,
redraw=False, other_loss = other_loss)
i += 1
time_str = "{}\r".format(calculate_remaining(st_time, time.time(), i, fi_len))
# print(time_string, end = '\r')
sys.stdout.write(time_str)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
log_file2.close()
plotter.redraw() # save as image
# plotter.block()
def plot_values(data, suffix, fi_len, batch_per_epoch, **kwargs):
# plot gen acc and unl acc
# plot_data = {'X': [], 'Y': [], 'legend': []}
other_value = OrderedDict()
# plot settings
save_to_filepath = os.path.join("{}_log".format(data), "{}_plot_values.png".format(suffix))
plotter = MultiPlotter(title="{} Accuracy over time".format(suffix),
save_to_filepath=save_to_filepath,
show_regressions=False,
show_averages=False,
show_plot_window=False,
epo_max=1000,
x_label="Epoch")
## load loss data
log_path = kwargs["log_path"]
log_file2 = open(log_path, "r")
st_time = time.time()
i = 0
for li in log_file2:
li_or = li.split(" | ")
if len(li_or) == 1:
continue
iter = li_or[0].split("\t")[0][1:]
li_ev = li.split("[Eval]")[1].split(" | ")[0].split(",")
for li2 in li_ev:
if "acc" not in li2:
continue
# pdb.set_trace()
key = li2.split(": ")[0]
value = str2flo(li2.split(": ")[1])
other_value[key] = value
float_epoch = str2flo(iter) / batch_per_epoch
plotter.add_values(float_epoch, other_value = other_value,
redraw=False)
i += 1
time_str = "{}\r".format(calculate_remaining(st_time, time.time(), i, fi_len))
# print(time_string, end = '\r')
sys.stdout.write(time_str)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
log_file2.close()
plotter.redraw() # save as image
# plotter.block()
def plot_tri(data, suffix, fi_len, batch_per_epoch, tri, ema, **kwargs):
assert tri, "tri Something Wrong"
# plot gen acc and unl acc
# plot_data = {'X': [], 'Y': [], 'legend': []}
other_value = OrderedDict()
# plot settings
save_to_filepath = os.path.join("{}_log".format(data), "{}_plot_tri.png".format(suffix))
plotter = MultiPlotter(title="{} ErrorRate over time".format(suffix),
save_to_filepath=save_to_filepath,
show_regressions=False,
show_averages=False,
show_plot_window=False,
epo_max=1000,
x_label="Epoch")
## load loss data
log_path = kwargs["log_path"]
log_file2 = open(log_path, "r")
st_time = time.time()
i = 0
for li in log_file2:
li_or = li.split(" | ")
if len(li_or) == 1:
continue
iter = li_or[0].split("\t")[0][1:]
other_value["err_train"] = str2flo(li_or[0].split(",")[1])
other_value["err_val"] = str2flo(li_or[1].split(",")[1])
if ema:
ema_err_train = li_or[3].split(":")[1].split(",")[0]
ema_err_val = li_or[3].split(",")[1]
if "None" not in ema_err_train:
ema_err_train = str2flo(ema_err_train)
ema_err_val = str2flo(ema_err_val)
else:
ema_err_train = ema_err_val = None
other_value["ema_err_train"] = ema_err_train
other_value["ema_err_val"] = ema_err_val
# tri
tri_2 = li_or[4].split(":")[1].split(",")[0]
tri_3 = li_or[4].split(",")[1]
other_value["tri_2"] = str2flo(tri_2)
other_value["tri_3"] = str2flo(tri_3)
float_epoch = str2flo(iter) / batch_per_epoch
plotter.add_values(float_epoch, other_value = other_value,
redraw=False)
i += 1
time_str = "{}\r".format(calculate_remaining(st_time, time.time(), i, fi_len))
# print(time_string, end = '\r')
sys.stdout.write(time_str)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
log_file2.close()
plotter.redraw() # save as image
# plotter.block()
def main():
if len(sys.argv) < 3: # 1
print("Usage:{} <DATA> <WHO> [batchsize]".format(sys.argv[0]))
sys.exit(1) # 2
if len(sys.argv) > 3:
b_size = int(sys.argv[3])
else:
b_size = None
dict = {"data": sys.argv[1], "suffix": sys.argv[2], "b_size": b_size}
dict = check_val(**dict)
plot_loss_err(**dict)
plot_losses(**dict)
plot_values(**dict)
if dict["tri"]: plot_tri(**dict)
if __name__ == "__main__":
main()
```
#### File: jjYukgm/SSL_multi_decoder/cmgmd_tester.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.utils as vutils
import data
import config
import model
import random
import time
import os, sys
import math
import argparse
from collections import OrderedDict
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# from sklearn.preprocessing import scale
from sklearn.manifold import TSNE
# from PIL import Image
from utils import *
# from cifarmg_trainer import Trainer
import losses, ramps
import copy
# from metrics import ArcMarginProduct # cosine distance
from losses import FocalLoss
# import random
import pdb
# pdb.set_trace()
# pdb.set_trace = lambda: None
class Tester(object):
def __init__(self, config, args):
self.config = config
for k, v in args.__dict__.items():
setattr(self.config, k, v)
setattr(self.config, 'save_dir', '{}_log'.format(self.config.dataset))
assert os.path.exists(self.config.save_dir), "There is no log folder"
# try to load config
log_path = os.path.join(self.config.save_dir, '{}.FM+VI.{}.txt'.format(self.config.dataset, self.config.suffix))
if os.path.isfile(log_path):
print("log config covering...")
logger = open(log_path, 'r')
keep_str = ['dis_channels']
keep_val = ['last_epochs', 'last_epo_lbl']
for li in logger:
if "|" in li: break
key, val = li.split(" : ")
val = val[:-1] # abort \n
if key in keep_val:
continue
elif key not in keep_str:
val = self.int_float_bool(val)
setattr(self.config, key, val)
logger.close()
# self.labeled_loader, self.unlabeled_loader, self.dev_loader, self.special_set = data.get_cifar_loaders_test(
# config)
self.loaders = data.get_data_loaders_test(config)
if config.mu:
self.label_list = range(config.num_label)
in_channels = [int(i) for i in config.dis_channels.split(",")]
self.dis = model.Unet_Discriminator(config, in_channels=in_channels).cuda()
self.ema_dis = model.Unet_Discriminator(config, in_channels=in_channels).cuda()
else:
self.dis = model.Discriminative(config).cuda()
self.ema_dis = model.Discriminative(config).cuda() # , ema=True).cuda()
if config.mu:
self.gen = nn.ModuleList()
self.gen.append(model.UNetWithResnetEncoder(n_classes=3, res=config.gen_mode).cuda())
for i in range(config.num_label-1):
self.gen.append(model.ResnetDecoder_skip(n_classes=3, res=config.gen_mode).cuda())
elif hasattr(self.config, 'gen_mode') and self.config.gen_mode != "non":
self.gen = model.generator(image_side=config.image_side,
noise_size=config.noise_size,
large=config.double_input_size,
gen_mode=config.gen_mode).cuda()
dis_para = [{'params': self.dis.parameters()}, ]
if 'm' in config.dis_mode: # svhn: 168; cifar:192
self.m_criterion = FocalLoss(gamma=2)
if config.dis_double:
self.dis_dou = model.Discriminative_out(config).cuda()
dis_para.append({'params': self.dis_dou.parameters()})
if config.gen_mode == "z2i":
self.enc = model.Encoder(config.image_side, noise_size=config.noise_size, output_params=True).cuda()
self.d_criterion = nn.CrossEntropyLoss()
if config.consistency_type == 'mse':
self.consistency_criterion = losses.softmax_mse_loss # F.MSELoss() # (size_average=False)
elif config.consistency_type == 'kl':
self.consistency_criterion = losses.softmax_kl_loss # nn.KLDivLoss() # (size_average=False)
else:
pass
self.consistency_weight = 0
disp_str = ''
for attr in sorted(dir(self.config), key=lambda x: len(x)):
if not attr.startswith('__'):
disp_str += '{} : {}\n'.format(attr, getattr(self.config, attr))
sys.stdout.write(disp_str)
sys.stdout.flush()
# for arcface
self.s = 30.0
m = 0.50
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
# for tsne
self.gen_feat = None
self.p_fs = tuple([ int(i) for i in self.config.p_fs.split(",")])
# for mu
self.img_per_cls = None
def int_float_bool(self, value):
try:
value = int(value)
except:
pass
if isinstance(value, str):
try:
value = float(value)
except:
pass
if isinstance(value, str):
if "False" in value:
value = False
elif "true" in value:
value = True
return value
def _get_vis_images(self, labels):
labels = labels.data.cpu()
vis_images = self.special_set.index_select(0, labels)
return vis_images
def gram_matrix(self, y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
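    # The Gram matrix captures channel-wise feature correlations (the style
    # representation used in neural style transfer); dividing by ch * h * w keeps
    # its scale independent of the feature-map size.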
def get_feat(self, images, mu_layers=5, ema=False):
if self.config.mu:
return self.gen[0](images, encode=True)
else:
if ema:
return self.ema_dis(images, feat=True)
return self.dis(images, feat=True)
def eval_true_fake(self, data_loader, max_batch=None):
self.gen.eval()
self.dis.eval()
# if not 'd' in self.config.dis_mode:
# self.metric_fc.eval()
# self.enc.eval()
cnt = 0
unl_acc, gen_acc, max_unl_acc, max_gen_acc = 0., 0., 0., 0.
for i, (images, _) in enumerate(data_loader.get_iter()):
images = Variable(images.cuda(), volatile=True)
if self.config.gen_mode == "z2i":
noise = Variable(torch.Tensor(images.size(0), self.config.noise_size).uniform_().cuda(), volatile=True)
gen_feat = self.get_feat(self.gen(noise))
elif self.config.gen_mode == "i2i":
gen_feat = self.get_feat(self.gen(images))
else:
gen_feat = self.get_feat(self.gen[i%self.config.num_label](images))
unl_feat = self.get_feat(images)
unl_logits = self.dis.out_net(unl_feat)
gen_logits = self.dis.out_net(gen_feat)
unl_logsumexp = log_sum_exp(unl_logits)
gen_logsumexp = log_sum_exp(gen_logits)
##### Monitoring (eval mode)
# true-fake accuracy
unl_acc += torch.mean(nn.functional.sigmoid(unl_logsumexp).gt(0.5).float()).data[0]
gen_acc += torch.mean(nn.functional.sigmoid(gen_logsumexp).gt(0.5).float()).data[0]
# top-1 logit compared to 0: to verify Assumption (2) and (3)
max_unl_acc += torch.mean(unl_logits.max(1)[0].gt(0.0).float()).data[0]
max_gen_acc += torch.mean(gen_logits.max(1)[0].gt(0.0).float()).data[0]
cnt += 1
if max_batch is not None and i >= max_batch - 1: break
return unl_acc / cnt, gen_acc / cnt, max_unl_acc / cnt, max_gen_acc / cnt
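    # Here sigmoid(logsumexp(logits)) plays the role of the real/fake probability in
    # the usual semi-supervised (feature-matching) GAN formulation, so unl_acc and
    # gen_acc measure how often real and generated samples fall on the correct side
    # of 0.5, while the max-logit accuracies check whether the top class logit alone
    # already separates the two.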
def eval(self, data_loader, max_batch=None, ema=False, tri=0):
if ema:
# if self.consistency_weight == 0.:
# return 0.
dis = self.ema_dis
else:
dis = self.dis
if tri == 0:
dis_out = dis.out_net
elif tri == 2:
dis_out = self.dis_dou.out_net3
else: # 1
dis_out = self.dis_dou.out_net2
self.gen.eval()
dis.eval()
# self.enc.eval()
loss, incorrect, cnt = 0, 0, 0
for i, (images, labels) in enumerate(data_loader.get_iter()):
images = Variable(images.cuda(), volatile=True)
labels = Variable(labels.cuda(), volatile=True)
feat = self.get_feat(images, ema=ema)
pred_prob = dis_out(feat)
loss += self.d_criterion(pred_prob, labels).data[0]
cnt += 1
incorrect += torch.ne(torch.max(pred_prob, 1)[1], labels).data.sum()
if max_batch is not None and i >= max_batch - 1: break
return loss / cnt, incorrect
def visualize_iter(self, data_loader=None, bsize=400, iters=1, data_suffix="g", gzlab=-1):
assert data_loader is not None or "g" in data_suffix, "g or loader"
iter_num = lambda a, b: int((len(a) + b - 1) // b)
if data_loader is not None and ("g" not in data_suffix or iters == -1):
iters = iter_num(data_loader, bsize)
elif iters != -1 and self.config.declbl and self.config.mu and not self.config.nstf:
iters = iter_num(data_loader, bsize)
nrow = int(bsize ** 0.5)
start_time = time.time()
first_str = "{} iters: {}".format(data_suffix, iters)
for i in range(iters):
data_suffix2 = data_suffix + "{:03d}".format(i)
self.visualize(data_loader, vis_size=bsize, data_suffix=data_suffix2, nrow=nrow, gzlab=gzlab)
time_str = self.calculate_remaining(start_time, time.time(), float(i) / iters)
sys.stdout.write('\r{}{}'.format(first_str, time_str)) # ta
sys.stdout.flush()
sys.stdout.write('\n')
sys.stdout.flush()
def visualize(self, data_loader=None, vis_size=100, data_suffix="g", nrow=10, gzlab=-1):
self.gen.eval()
self.dis.eval()
# vis_size = 100
lab = None
if self.config.gen_mode == "z2i" and data_loader is None:
if gzlab > 0:
lab = torch.Tensor(range(gzlab))
                lab = lab.unsqueeze(1).repeat(1, vis_size // gzlab).view(-1).long()
labels_oh = Variable(torch.zeros(vis_size, gzlab).scatter_(1, lab.unsqueeze(1), 1).cuda())
noise = Variable(torch.Tensor(vis_size, self.config.noise_size - gzlab).uniform_().cuda())
noise = torch.cat((labels_oh, noise), dim=1)
else:
noise = Variable(torch.Tensor(vis_size, self.config.noise_size).uniform_().cuda())
gen_images = self.gen(noise)
elif self.config.gen_mode == "i2i":
gen_images = []
cnt = 0
while (True):
images, _ = data_loader.next()
images = Variable(images.cuda(), volatile=True)
gen_image = self.gen(images)
gen_images.append(gen_image)
cnt += data_loader.batch_size
if cnt + data_loader.batch_size > vis_size:
break
gen_images = torch.cat(gen_images, 0)
elif "g" not in data_suffix and data_loader is not None: # just image
gen_images = None
i, cnt = 0, 0
while (True):
images, labs = data_loader.next()
images = Variable(images.cuda(), volatile=True)
if i == 0:
gen_images = images
lab = labs
else:
gen_images = torch.cat((gen_images, images), 0)
lab = torch.cat((lab, labs), 0)
i += 1
cnt += data_loader.batch_size
if cnt + data_loader.batch_size > vis_size:
break
elif self.config.declbl and self.config.mu and not self.config.tsne \
and not self.config.nsg and self.config.nstf: # category-wised img in a row
ori_images = []
gen_images = []
for i in range(self.config.num_label):
ori_images.append([])
gen_images.append([])
while (True):
cnt = 0
images, labs = data_loader.next()
for i in range(self.config.num_label):
i_count = sum(labs == i)
if i_count == 0 and len(ori_images[i]) == 0:
cnt += 1
if i_count == 0 or len(ori_images[i]) > 0:
continue
inds = (labs == i).nonzero().squeeze()
ori_images[i] = images[inds,:,:,:][0].unsqueeze(0)
if cnt == 0:
break
ori_images = torch.cat(ori_images, 0)
ori_images = Variable(ori_images.cuda(), volatile=True)
inp_feat = self.gen[0](ori_images, skip_encode=True)
for i in range(self.config.num_label):
gen_image = self.gen[i].decode(inp_feat)
gen_images[i].append(gen_image)
for i in range(self.config.num_label):
gen_images[i] = torch.cat(gen_images[i], 0) # .squeeze(0).transpose(0, 1)
o_size = ori_images.size()
gen_images = torch.stack(gen_images, 0).transpose(0, 1).contiguous().view(-1, o_size[1], o_size[2], o_size[3])
# gen_images = torch.cat(gen_images, 0)
elif self.config.declbl and self.config.mu:
ori_images = []
gen_images = []
for i in range(self.config.num_label):
ori_images.append([])
gen_images.append([])
cnt = 0
img_per_cls = np.zeros(self.config.num_label, dtype=int)
while (True):
images, labs = data_loader.next()
images = Variable(images.cuda(), volatile=True)
inp_feat = self.gen[0](images, skip_encode=True)
for i in range(self.config.num_label):
i_count = sum(labs == i)
if i_count == 0:
continue
img_per_cls[i] += i_count
inds = (labs == i).nonzero().squeeze().cuda()
i_feat = dict()
for j in inp_feat.keys():
i_feat[j] = inp_feat[j][inds,:,:,:]
gen_image = self.gen[i].decode(i_feat)
ori_images[i].append(images[inds,:,:,:])
gen_images[i].append(gen_image)
cnt += data_loader.batch_size
if cnt + data_loader.batch_size > vis_size:
break
for i in range(self.config.num_label):
if len(gen_images[i]) != 0:
ori_images[i] = torch.cat(ori_images[i], 0)
gen_images[i] = torch.cat(gen_images[i], 0)
elif self.config.mu: # mu
ori_images = []
gen_images = []
cnt = 0
img_per_cls = data_loader.batch_size # // self.config.num_label
while (True):
images, _ = data_loader.next()
images = Variable(images.cuda(), volatile=True)
ori_images.append(images)
inp_feat = self.gen[0](images, skip_encode=True) # , [range(i*img_per_cls, (i+1)*img_per_cls)], skip_encode=True)
for i in range(self.config.num_label):
gen_image = self.gen[i].decode(inp_feat)
gen_images.append(gen_image)
cnt += img_per_cls * self.config.num_label
if cnt + img_per_cls * self.config.num_label > vis_size:
break
ori_images = torch.cat(ori_images, 0)
gen_images = torch.cat(gen_images, 0)
# for tsne
if "g" in data_suffix and self.config.tsne:
if self.config.mu and self.config.declbl: # may diff # every cls
if self.gen_feat is None:
self.gen_feat = [None] * self.config.num_label
for i in range(self.config.num_label):
if img_per_cls[i] != 0:
feat = self.get_feat(gen_images[i]).data
if self.gen_feat[i] is None:
self.gen_feat[i] = feat
else:
self.gen_feat[i] = torch.cat((self.gen_feat[i], feat), dim=0)
if self.img_per_cls is None:
self.img_per_cls = img_per_cls
else:
self.img_per_cls += img_per_cls
else:
feat = self.get_feat(gen_images).data
if self.config.mu and self.img_per_cls is None:
self.img_per_cls = img_per_cls
if self.gen_feat is None:
self.gen_feat = feat
else:
self.gen_feat = torch.cat((self.gen_feat, feat), dim=0)
if self.config.nsg:
return
if type(gen_images) == list:
ori_images = torch.cat(ori_images, 0)
gen_images = torch.cat(gen_images, 0)
if self.config.declbl and self.config.mu and not self.config.tsne \
and not self.config.nsg and self.config.nstf:
nrow = gen_images.size(0) // self.config.num_label
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.{}.png'.format(self.config.dataset, self.config.suffix, data_suffix))
vutils.save_image(gen_images.data.cpu(), save_path, normalize=True, range=(-1, 1), nrow=nrow)
if "g" in data_suffix and data_loader is not None:
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.{}_ori.png'.format(self.config.dataset, self.config.suffix, data_suffix))
vutils.save_image(ori_images.data.cpu(), save_path, normalize=True, range=(-1, 1), nrow=1)
if self.config.nstf:
return
# dis true img
gen_logits = self.dis.out_net(self.get_feat(gen_images))
# pdb.set_trace()
# self.visualize_accs(gen_images, gen_logits, data_suffix)
# gen_images = Variable(torch.Tensor([batch, 3, 32, 32])).cuda
# gen_logits = Variable(torch.Tensor([batch])).cuda
gen_logsumexp = log_sum_exp(gen_logits)
acc = nn.functional.sigmoid(gen_logsumexp).gt(0.5).data.long()
max_acc = gen_logits.max(1)[0].detach().gt(0.0).data.long()
# acc_images[1-acc,:,:,:] *= 0
        acc_ind = acc.unsqueeze(1).repeat(1, gen_images.nelement() //
                                          gen_images.size(0)).view(gen_images.size())
acc_ind = acc_ind.float()
err_ind = 1 - acc_ind
if acc_ind.sum() > 0:
acc_images = gen_images.clone().data # acc img
acc_images -= 2 * err_ind
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.{}ac.png'.format(self.config.dataset, self.config.suffix,
data_suffix))
vutils.save_image(acc_images.cpu(), save_path, normalize=True, range=(-1, 1), nrow=nrow)
# gen_err
if err_ind.sum() > 0:
err_images = gen_images.clone().data
# err_images[acc,:,:,:] *= 0
err_images -= 2 * acc_ind
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.{}er.png'.format(self.config.dataset, self.config.suffix,
data_suffix))
vutils.save_image(err_images.cpu(), save_path, normalize=True, range=(-1, 1), nrow=nrow)
# acc_images[1-max_acc,:,:,:] *= 0
        acc_ind = max_acc.unsqueeze(1).repeat(1, gen_images.nelement() //
                                              gen_images.size(0)).view(gen_images.size())
acc_ind = acc_ind.float()
err_ind = 1 - acc_ind
if acc_ind.sum() > 0:
acc_images = gen_images.clone().data # max_acc img
acc_images -= 2 * err_ind
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.{}mac.png'.format(self.config.dataset, self.config.suffix,
data_suffix))
vutils.save_image(acc_images.cpu(), save_path, normalize=True, range=(-1, 1), nrow=nrow)
if err_ind.sum() > 0:
# max_gen_err
err_images = gen_images.clone().data
# err_images[max_acc,:,:,:] *= 0
err_images -= 2 * acc_ind
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.{}mer.png'.format(self.config.dataset, self.config.suffix,
data_suffix))
vutils.save_image(err_images.cpu(), save_path, normalize=True, range=(-1, 1), nrow=nrow)
# record report
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.{}.txt'.format(self.config.dataset, self.config.suffix, data_suffix))
save_str = ""
topk = self.config.num_label
val, ind = torch.topk(gen_logits, topk)
val, ind = val.data.cpu().numpy(), ind.data.cpu().numpy()
acc, max_acc = acc.cpu().numpy(), max_acc.cpu().numpy()
save_str += "sum{}_m{}/{} ".format(acc.sum(), max_acc.sum(), acc.shape[0])
if lab is not None: # real data should have label
lab = lab.numpy()
acc_str_row = lambda a: "{}_m{} {}: ".format(acc[a], max_acc[a], lab[a])
pred_accumu = lab[ind[:, 0] == lab]
lab_accumu = lab
else:
acc_str_row = lambda a: "{}_m{}: ".format(acc[a], max_acc[a])
pred_accumu = ind[max_acc * acc > 0, 0]
lab_accumu = ind[:, 0]
# accumulate all labels
for i in range(gen_logits.size(1)):
save_str += "{}({}/{}) ".format(i, np.sum(pred_accumu == i), np.sum(lab_accumu == i))
save_str += "\n"
for i in range(vis_size):
save_str += acc_str_row(i)
for j in range(topk):
save_str += "{}({}) ".format(ind[i, j], val[i, j])
save_str += "\n"
        logger = open(save_path, 'w')
        logger.write(save_str)
        logger.close()
def tsne(self, data_suffix="u"):
# g check
if not hasattr(self.config, 'gen_mode') or self.config.gen_mode == "non":
data_suffix = data_suffix.replace("g", "")
# value check
if data_suffix == "":
print("No suffix!")
return
# get real feature
cifar_feats = lbls = None
u_lbls = g_lab = None
if "u" in data_suffix:
cifar_feats, u_lbls = self.eval_feats(self.loaders[0], data="u")
lbls = u_lbls.copy()
# if self.config.te:
# cifar_feats = cifar_feats[:1000]
# lbls = lbls[:1000]
if "l" in data_suffix :
if not len(self.loaders) > 2:
print("You cannot plot {}".format(data_suffix))
return
feats, l_lbls = self.eval_feats(self.loaders[2], data="l")
if cifar_feats is None:
cifar_feats = feats
lbls = l_lbls
else:
pass
# cifar_feats = np.concatenate((cifar_feats, feats), axis=0)
# lbls = np.concatenate((lbls, l_lbls), axis=0)
if "d" in data_suffix:
feats, d_lbls = self.eval_feats(self.loaders[1], data="d")
if cifar_feats is None:
cifar_feats = feats
lbls = d_lbls
else:
cifar_feats = np.concatenate((cifar_feats, feats), axis=0)
lbls = np.concatenate((lbls, d_lbls), axis=0)
num_label = self.config.num_label
g_offset = 20 if self.config.dataset == "coil20" else 10
# get fake feature
if "g" in data_suffix:
if self.config.mu and self.config.declbl:
num_label += self.config.num_label
g_lab = []
for i in range(self.config.num_label):
g_lab.append(np.array([g_offset+i]*self.img_per_cls[i]))
g_lab = np.concatenate(g_lab)
elif self.config.mu:
num_label += self.config.num_label
                iter_num = self.gen_feat.shape[0] // (self.img_per_cls * self.config.num_label)
g_lab = np.tile(np.arange(g_offset, g_offset+self.config.num_label).repeat(self.img_per_cls), iter_num)
else:
num_label += 1
g_lab = np.array([g_offset]).repeat(self.gen_feat.shape[0])
if cifar_feats is None:
cifar_feats = self.gen_feat
lbls = g_lab
else:
cifar_feats = np.concatenate((cifar_feats, self.gen_feat), axis=0)
lbls = np.concatenate((lbls, g_lab), axis=0)
data_num = [u_lbls.shape[0], g_lab.shape[0]] \
if "g" in data_suffix and "u" in data_suffix else None
self.plot_scatter(cifar_feats, lbls, data_suffix, data_num)
if not self.config.banl == "":
# pdb.set_trace()
data_suffix2 = data_suffix+"_" + self.config.banl
banl = str(self.config.banl)
ban_e = [int(i) for i in banl.split(",")]
ban_ind = np.array([], dtype=int)
for i in ban_e:
ban_ind = np.concatenate((ban_ind, np.where(lbls == i)[0]), axis=0)
num_label -= 1
if self.config.mu:
ban_ind = np.concatenate((ban_ind, np.where(lbls == g_offset+i)[0]), axis=0)
num_label -= 1
# ban_ind.sort()
mask = np.ones(len(lbls), dtype=bool)
mask[ban_ind] = False
lbls2 = lbls[mask]
cifar_feats2 = cifar_feats[mask]
if self.config.mu:
num_ul = num_label // 2
else:
num_ul = num_label - 1
data_num = [(u_lbls.shape[0]//self.config.num_label) * num_ul, g_lab.shape[0]] \
if "g" in data_suffix and "u" in data_suffix else None
self.plot_scatter(cifar_feats2, lbls2, data_suffix2, data_num)
if not self.config.showlab == "":
data_num = [u_lbls.shape[0]//self.config.num_label, g_lab.shape[0]] \
if "g" in data_suffix and "u" in data_suffix else None
for i in self.config.showlab.split(","):
data_suffix2 = data_suffix+"_" + i
show_e = int(i)
mask = np.where(lbls == show_e)[0]
mask = np.concatenate((mask, np.where(lbls == g_offset)[0]), axis=0)
# ban_ind.sort()
lbls2 = lbls[mask]
cifar_feats2 = cifar_feats[mask]
self.plot_scatter(cifar_feats2, lbls2, data_suffix2, data_num)
def plot_scatter(self, feats, y_dist, data_suffix, data_num=None):
# x_dist = ts_feat
# y_dist = lbls
print("Plot tsne {}".format(data_suffix))
splitul = False
if "u" in data_suffix and "l" in data_suffix:
print("Also Plot tsne {}2".format(data_suffix))
assert data_num is not None, "data_num is None"
splitul = True
if self.config.te:
print("TSNE point num: {}".format(len(y_dist)))
x_dist = TSNE(n_components=2).fit_transform(feats)
x_dist *= self.config.p_scale
if self.config.te:
print("after TSNE transform")
# plot
plt.ioff()
# fig = plt.figure(figsize=self.p_fs, dpi=self.config.p_d)
# fig.add_subplot(111)
fig, ax = plt.subplots(figsize=self.p_fs, dpi=self.config.p_d)
if self.config.num_label <= 10:
colors = {-1: 'lightblue',
0: '#FF7070', 1: '#FFAA70', 2: '#FFEB62', 3: '#C1FF62', 4: '#68FFAF',
5: '#68FFFF', 6: '#76BBFF', 7: '#767FFF', 8: '#A476FF', 9: '#FF76FF',
10: '#D20000', 11: '#C95000', 12: '#C9AE00', 13: '#78C900', 14: '#00C95E',
15: '#03ACAC', 16: '#145FAB', 17: '#353EB9', 18: '#6134B9', 19: '#CA46CA'}
# 0: 'salmon', 1: 'yellow', 2: 'lime', 3: 'orange', 4: 'dodgerblue',
# 5: 'skyblue', 6: 'violet', 7: 'cyan', 8: 'pink', 9: 'palegreen',
# 10: 'darkred', 11: 'tan', 12: 'limegreen', 13: 'darkorange', 14: 'steelblue',
# 15: 'royalblue', 16: 'darkviolet', 17: 'darkcyan', 18: 'deeppink', 19: 'darkseagreen'}
else:
colors = {-1: 'lightblue',
0: '#FFA680', 1: '#FFC980', 2: '#FFED80', 3: '#EFFF80', 4: '#CBFF80', 5: '#A6FF80', 6: '#82FF80', 7: '#80FFA2', 8: '#80FFC6', 9: '#80FFEB',
10: '#80F1FF', 11: '#80CDFF', 12: '#80A9FF', 13: '#8084FF', 14: '#A080FF', 15: '#C480FF', 16: '#E980FF', 17: '#FF80F3', 18: '#FF80CF', 19: '#FF80AB',
20: '#C00000', 21: '#C03600', 22: '#C06D00', 23: '#C0A300', 24: '#A6C000', 25: '#70C000', 26: '#3AC000', 27: '#03C000', 28: '#00C033', 29: '#00C06A',
30: '#00C0A0', 31: '#00AAC0', 32: '#0073C0', 33: '#003DC0', 34: '#0006C0', 35: '#3000C0', 36: '#6600C0', 37: '#9D00C0', 38: '#C000AD', 39: '#C00076'}
unique_lbl = np.array(sorted(np.unique(y_dist)))
num_gls = len(colors)-1 # lbl<10: 21-1; el: 41-1
for i in range(-1, num_gls): # remove labels which are outside y_dist
if i not in unique_lbl:
colors.pop(i)
# remap key and label, start_label: ?; last lbl: num_gls
num_lbl = len(unique_lbl)
if self.config.mu:
g_min_lbl = num_gls - self.config.num_label + 1
else:
g_min_lbl = num_gls
if self.config.te:
print("unique_lbl: {}".format(unique_lbl))
print("num: {}; g_num: {}; gml: {}".format(num_lbl, np.sum(unique_lbl >= 10), g_min_lbl))
for i in range(num_lbl):
ori_lbl = unique_lbl[num_lbl-1-i]
new_lbl = num_gls-i
colors[new_lbl] = colors[ori_lbl]
del colors[ori_lbl]
y_dist[y_dist == ori_lbl] = new_lbl
co_keys = sorted(colors.keys())
co_list = [colors[i] for i in co_keys]
cm = LinearSegmentedColormap.from_list(
'plotcm', co_list, N=len(co_list))
for i in co_keys:
g_mask = y_dist == i
if i < g_min_lbl:
plt.scatter(x_dist[g_mask, 0], x_dist[g_mask, 1], c=colors[i], cmap=cm, marker="x",
s=self.config.p_s, alpha=self.config.p_alpha)
else:
plt.scatter(x_dist[g_mask, 0], x_dist[g_mask, 1], c=colors[i], cmap=cm,
marker="o", facecolors='none', edgecolors=colors[i],
s=self.config.p_s, alpha=self.config.p_alpha)
# sc = plt.scatter(x_dist[:, 0], x_dist[:, 1], c=y_dist, cmap=cm, marker=",",
# s=self.config.p_s, alpha=self.config.p_alpha)
x_min, x_max = x_dist[:, 0].min() - 1, x_dist[:, 0].max() + 1
y_min, y_max = x_dist[:, 1].min() - 1, x_dist[:, 1].max() + 1
plt.title('TSNE {}'.format(data_suffix))
plt.axis((x_min, x_max, y_min, y_max))
cax, _ = matplotlib.colorbar.make_axes(ax)
normalize = matplotlib.colors.Normalize(vmin=min(unique_lbl), vmax=max(unique_lbl))
cb = matplotlib.colorbar.ColorbarBase(cax, cmap=cm, norm=normalize)
# cb = plt.colorbar(cm)
cb.set_ticks([0, max(colors.keys())])
cb.set_ticklabels(["", ""])
fig.canvas.draw()
# save as image
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.tsne.{}.png'.format(self.config.dataset, self.config.suffix, data_suffix))
plt.savefig(save_path, bbox_inches='tight')
plt.close('all')
# split u, l ver
if splitul:
data_suffix += '2'
colors = {0: 'lightblue', 1: 'g', 2: 'r'}
# lbl_str = {0: 'unlabel', 1: 'label', 2: 'gen'}
# from matplotlib.colors import LinearSegmentedColormap
cm = LinearSegmentedColormap.from_list(
'splitul', colors.values(), N=len(colors))
ist = {0: 0, 1: data_num[0], 2: - data_num[1]}
ied = {0: data_num[0], 2: None} # 0: u_lbls.shape[0], 1: u_lbls.shape[0] + l_lbls.shape[0]
plt.ioff()
fig = plt.figure(figsize=self.p_fs, dpi=self.config.p_d)
fig.add_subplot(111)
# draw the figure first...
for g in colors.keys(): # u, g
if g == 1:
if "_" in data_suffix:
continue
assert len(self.loaders) >= 4, "no label ind"
ix = self.loaders[3]
ix = ix[ix < x_dist.shape[0]]
y_dist[ix] = g
else:
y_dist[ist[g]:ied[g]] = g
sc = plt.scatter(x_dist[:, 0], x_dist[:, 1], c=y_dist, cmap=cm, marker=",",
s=self.config.p_s, alpha=self.config.p_alpha)
plt.title('TSNE {}'.format(data_suffix))
cb = plt.colorbar(sc)
            cb.set_ticks([0, max(colors.keys())])
cb.set_ticklabels(["", ""])
plt.axis((x_min, x_max, y_min, y_max))
fig.canvas.draw()
# save as image
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.tsne.{}.png'.format(self.config.dataset, self.config.suffix, data_suffix))
plt.savefig(save_path, bbox_inches='tight')
plt.close('all')
# plot ul
data_suffix = data_suffix[:-1]+"3"
colors.pop(2)
cm = LinearSegmentedColormap.from_list(
'gul3', colors.values(), N=len(colors))
fig = plt.figure(figsize=self.p_fs, dpi=self.config.p_d)
fig.add_subplot(111)
g_mask = y_dist != 2
x_dist = x_dist[g_mask]
y_dist = y_dist[g_mask]
sc = plt.scatter(x_dist[:, 0], x_dist[:, 1], c=y_dist, cmap=cm, marker=",",
s=self.config.p_s, alpha=self.config.p_alpha)
plt.title('TSNE {}'.format(data_suffix))
cb = plt.colorbar(sc)
            cb.set_ticks([0, max(colors.keys())])
cb.set_ticklabels(["", ""])
plt.axis((x_min, x_max, y_min, y_max))
fig.canvas.draw()
# save as image
save_path = os.path.join(self.config.save_dir,
'Te{}.FM+VI.{}.tsne.{}.png'.format(self.config.dataset, self.config.suffix, data_suffix))
plt.savefig(save_path, bbox_inches='tight')
plt.close('all')
def eval_feats(self, data_loader, data="u"):
if data == "u":
if not hasattr(self, 'u_lbl'):
self.u_feats, self.u_lbl = self.eval_feat(data_loader)
return self.u_feats, self.u_lbl
elif data == "l":
if not hasattr(self, 'l_lbl'):
self.l_feats, self.l_lbl = self.eval_feat(data_loader)
return self.l_feats, self.l_lbl
elif data == "d":
if not hasattr(self, 'd_lbl'):
self.d_feats, self.d_lbl = self.eval_feat(data_loader)
return self.d_feats, self.d_lbl
else:
print("There is no data={}".format(data))
def eval_feat(self, data_loader, max_batch=None, ema=False):
if ema:
dis = self.ema_dis
else:
dis = self.dis
self.gen.eval()
dis.eval()
# self.enc.eval()
# if self.config.te:
# max_batch = 1
feats = lbls = None
for i, (images, labels) in enumerate(data_loader.get_iter(shuffle=False)):
images = Variable(images.cuda(), volatile=True)
labels = Variable(labels.cuda(), volatile=True)
feat = self.get_feat(images, ema=ema)
if i == 0:
feats = feat.data.clone()
lbls = labels.data.clone()
else:
feats = torch.cat((feats, feat.data.clone()), dim=0)
lbls = torch.cat((lbls, labels.data.clone()), dim=0)
if max_batch is not None and i >= max_batch - 1: break
feats, lbls = feats.cpu().numpy(), lbls.cpu().numpy()
return feats, lbls
def calculate_remaining(self, t1, t2, progress): # ta
elapsed_time = t2 - t1
if (progress > 0):
remaining_time = elapsed_time * (1 / progress) - elapsed_time
else:
remaining_time = 0
# elapsed time
esec = int(elapsed_time % 60)
emin = int((elapsed_time // 60) % 60)
ehr = int(elapsed_time / 3600)
# remaining_time
rsec = int(remaining_time % 60)
rmin = int((remaining_time // 60) % 60)
rhr = int(remaining_time / 3600)
time_str = '[{:8.2%}], {:3d}:{:2d}:{:2d}<{:3d}:{:2d}:{:2d} '.format(progress, ehr, emin, esec, rhr, rmin, rsec)
time_str = '| ' + time_str
return time_str
def load_model(self, net, net_label, epo_label): # ta
load_filename = 'VI.{}_{}_net_{}.pth'.format(self.config.suffix, epo_label, net_label)
load_path = os.path.join(self.config.save_dir, load_filename)
if not os.path.exists(load_path):
print("There is no {}!".format(load_filename))
return
load_net = torch.load(load_path)
net.cpu()
model.load_my_state_dict(net, load_net)
if torch.cuda.is_available():
net.cuda()
def resume(self, epo_label): # ta
# load old
self.load_model(self.dis, 'D', epo_label)
if hasattr(self.config, 'con_coef'):
self.load_model(self.ema_dis, 'M', epo_label)
if hasattr(self.config, 'gen_mode') and self.config.gen_mode != "non":
self.load_model(self.gen, 'G', epo_label)
if hasattr(self.config, 'gen_mode') and self.config.gen_mode == "z2i":
self.load_model(self.enc, 'E', epo_label)
if hasattr(self.config, 'dis_double') and self.config.dis_double:
self.load_model(self.dis_dou, 'D2', epo_label)
def test(self):
config = self.config
batch_per_epoch = int((len(self.loaders[0]) + config.train_batch_size - 1) / config.train_batch_size)
iter_num = batch_per_epoch * (config.last_epochs - 1)
if config.last_epo_lbl != 0:
iter_num = config.last_epo_lbl
if config.mu:
config.suffix = "{}_s{}".format(config.suffix, config.train_step)
self.resume(iter_num)
# turn save_dir into another folder
self.config.save_dir = os.path.join(self.config.save_dir, 'T{}'.format(self.config.suffix))
if not os.path.exists(self.config.save_dir):
os.makedirs(self.config.save_dir)
gzlab = -1
if config.gzlab:
gzlab = config.num_label
if config.te:
config.gen_iter = 2
if config.gen_iter == 1:
if config.gen_mode == "z2i":
self.visualize(gzlab=gzlab)
else:
self.visualize(self.loaders[0])
else:
if config.gen_mode == "z2i":
self.visualize_iter(bsize=100, iters=config.gen_iter,
data_suffix="g", gzlab=gzlab)
else:
self.visualize_iter(data_loader=self.loaders[0], bsize=self.loaders[0].batch_size,
iters=config.gen_iter, data_suffix="g", gzlab=gzlab)
if config.alldata:
print("loader num: {}".format(len(self.loaders)))
self.visualize_iter(self.loaders[1], 400, data_suffix="d")
self.visualize_iter(self.loaders[0], 400, data_suffix="u")
if len(self.loaders) >= 3:
self.visualize_iter(self.loaders[2], 400, data_suffix="l")
if config.tsne:
if hasattr(self, 'gen'):
if self.config.mu and self.config.declbl:
if config.gen_iter != -1:
assert config.gen_iter * self.loaders[0].batch_size <= len(self.loaders[0]), \
"out of dataset: {}*{} > {}".format(config.gen_iter, self.loaders[0].batch_size,
len(self.loaders[0]))
img_num = config.gen_iter * self.loaders[0].batch_size // config.num_label
self.img_per_cls = [img_num] * config.num_label
for i in range(config.num_label):
self.gen_feat[i] = self.gen_feat[i][:img_num]
self.gen_feat = torch.cat(self.gen_feat, 0)
print("# gen_feat: {}; ipc: {}".format(self.gen_feat.size(0), self.img_per_cls))
self.gen_feat = self.gen_feat.cpu().numpy()
if config.te:
# self.tsne("g") # t1\
self.tsne("gul")
# self.tsne("gu")
else:
if config.mu:
self.tsne("g")
self.tsne("gu")
self.tsne("gl")
self.tsne("gul")
self.tsne("gd")
if __name__ == '__main__':
cc = config.cifarmg_config()
parser = argparse.ArgumentParser(description='cmgmd_tester.py')
parser.add_argument('-suffix', default='mg0', type=str, help="Suffix added to the save images.")
parser.add_argument('-r', dest='resume', action='store_true')
parser.add_argument('-dataset', default=cc.dataset, type=str,
help="dataset: cifar, stl10, coil20")
parser.add_argument('-last_epochs', default=cc.last_epochs, type=int,
help="last epochs")
parser.add_argument('-last_epo_lbl', default=0, type=int,
help="last epoch lbl")
parser.add_argument('-gen_iter', default=1, type=int,
help="gen iteration times. def: 1; full: -1")
parser.add_argument('-cmp', default='winter', type=str,
help="color map name")
parser.add_argument('-alldata', dest='alldata', action='store_true',
help="plot dev, unl, lab image same as gen")
parser.add_argument('-gzlab', dest='gzlab', action='store_true',
help="Gen z add label")
parser.add_argument('-nsg', dest='nsg', action='store_true',
help="no save gen img")
parser.add_argument('-nstf', dest='nstf', action='store_true',
help="no save gen true/fake ana")
parser.add_argument('-tsne', dest='tsne', action='store_true',
help="plot tsne")
parser.add_argument('-banl', default='', type=str,
help="the lab num be ignored on tsne. ex: 0,1,2")
parser.add_argument('-showlab', default='', type=str,
help="the only lab num be shown on tsne. ex: 0,1")
parser.add_argument('-p_fs', default='12,9', type=str,
help="plot fig size, def: 12,9")
parser.add_argument('-p_d', default=300, type=int,
help="plot dpi, def: 300")
parser.add_argument('-p_scale', default=20.0, type=float,
help="plot point scale, def: 20.0")
parser.add_argument('-p_s', default=20.0, type=float,
help="plot s, def: 20.0")
parser.add_argument('-p_alpha', default=0.5, type=float,
help="plot alpha, def:0.5")
parser.add_argument('-te', dest='te', action='store_true',
help="just for test colorbar")
parser.add_argument('-mu', dest='mu', action='store_true',
help="mu series: G(En De), D(Classifier)")
parser.add_argument('-dl', dest='declbl', action='store_true',
help="mu series: decode same label image")
parser.set_defaults(alldata=False)
parser.set_defaults(gzlab=False)
parser.set_defaults(tsne=False)
parser.set_defaults(nsg=False)
parser.set_defaults(nstf=False)
parser.set_defaults(te=False)
parser.set_defaults(mu=False)
parser.set_defaults(declbl=False)
    parser.add_argument('-image_side', default=32, type=int,
help="cifar: 32, stl10: 96")
parser.add_argument('-noise_size', default=cc.noise_size, type=int,
help="gen noise size")
parser.add_argument('-train_batch_size', default=cc.train_batch_size, type=int,
help="labeled batch size")
parser.add_argument('-train_batch_size_2', default=cc.train_batch_size_2, type=int,
help="unlabeled batch size")
parser.add_argument('-gen_mode', default=cc.gen_mode, type=str,
help="gen model mode: z2i, i2i")
parser.add_argument('-d', dest='double_input_size', action='store_true',
help="double input size")
parser.add_argument('-f', dest='flip', action='store_true',
help="flip input or not")
parser.add_argument('-dd', dest='dis_double', action='store_true',
help="dis double")
parser.add_argument('-dt', dest='dis_triple', action='store_true',
help="dis tri")
parser.set_defaults(resume=False)
parser.set_defaults(double_input_size=cc.double_input_size)
parser.set_defaults(flip=cc.flip)
args = parser.parse_args()
tester = Tester(cc, args)
tester.test()
```
#### File: jjYukgm/SSL_multi_decoder/multi_decoder_trainer.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.utils as vutils
import data
import config
import model
import random
import time
import os, sys
import math
import argparse
from collections import OrderedDict
import numpy as np
from utils import *
import losses, ramps
import copy
# from metrics import ArcMarginProduct # cosine distance
from losses import FocalLoss
# import random
# import pdb
# pdb.set_trace()
# pdb.set_trace = lambda: None
class Trainer(object):
def __init__(self, config, args):
self.config = config
for k, v in args.__dict__.items():
setattr(self.config, k, v)
setattr(self.config, 'save_dir', '{}_log'.format(self.config.dataset))
if not os.path.exists(self.config.save_dir):
os.makedirs(self.config.save_dir)
disp_str = ''
for attr in sorted(dir(self.config), key=lambda x: len(x)):
if not attr.startswith('__'):
disp_str += '{} : {}\n'.format(attr, getattr(self.config, attr))
sys.stdout.write(disp_str)
sys.stdout.flush()
self.labeled_loader, self.unlabeled_loader, self.dev_loader, self.special_set \
= data.get_data_loaders(config)
self.gen = nn.ModuleList()
self.gen.append(model.UNetWithResnetEncoder(n_classes=3, res=config.gen_mode).cuda())
for i in range(config.num_label-1):
self.gen.append(model.ResnetDecoder_skip(n_classes=3, res=config.gen_mode).cuda())
if config.train_step != 1:
batch_per_epoch = int((len(self.unlabeled_loader) + config.train_batch_size - 1) / config.train_batch_size)
            if config.step1_epo_lbl != 0:
epo_label = config.step1_epo_lbl
else:
epo_label = self.config.step1_epo * batch_per_epoch
self.load_model(self.gen, 'G', epo_label, suffix=config.suffix+"_s1")
for i in range(1, config.num_label): # clone decoders
self.gen[i].up_blocks = copy.deepcopy(self.gen[0].up_blocks)
# create dis
in_channels = [int(i) for i in config.dis_channels.split(",")]
self.dis = model.Unet_Discriminator(config, in_channels=in_channels, ucnet=config.dis_uc).cuda()
self.ema_dis = model.Unet_Discriminator(config, in_channels=in_channels, ucnet=config.dis_uc).cuda() # , ema=True).cuda()
if hasattr(self, 'dis'):
dis_para = [{'params': self.dis.parameters()},]
if config.dis_double:
self.dis_dou = model.Discriminative_out(config).cuda()
dis_para.append({'params': self.dis_dou.parameters()})
self.dis_optimizer = optim.Adam(dis_para, lr=config.dis_lr, betas=(0.5, 0.999))
# self.dis_optimizer = optim.SGD(self.dis.parameters(), lr=config.dis_lr,
# momentum=config.momentum,
# weight_decay=config.weight_decay,
# nesterov=config.nesterov)
if hasattr(self, 'gen'):
if config.gop == 'SGD':
self.gen_optimizer = optim.SGD(self.gen.parameters(), lr=config.gen_lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
nesterov=config.nesterov)
else:
self.gen_optimizer = optim.Adam(self.gen.parameters(), lr=config.gen_lr, betas=(0.0, 0.999))
self.d_criterion = nn.CrossEntropyLoss()
if config.consistency_type == 'mse':
self.consistency_criterion = losses.softmax_mse_loss # F.MSELoss() # (size_average=False)
elif config.consistency_type == 'kl':
self.consistency_criterion = losses.softmax_kl_loss # nn.KLDivLoss() # (size_average=False)
else:
pass
self.consistency_weight = 0
# add step into data suffix
self.config.suffix+="_s{}".format(config.train_step)
log_path = os.path.join(self.config.save_dir, '{}.FM+VI.{}.txt'.format(
self.config.dataset, self.config.suffix))
if config.resume:
            self.logger = open(log_path, 'a')
        else:
            self.logger = open(log_path, 'w')
self.logger.write(disp_str)
# for arcface
self.s = 30.0
m = 0.50
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
# for dg start epoch
if config.dg_start > 0:
self.dg_flag = False
else:
self.dg_flag = True
# for enc lab update:
self.lab_feat_cen = None
if hasattr(self, 'dis'):
            print(self.dis)
def _get_vis_images(self, labels):
labels = labels.data.cpu()
vis_images = self.special_set.index_select(0, labels)
return vis_images
def gram_matrix(self, y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
def arcface_loss(self, x, linear, label):
w = linear.weight
cosine = F.linear(F.normalize(x), F.normalize(w))
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
# if self.easy_margin:
# # phi = torch.where(cosine > 0, phi, cosine)
# phi = phi * (cosine > 0).float() + cosine *(cosine <= 0).float()
# else:
# phi = torch.where(cosine > self.th, phi, cosine - self.mm)
phi = phi * (cosine > self.th).float() + (cosine - self.mm)*(cosine <= self.th).float()
# --------------------------- convert label to one-hot ---------------------------
# one_hot = torch.zeros(cosine.size(), device='cuda')
one_hot = Variable(torch.zeros(cosine.size()).cuda())
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
# -------------torch.where(out_i = {x_i if condition_i else y_i) -------------
output = (one_hot * phi) + ((1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4
output *= self.s
return output
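    # Illustrative call pattern for the ArcFace head (names here are hypothetical,
    # sketched from the method signature, not taken from the training steps below):
    #   logits = self.arcface_loss(features, classifier_linear, labels)
    #   loss = self.d_criterion(logits, labels)
    # Only the target-class cosine receives the additive angular margin m; the
    # scale s = 30 restores a usable logit range before the softmax.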
def step1_train(self, iter=None):
# use unlabeled data train
self.gen.train()
# lab_images, lab_labels = self.labeled_loader.next()
# lab_images, lab_labels = Variable(lab_images.cuda()), Variable(lab_labels.cuda())
unl_images,_ = self.unlabeled_loader.next()
unl_images = Variable(unl_images.cuda())
# lab_cons_loss = 0
unl_cons_loss = 0
# 1 class/ gen[decoder]
gen_feat2 = self.gen[0](unl_images, skip_encode=True)
# for i in range(self.config.num_label):
gen_images2 = self.gen[0].decode(gen_feat2)
if self.config.rssim:
unl_cons_loss += - losses.SSIM(gen_images2, unl_images)
else:
unl_cons_loss += nn.MSELoss()(gen_images2, unl_images)
# mask = (lab_labels == i).nonzero()
# if mask.nelement() <= 1:
# continue
# input_images = lab_images[mask[:,0]]
# gen_feat = self.gen[0](input_images, skip_encode=True)
# gen_images = self.gen[i].decode(gen_feat)
# lab_cons_loss += nn.MSELoss()(gen_images, input_images)
# unl_cons_loss /= self.config.num_label
g_loss = unl_cons_loss
# g_loss = lab_cons_loss + unl_cons_loss
self.gen_optimizer.zero_grad()
g_loss.backward()
self.gen_optimizer.step()
monitor_dict = OrderedDict()
# monitor_dict['lab con loss'] = lab_cons_loss.data[0]
monitor_dict['unl con loss'] = unl_cons_loss.data[0]
return monitor_dict
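    # Step 1 pre-trains the shared encoder and the first decoder as a plain
    # autoencoder on unlabeled images (MSE reconstruction, or negative SSIM when
    # rssim is set); the remaining per-class decoders are cloned from this decoder
    # in __init__ once train_step != 1.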
def step2_train(self, iter=0, labeled=None, vis=False):
config = self.config
self.dis.train()
# self.ema_dis.train()
if config.dis_double: self.dis_dou.train()
if hasattr(self, 'gen'): self.gen.train()
##### train Dis
lab_images, lab_labels = self.labeled_loader.next()
lab_images, lab_labels = Variable(lab_images.cuda()), Variable(lab_labels.cuda())
unl_images, _ = self.unlabeled_loader.next()
unl_images = Variable(unl_images.cuda())
lab_loss = 0
lab_loss2 = 0
cons_loss = 0
nei_loss = 0
tri_loss = 0
ult_loss = 0
dgl_loss = 0
lab_feat = self.gen[0](lab_images, encode=True)
unl_feat = self.gen[0](unl_images, encode=True)
gen_feat = None
# if iter % config.dg_ratio == 0:
gen_feat = self.gen[0](self.get_gens_img(unl_images).detach(), encode=True)
if config.dis_uc:
unl_logits, unl_uc = self.dis(unl_feat, uc=True)
lab_logits, lab_uc = self.dis(lab_feat, uc=True)
else:
unl_logits = self.dis(unl_feat)
lab_logits = self.dis(lab_feat)
if gen_feat is not None:
gen_logits = self.dis(gen_feat)
# Standard classification loss
if config.dis_uc:
lab_loss,_ = losses.uncertainty_loss(self.d_criterion, lab_logits, lab_uc, lab_labels)
else:
lab_loss = self.d_criterion(lab_logits, lab_labels)
if config.dis_double:
lab_logits2 = self.dis_dou(lab_feat)
lab_loss2 += self.d_criterion(lab_logits2, lab_labels)
if config.dis_triple:
lab_logits2 = self.dis_dou.out_net3(lab_feat)
lab_loss2 += self.d_criterion(lab_logits2, lab_labels)
lab_loss *= config.dl_weight
lab_loss2 *= config.dl_weight
# GAN true-fake loss: sumexp(logits) is seen as the input to the sigmoid
unl_logsumexp = log_sum_exp(unl_logits)
if gen_feat is not None:
gen_logsumexp = log_sum_exp(gen_logits)
true_loss = - 0.5 * torch.mean(unl_logsumexp) + 0.5 * torch.mean(F.softplus(unl_logsumexp))
fake_loss = 0
if gen_feat is not None:
fake_loss = 0.5 * torch.mean(F.softplus(gen_logsumexp))
unl_loss = config.du_weight * true_loss
if self.dg_flag:
unl_loss += config.dg_weight * fake_loss
# ema consistency loss
if config.nei_coef > 0 or config.con_coef > 0:
ema_unl_logits = self.ema_dis(unl_feat)
ema_unl_logits = Variable(ema_unl_logits.detach().data, requires_grad=False)
if config.con_coef > 0:
if config.dis_uc:
cons_loss,_ = losses.uncertainty_loss(self.consistency_criterion,
unl_logits, unl_uc, ema_unl_logits)
else:
cons_loss = self.consistency_criterion(unl_logits, ema_unl_logits)
cons_loss *= self.consistency_weight * config.con_coef
cons_loss /= (config.train_batch_size + config.train_batch_size_2)
if config.dis_double and config.dt_weight > 0: # todo: add double, triple
unl_logits2 = self.dis_dou(unl_feat)
_, unl_lab1 = torch.max(unl_logits, 1)
_, unl_lab2 = torch.max(unl_logits2, 1)
tri_loss += self.d_criterion(unl_logits, unl_lab2)
tri_loss += self.d_criterion(unl_logits2, unl_lab1)
# GAN true-fake loss
unl_logsumexp = log_sum_exp(unl_logits2)
gen_logsumexp = log_sum_exp(self.dis_dou(gen_feat))
true_loss = - 0.5 * torch.mean(unl_logsumexp) + 0.5 * torch.mean(F.softplus(unl_logsumexp))
fake_loss = 0.5 * torch.mean(F.softplus(gen_logsumexp))
ult_loss += true_loss + fake_loss
if config.dis_triple:
unl_logits3 = self.dis_dou.out_net3(unl_feat)
_, unl_lab3 = torch.max(unl_logits3, 1)
tri_loss += self.d_criterion(unl_logits, unl_lab3)
tri_loss += self.d_criterion(unl_logits2, unl_lab3)
tri_loss += self.d_criterion(unl_logits3, unl_lab1)
tri_loss += self.d_criterion(unl_logits3, unl_lab2)
unl_logsumexp = log_sum_exp(unl_logits3)
gen_logsumexp = log_sum_exp(self.dis_dou.out_net3(gen_feat))
true_loss = - 0.5 * torch.mean(unl_logsumexp) + 0.5 * torch.mean(F.softplus(unl_logsumexp))
fake_loss = 0.5 * torch.mean(F.softplus(gen_logsumexp))
ult_loss += true_loss + fake_loss
tri_loss *= config.dt_weight
ult_loss *= config.ut_weight
if config.dgl_weight > 0:
gen_lab_img, new_lbls = self.get_gens_img(lab_images, lbls=lab_labels)
dgl_feat = self.gen[0](gen_lab_img, encode=True)
if config.dis_uc:
dgl_logits, dgl_uc = self.dis(dgl_feat, uc=True)
dgl_loss,_ = losses.uncertainty_loss(self.d_criterion, dgl_logits, dgl_uc, new_lbls)
else:
dgl_logits = self.dis(dgl_feat)
dgl_loss = self.d_criterion(dgl_logits, new_lbls)
dgl_loss *= config.dgl_weight
d_loss = lab_loss + unl_loss + cons_loss + lab_loss2 + tri_loss + ult_loss + dgl_loss
##### Monitoring (train mode)
# true-fake accuracy
unl_acc = torch.mean(nn.functional.sigmoid(unl_logsumexp.detach()).gt(0.5).float())
# top-1 logit compared to 0: to verify Assumption (2) and (3)
max_unl_acc = torch.mean(unl_logits.max(1)[0].detach().gt(0.0).float())
gen_acc = None
if gen_feat is not None:
gen_acc = torch.mean(nn.functional.sigmoid(gen_logsumexp.detach()).gt(0.5).float())
max_gen_acc = torch.mean(gen_logits.max(1)[0].detach().gt(0.0).float())
self.dis_optimizer.zero_grad()
if iter % (config.dg_ratio*config.eg_ratio) == 0:
self.gen_optimizer.zero_grad()
d_loss.backward()
self.dis_optimizer.step()
if iter % (config.dg_ratio*config.eg_ratio) == 0:
self.gen_optimizer.step()
# del no need
# del gen_images
del lab_feat
del unl_feat
del unl_logits
if gen_feat is not None:
del gen_logits
del gen_feat
##### train Gen and Enc
tv_loss = 0
st_loss = 0
fm_loss = 0
gl_loss = 0
gn_loss = 0
gr_loss = 0
gc_loss = 0
ef_loss = 0
el_loss = 0
# enc lab center
if iter % (self.batch_per_epoch*config.eg_ratio) == 0 and \
(config.el_weight > 0 or config.gl_weight > 0):
self.cal_lab_feat_cen()
# update # d / 1 g
if iter % config.dg_ratio == 0:
lab_feat = self.gen[0](lab_images, encode=True).detach()
unl_feat = self.gen[0](unl_images, encode=True).detach()
gen_images = self.get_gens_img(unl_images, spbatch=True, partcode=config.halfgnoise)
unl_images = unl_images[range(gen_images.size(0))]
img_per_gen = gen_images.size(0) // config.num_label
if config.el_weight > 0: # lbl mean cluster
for i in range(config.num_label):
mask = (lab_labels == i).nonzero()
mask_num = mask.nelement()
if mask_num < 1:
continue
part_lab_feat = lab_feat[mask[:,0]]
el_loss += nn.KLDivLoss()(part_lab_feat, self.lab_feat_cen[i].repeat(mask_num, 1))
el_loss *= config.el_weight
if config.ef_weight > 0: # lbl std < ts, total std > ts better
ts = config.ef_ts
for i in range(config.num_label):
mask = (lab_labels == i).nonzero()
mask_num = mask.nelement()
if mask_num <= 1:
continue
part_lab_feat = lab_feat[mask[:,0]]
plf_std = torch.std(part_lab_feat, 0)
ef_loss += torch.mean(torch.max(plf_std - ts,
Variable(torch.zeros(plf_std.size())).cuda()))
ef_loss += ts
# total std
ef_std = torch.std(unl_feat, 0)
ef_loss += torch.mean(torch.max(ts - ef_std, Variable(torch.zeros(ef_std.size())).cuda()))
ef_loss *= config.ef_weight
if config.gf_weight > 0 or config.gn_weight > 0 or config.gl_weight > 0:
gen_feat = self.gen[0](gen_images, encode=True)
# gen lab feat loss: mean(En(xl)) - mean(En(De(En(xl))))
if config.gl_weight > 0:
for i in range(config.num_label):
gen_unl_feat = torch.mean(gen_feat[range(i*img_per_gen, (i+1)*img_per_gen)], 0)
gl_loss += nn.KLDivLoss()(gen_unl_feat, self.lab_feat_cen[i])
gl_loss *= config.gl_weight
#
# diff_ul = torch.abs(torch.mean(lab_feat, 0) - torch.mean(unl_feat, 0))
# gl_ts = torch.mean(diff_ul) * 2
# for i in range(config.num_label):
# mask = (lab_labels == i).nonzero()
# mask_num = mask.nelement()
# if mask_num < 1:
# continue
# # part_lab_images = lab_images[mask[:,0]]
# # gen_lab_feat = self.gen[0](self.gen[i].decode(
# # self.gen[0](part_lab_images, skip_encode=True)), encode=True)
# mean_mask_feat = lab_feat[mask[:,0]]
# if mask_num != 1:
# mean_mask_feat = torch.mean(mean_mask_feat, 0)
# # gen_lab_feat = torch.mean(gen_lab_feat, 0)
# # gen_unl_feat = self.gen[i].decode(self.gen[0](unl_images, skip_encode=True))
# gen_unl_feat = torch.mean(gen_feat[range(i*img_per_gen, (i+1)*img_per_gen)], 0)
# diff = torch.abs(mean_mask_feat - gen_unl_feat)
# gl_loss += mask_num * \
# torch.mean(torch.max(diff - gl_ts,
# Variable(torch.zeros(diff.size())).cuda()))
# gl_loss /= lab_feat.size(0)
# gl_loss *= config.gl_weight
# Feature matching loss: En(xu) - En(De(En(xu)))
if config.gf_weight > 0:
fm_loss += nn.KLDivLoss()(torch.mean(gen_feat, 0), torch.mean(unl_feat, 0)) + \
torch.mean(torch.abs(torch.std(gen_feat, 0) - torch.std(unl_feat, 0)))
# fm_loss = torch.mean(torch.abs(gen_feat - unl_feat[:gen_feat.size(0)]))
fm_loss *= config.gf_weight
if config.gc_weight > 0:
key_ = "layer_{}".format(model.UNetWithResnetEncoder.DEPTH - 1)
feat_size = self.gen[0](unl_images, skip_encode=True)[key_][:img_per_gen*config.num_label].size()
rand_codes = Variable(torch.rand(feat_size).cuda()) # .unsqueeze(-1).unsqueeze(-1)
gen_rand_feat = self.gen[0](
self.get_gens_img(unl_images, codes=rand_codes), encode=True)
rand_codes = rand_codes.mean(3, True).mean(2, True) # .repeat(config.num_label, 1)
gc_loss = nn.MSELoss()(gen_rand_feat, rand_codes)
gc_loss *= config.gc_weight
# reconstruction loss
if config.gr_weight > 0:
unl_tmp = unl_images[:img_per_gen].repeat(config.num_label, 1, 1, 1)
# blur
# get nn.L1Loss;F.MSELoss;nn.KLDivLoss
if self.config.rssim:
gr_loss = -losses.SSIM()(gen_images, unl_tmp)
else:
gr_loss = nn.MSELoss()(gen_images, unl_tmp)
gr_loss *= config.gr_weight
# could impact the gr
# gen neighbor loss: same => closer; diff => farther
if config.gn_weight > 0:
pos, neg = 0, 0
diff = None
for j in range(config.num_label-1):
gen_feat_j = gen_feat[range(j*img_per_gen, (j+1)*img_per_gen)]
for i in range(j+1, config.num_label):
# if i <= j:
# continue
diff_ = gen_feat_j - \
gen_feat[range(i*img_per_gen, (i+1)*img_per_gen)]
diff_ = torch.mean(torch.abs(diff_), 0, True)
if diff is None:
diff = diff_
else:
diff = torch.cat((diff, diff_), dim=0)
mean_gen_feat_j = torch.mean(gen_feat_j, 0, True).repeat(img_per_gen, 1).detach()
pos += nn.KLDivLoss()(gen_feat_j, mean_gen_feat_j)
gen_feat_j = gen_feat[range((config.num_label-1)*img_per_gen, (config.num_label)*img_per_gen)]
mean_gen_feat_j = torch.mean(gen_feat_j, 0, True).repeat(img_per_gen, 1).detach()
pos += nn.KLDivLoss()(gen_feat_j, mean_gen_feat_j)
# pos /= config.num_label
# diff = torch.mean(diff, 0, True)
neg = torch.mean(torch.max(config.nei_margin - diff, Variable(torch.zeros(diff.size()).cuda())))
# neg /= (config.num_label - 1) * gen_feat.size(1) # * config.num_label
gn_loss = pos + neg # (torch.mean(torch.cat((pos, neg), 0)))
gn_loss *= self.consistency_weight * config.gn_weight
# neighbor loss
if config.nei_coef > 0:
tot_feat = torch.cat((lab_feat, unl_feat), dim=0)
inds = torch.randperm(tot_feat.size(0)).cuda()
# pdb.set_trace()
# topk do
if config.nei_top>1:
_, ema_lbl = torch.topk(ema_unl_logits,config.nei_top,dim=1)
ema_lbl = torch.zeros(ema_unl_logits.size()).cuda().scatter_(1,ema_lbl.data.long(),1)
lab_labels_tmp = torch.zeros(lab_logits.size()).cuda().scatter_(1,lab_labels.data.long().unsqueeze(1),1)
ema_lbl = Variable(torch.cat((lab_labels_tmp, ema_lbl), dim=0))
ema_lbl = ema_lbl[inds]
nei_mask = ema_lbl[:config.train_batch_size] * ema_lbl[config.train_batch_size:]
nei_mask = torch.sum(nei_mask, 1).float() / config.nei_top
else: # top1 do
_, ema_lbl = torch.max(ema_unl_logits, 1)
ema_lbl = torch.cat((lab_labels, ema_lbl), dim=0)
ema_lbl = ema_lbl[inds]
nei_mask = torch.eq(ema_lbl[:config.train_batch_size], ema_lbl[config.train_batch_size:]).float() # nei or not
tot_feat = tot_feat[inds]
diff = tot_feat[:config.train_batch_size] - tot_feat[config.train_batch_size:]
diff = torch.sqrt(torch.mean(diff ** 2, 1))
pos = nei_mask * diff
neg = (1 - nei_mask) * (torch.max(config.nei_margin - diff, Variable(torch.zeros(diff.size())).cuda()) ** 2)
nei_loss = self.consistency_weight * config.nei_coef * \
(torch.mean(pos + neg))
        # tv loss
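        # Total variation penalizes differences between horizontally and vertically
        # adjacent pixels, nudging the decoders toward smoother, less noisy outputs.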
if config.tv_weight > 0:
(_, c_x, h_x, w_x) = gen_images.size()
count_h = c_x * (h_x - 1) * w_x
count_w = c_x * h_x * (w_x - 1)
h_tv = torch.pow((gen_images[:, :, 1:, :] - gen_images[:, :, :-1, :]), 2).sum()
w_tv = torch.pow((gen_images[:, :, :, 1:] - gen_images[:, :, :, :-1]), 2).sum()
tv_loss = config.tv_weight * (h_tv / count_h + w_tv / count_w) / config.train_batch_size
if config.st_weight > 0:
# key = "layer_{}".format(model.UNetWithResnet50Encoder.DEPTH - 1)
# gen_gram = self.gen[0](gen_images, skip_encode=True)
# gen_gram = gen_gram[key]
# gen_gram = self.gram_matrix(gen_gram)
# unl_gram = self.gen[0](unl_images, skip_encode=True)
# unl_gram = unl_gram[key].detach()
# unl_gram = self.gram_matrix(unl_gram)
gen_gram = self.gram_matrix(gen_images)
unl_gram = self.gram_matrix(unl_images)
st_loss += config.st_weight * nn.KLDivLoss()(gen_gram, unl_gram)
# Generator loss
g_loss = Variable(torch.zeros((1,1)), requires_grad=True).cuda() + \
fm_loss + nei_loss + \
ef_loss + el_loss + \
tv_loss + st_loss + \
gl_loss + gn_loss + gr_loss + gc_loss
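        # The leading zero tensor keeps g_loss a differentiable Variable even when
        # every generator term above is disabled (weight 0) and reduces to the
        # Python scalar 0, so the backward() call below remains valid.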
self.gen_optimizer.zero_grad()
g_loss.backward()
self.gen_optimizer.step()
monitor_dict = OrderedDict()
monitor_dict['unl acc'] = unl_acc.data[0]
if gen_acc is not None: monitor_dict['gen acc'] = gen_acc.data[0] * config.dg_ratio
else: monitor_dict['gen acc'] = 0
monitor_dict['max unl acc'] = max_unl_acc.data[0]
if gen_acc is not None: monitor_dict['max gen acc'] = max_gen_acc.data[0] * config.dg_ratio
else: monitor_dict['max gen acc'] = 0
monitor_dict['lab loss'] = lab_loss.data[0]
monitor_dict['unl loss'] = unl_loss.data[0]
if config.dgl_weight > 0: monitor_dict['dgl loss'] = dgl_loss.data[0]
if config.con_coef > 0: monitor_dict['con loss'] = cons_loss.data[0]
if config.dis_double:
monitor_dict['la2 loss'] = lab_loss2.data[0]
if config.dt_weight > 0: monitor_dict['tri loss'] = tri_loss.data[0]
if config.ut_weight > 0: monitor_dict['ult loss'] = ult_loss.data[0]
if hasattr(self, 'gen') and iter % config.dg_ratio == 0:
if config.gf_weight > 0: monitor_dict['fm loss'] = fm_loss.data[0] * config.dg_ratio
if config.ef_weight > 0: monitor_dict['ef loss'] = ef_loss.data[0] * config.dg_ratio
if config.el_weight > 0: monitor_dict['el loss'] = el_loss.data[0] * config.dg_ratio
if config.tv_weight > 0: monitor_dict['tv loss'] = tv_loss.data[0] * config.dg_ratio
if config.st_weight > 0: monitor_dict['st loss'] = st_loss.data[0] * config.dg_ratio
if config.nei_coef > 0: monitor_dict['nei loss'] = nei_loss.data[0] * config.dg_ratio
if config.gl_weight > 0: monitor_dict['gl loss'] = gl_loss.data[0] * config.dg_ratio
if config.gn_weight > 0: monitor_dict['gn loss'] = gn_loss.data[0] * config.dg_ratio
if config.gr_weight > 0: monitor_dict['gr loss'] = gr_loss.data[0] * config.dg_ratio
if config.gc_weight > 0: monitor_dict['gc loss'] = gc_loss.data[0] * config.dg_ratio
# if config.gl_weight > 0: monitor_dict['gl ts'] = gl_ts.data[0] * config.dg_ratio
elif iter % config.dg_ratio != 0:
if config.gf_weight > 0: monitor_dict['fm loss'] = 0
if config.ef_weight > 0: monitor_dict['ef loss'] = 0
if config.el_weight > 0: monitor_dict['el loss'] = 0
if config.tv_weight > 0: monitor_dict['tv loss'] = 0
if config.st_weight > 0: monitor_dict['st loss'] = 0
if config.nei_coef > 0: monitor_dict['nei loss'] = 0
if config.gl_weight > 0: monitor_dict['gl loss'] = 0
if config.gn_weight > 0: monitor_dict['gn loss'] = 0
if config.gr_weight > 0: monitor_dict['gr loss'] = 0
if config.gc_weight > 0: monitor_dict['gc loss'] = 0
# if config.gl_weight > 0: monitor_dict['gl ts'] = 0
return monitor_dict
def eval_true_fake(self, data_loader, max_batch=None):
self.gen.eval()
self.dis.eval()
cnt = 0
unl_acc, gen_acc, max_unl_acc, max_gen_acc = 0., 0., 0., 0.
for i, (images, _) in enumerate(data_loader.get_iter()):
images = Variable(images.cuda(), volatile=True)
unl_feat = self.gen[0](images, encode=True)
gen_feat = self.gen[0](self.get_gens_img(images), encode=True)
unl_logits = self.dis(unl_feat)
gen_logits = self.dis(gen_feat)
unl_logsumexp = log_sum_exp(unl_logits)
gen_logsumexp = log_sum_exp(gen_logits)
##### Monitoring (eval mode)
# true-fake accuracy
unl_acc += torch.mean(nn.functional.sigmoid(unl_logsumexp).gt(0.5).float()).data[0]
gen_acc += torch.mean(nn.functional.sigmoid(gen_logsumexp).gt(0.5).float()).data[0]
# top-1 logit compared to 0: to verify Assumption (2) and (3)
max_unl_acc += torch.mean(unl_logits.max(1)[0].gt(0.0).float()).data[0]
max_gen_acc += torch.mean(gen_logits.max(1)[0].gt(0.0).float()).data[0]
cnt += 1
if max_batch is not None and i >= max_batch - 1: break
return unl_acc / cnt, gen_acc / cnt, max_unl_acc / cnt, max_gen_acc / cnt
def eval(self, data_loader, max_batch=None, ema=False, tri=0):
if ema:
# if self.consistency_weight == 0.:
# return 0.
dis = self.ema_dis
else:
dis = self.dis
if tri == 0:
dis_out = dis
elif tri == 2:
dis_out = self.dis_dou.out_net3
else: # 1
dis_out = self.dis_dou.out_net2
# self.gen.eval()
dis.eval()
loss, incorrect, cnt = 0, 0, 0
for i, (images, labels) in enumerate(data_loader.get_iter()):
images = Variable(images.cuda(), volatile=True)
labels = Variable(labels.cuda(), volatile=True)
feat = self.gen[0](images, encode=True)
pred_prob = dis_out(feat)
loss += self.d_criterion(pred_prob, labels).data[0]
cnt += 1
incorrect += torch.ne(torch.max(pred_prob, 1)[1], labels).data.sum()
if max_batch is not None and i >= max_batch - 1: break
return loss / cnt, incorrect
def get_gens_img(self, images, spbatch=False, partcode=False, lbls=None, codes=None):
# images: Variable(Tensor)
gen_images = []
if lbls is not None:
new_lbls = []
img_per_gen = images.size(0) // self.config.num_label
num_part = []
for j in range(self.config.num_label):
if spbatch:
num_part.append(range(img_per_gen))
elif lbls is not None:
mask = (lbls == j).nonzero().squeeze()
num_mask = len(mask)
num_part.append(mask)
if num_mask < 1:
continue
new_lbls += [j]*num_mask
else:
num_part.append(range(j*img_per_gen, (j+1)*img_per_gen))
gen_feat = self.gen[0](images, skip_encode=True)
if partcode:
lay_key = "layer_{}".format(model.UNetWithResnetEncoder.DEPTH - 1)
keep_len = gen_feat[lay_key].size(1) // 2
gn_size = gen_feat[lay_key][:,keep_len:].size()
gen_feat[lay_key] = gen_feat[lay_key][:,:keep_len]
gn = Variable(torch.rand(gn_size).cuda()) * 2
gen_feat[lay_key] = torch.cat((gen_feat[lay_key], gn), 1)
elif codes is not None:
lay_key = "layer_{}".format(model.UNetWithResnetEncoder.DEPTH - 1)
# codes = codes[:gen_feat[lay_key].size(0)]
gen_feat[lay_key] = codes
for j in range(self.config.num_label):
if len(num_part[j]) < 1:
continue
j_feat = dict()
for i in gen_feat.keys():
j_feat[i] = gen_feat[i][num_part[j]]
gen_image = self.gen[j].decode(j_feat)
gen_images.append(gen_image)
gen_images = torch.cat(gen_images, 0)
if lbls is not None:
new_lbls = Variable(torch.from_numpy(np.array(new_lbls)).cuda())
return gen_images, new_lbls
else:
return gen_images
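    # get_gens_img encodes the batch once with the shared encoder, optionally swaps
    # (part of) the deepest skip feature for random codes, then decodes each slice
    # of the batch with its class-specific decoder; when `lbls` is given, every
    # image is routed to the decoder of its own label and the matching labels are
    # returned alongside the generated images.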
def visualize(self, data_loader):
self.gen.eval()
# self.dis.eval()
vis_size = 100
num_label = self.config.num_label
img_per_batch = self.config.dev_batch_size // num_label
img_per_batch *= num_label
# nrow = int((10 // num_label)*num_label)
inp_images = []
gen_images = []
for i, (images, _) in enumerate(data_loader.get_iter()):
if i * self.config.dev_batch_size >= vis_size:
break
inp_images.append(images[:img_per_batch])
images = Variable(images.cuda(), volatile=True)
gen_images.append(self.get_gens_img(images))
inp_images = torch.cat(inp_images, 0)
gen_images = torch.cat(gen_images, 0)
save_path = os.path.join(self.config.save_dir,
'{}.FM+VI.{}_.png'.format(self.config.dataset, self.config.suffix))
vutils.save_image(gen_images.data.cpu(), save_path, normalize=True, range=(-1, 1), nrow=10)
save_path = os.path.join(self.config.save_dir,
'{}.FM+VI.{}_d.png'.format(self.config.dataset, self.config.suffix))
vutils.save_image(inp_images, save_path, normalize=True, range=(-1, 1), nrow=10)
def param_init(self):
def func_gen(flag):
def func(m):
if hasattr(m, 'init_mode'):
setattr(m, 'init_mode', flag)
return func
images = []
num_img = 500
for i in range(num_img // self.config.train_batch_size):
lab_images, _ = self.labeled_loader.next()
images.append(lab_images)
images = torch.cat(images, 0)
if hasattr(self, 'dis'):
self.dis.apply(func_gen(True))
if self.config.dis_double: self.dis_dou.apply(func_gen(True))
feat = self.gen[0](Variable(images.cuda()), encode=True)
if self.config.dis_uc:
logits,_ = self.dis(feat, uc=True)
else:
logits = self.dis(feat)
if self.config.dis_double: logits = self.dis_dou(feat)
if self.config.dis_triple: logits = self.dis_dou.out_net3(feat)
self.dis.apply(func_gen(False))
if self.config.dis_double: self.dis_dou.apply(func_gen(False))
self.ema_dis = copy.deepcopy(self.dis) # clone weight_scale and weight
def calculate_remaining(self, t1, t2, epoch): # ta
tot_progress = (epoch + 0.) / self.config.max_epochs
if self.config.resume:
progress = (epoch - self.config.last_epochs + 0.) / (self.config.max_epochs- self.config.last_epochs)
else:
progress = tot_progress
elapsed_time = t2 - t1
if (progress > 0):
remaining_time = elapsed_time * (1 / progress) - elapsed_time
else:
remaining_time = 0
# elapsed time
esec = int(elapsed_time % 60)
emin = int((elapsed_time // 60) % 60)
ehr = int(elapsed_time / 3600)
# remaining_time
rsec = int(remaining_time % 60)
rmin = int((remaining_time // 60) % 60)
rhr = int(remaining_time / 3600)
time_str = '[{:8.2%}], {:3d}:{:2d}:{:2d}<{:3d}:{:2d}:{:2d} '.format(tot_progress, ehr, emin, esec, rhr, rmin, rsec)
time_str = '| ' + time_str + '\n'
return time_str
def save_model(self, net, net_label, epo_label): # ta
save_filename = 'VI.{}_{}_net_{}.pth'.format(self.config.suffix, epo_label, net_label)
save_path = os.path.join(self.config.save_dir, save_filename)
torch.save(net.cpu().state_dict(), save_path)
if torch.cuda.is_available():
net.cuda()
def del_model(self, net_label, epo_label): # ta
del_filename = 'VI.{}_{}_net_{}.pth'.format(self.config.suffix, epo_label, net_label)
del_path = os.path.join(self.config.save_dir, del_filename)
if os.path.exists(del_path):
os.remove(del_path)
else:
print("The file does not exist, {}".format(del_path))
def load_model(self, net, net_label, epo_label, suffix=None): # ta
if suffix is None:
suffix = self.config.suffix
load_filename = 'VI.{}_{}_net_{}.pth'.format(suffix, epo_label, net_label)
load_path = os.path.join(self.config.save_dir, load_filename)
load_net = torch.load(load_path)
net.cpu()
model.load_my_state_dict(net, load_net)
if torch.cuda.is_available():
net.cuda()
def save(self, epo_label): # ta
# save new
if hasattr(self, 'dis'):
self.save_model(self.dis, 'D', epo_label)
self.save_model(self.ema_dis, 'M', epo_label)
if hasattr(self, 'gen'):
self.save_model(self.gen, 'G', epo_label)
if hasattr(self, 'dis_dou'):
self.save_model(self.dis_dou, 'D2', epo_label)
# del old
if epo_label >= self.config.vis_period:
epo_label -= self.config.vis_period
if hasattr(self, 'dis'):
self.del_model('D', epo_label)
self.del_model('M', epo_label)
if hasattr(self, 'gen'):
self.del_model('G', epo_label)
if hasattr(self, 'dis_dou'):
                self.del_model('D2', epo_label)
def resume(self, epo_label): # ta
self.cal_lab_feat_cen()
# load old
if hasattr(self, 'dis'):
self.load_model(self.dis, 'D', epo_label)
self.load_model(self.ema_dis, 'M', epo_label)
if hasattr(self, 'gen'):
self.load_model(self.gen, 'G', epo_label)
if hasattr(self, 'dis_dou'):
self.load_model(self.dis_dou, 'D2', epo_label)
def adjust_learning_rate(self, optimizer, lr, ini_lr, epoch):
# LR warm-up to handle large minibatch sizes from https://arxiv.org/abs/1706.02677
lr = ramps.linear_rampup(epoch, self.config.lr_rampup) * (lr - ini_lr) + ini_lr
# Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)
if self.config.lr_rampdn:
assert self.config.lr_rampdn >= self.config.max_epochs
lr *= ramps.cosine_rampdown(epoch, self.config.lr_rampdn )
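        # Assumed ramp shapes, following the mean-teacher reference implementation:
        #   linear_rampup(e, r)   ~ clip(e / r, 0, 1)
        #   cosine_rampdown(e, r) = 0.5 * (cos(pi * e / r) + 1)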
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def update_ema_variables(self, alpha, global_step, batch_per_epoch): # ta2
# alpha: min of weight reservation, hp
# global_step: history update step counts
# Use the true average until the exponential average is more correct
epoch = global_step / batch_per_epoch
if epoch < self.config.t_start:
return
alpha = min(1 - 1 / (global_step + 1), alpha)
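        # At t_start, or periodically when t_forget_coef > 0, alpha is reset to 0
        # so the EMA teacher is re-initialized from the current student weights
        # ("forgetting" its history).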
if epoch == self.config.t_start \
or self.config.t_forget_coef == 0 \
or (self.config.t_forget_coef > 0.
and epoch % (self.config.c_rampup * self.config.t_forget_coef) == 0):
alpha = 0.
for ema_param, param in zip(self.ema_dis.parameters(), self.dis.parameters()):
ema_param.data.mul_(alpha).add_(1 - alpha, param.data)
def get_current_consistency_weight(self, epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
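        # ramps.sigmoid_rampup is assumed to follow the mean-teacher reference:
        # exp(-5 * (1 - min(epoch, c_rampup) / c_rampup) ** 2), ramping 0 -> 1.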
if epoch < self.config.t_start:
self.consistency_weight = 0.
else:
self.consistency_weight = ramps.sigmoid_rampup(epoch, self.config.c_rampup)
def train(self):
config = self.config
batch_per_epoch = int((len(self.unlabeled_loader) +
config.train_batch_size - 1) / config.train_batch_size)
self.batch_per_epoch = batch_per_epoch
if not config.resume:
self.param_init()
# self.iter_cnt = 0
iter = 0
else:
# self.iter_cnt = 0 + config.last_epochs
            if config.last_epo_lbl != 0:
iter = config.last_epo_lbl
else:
iter = batch_per_epoch*(config.last_epochs)
self.resume(iter)
iter += 1
min_dev_incorrect = e_mdi = 1e6
monitor = OrderedDict()
if config.eval_period == -1:
config.eval_period = batch_per_epoch
self.config.eval_period = batch_per_epoch
if config.vis_period == -1:
config.vis_period = batch_per_epoch
if config.t_start == -1:
config.t_start = 1. / batch_per_epoch
min_lr = config.min_lr if hasattr(config, 'min_lr') else 0.0
last_tr_inco = [1.0]
start_time = time.time()
while True:
if iter % batch_per_epoch == 0:
epoch = iter / batch_per_epoch
if not self.dg_flag and config.dg_start <= epoch:
self.dg_flag = True
epoch_ratio = float(epoch) / float(config.max_epochs)
# use another outer max to prevent any float computation precision problem
if hasattr(self, 'dis'):
self.dis_optimizer.param_groups[0]['lr'] = max(min_lr, config.dis_lr *
min(3. * (1. - epoch_ratio), 1.))
if hasattr(self, 'gen'):
self.gen_optimizer.param_groups[0]['lr'] = max(min_lr, config.gen_lr *
min(3. * (1. - epoch_ratio), 1.))
self.get_current_consistency_weight(iter / batch_per_epoch)
if hasattr(self, 'dis'):
self.adjust_learning_rate(self.dis_optimizer, config.dis_lr,
config.ini_lr, iter / batch_per_epoch)
iter_vals = getattr(self, "step{}_train".format(config.train_step))(iter=iter)
if hasattr(self, 'dis'):
self.update_ema_variables(self.config.ema_decay, iter, batch_per_epoch)
if len(monitor.keys()) == 0:
for k in iter_vals.keys():
monitor[k] = 0.
# if not monitor.has_key(k):
# monitor[k] = 0.
for k, v in iter_vals.items():
monitor[k] += v
if iter % config.eval_period == 0:
if hasattr(self, 'dis'):
train_loss, train_incorrect = self.eval(self.labeled_loader)
dev_loss, dev_incorrect = self.eval(self.dev_loader)
ema_result = self.eval(self.dev_loader, ema=True)
if isinstance(ema_result, tuple):
ema_train_result = self.eval(self.labeled_loader, ema=True)
ema_train_result_ = ema_train_result[1] / (1.0 * len(self.labeled_loader))
ema_result_ = ema_result[1] / (1.0 * len(self.dev_loader))
if config.dis_double:
_, tri_result1 = self.eval(self.dev_loader, tri=1)
tri_result1 = tri_result1 / (1.0 * len(self.dev_loader))
if self.config.dis_triple:
_, tri_result2 = self.eval(self.dev_loader, tri=2)
tri_result2 = tri_result2 / (1.0 * len(self.dev_loader))
else:
tri_result2 = 0.
if hasattr(self, 'gen'):
unl_acc, gen_acc, max_unl_acc, max_gen_acc = self.eval_true_fake(self.dev_loader, 10)
disp_str = "#{}".format(iter)
if hasattr(self, 'dis'):
train_incorrect /= 1.0 * len(self.labeled_loader)
dev_incorrect /= 1.0 * len(self.dev_loader)
min_dev_incorrect = min(min_dev_incorrect, dev_incorrect)
e_mdi = min(e_mdi, ema_result_)
disp_str += '\ttrain: {:.4f}, {:.4f} | dev: {:.4f}, {:.4f} | best: {:.4f}'.format(
train_loss, train_incorrect, dev_loss, dev_incorrect, min_dev_incorrect)
if isinstance(ema_result, tuple):
disp_str += ' | ema: {:.4f}, {:.4f}, {:.4f}'.format(ema_train_result_, ema_result_, e_mdi)
else:
disp_str += ' | ema: None , None'
if config.dis_double:
disp_str += ' | tri: {:.4f}, {:.4f}'.format(tri_result1, tri_result2)
for k, v in monitor.items():
disp_str += ' | {}: {:.4f}'.format(k, v / config.eval_period)
if hasattr(self, 'dis') and hasattr(self, 'gen'):
disp_str += ' | [Eval] unl acc: {:.4f}, gen acc: {:.4f}, max unl acc: {:.4f}, max gen acc: {:.4f}'.format(
unl_acc, gen_acc, max_unl_acc, max_gen_acc)
if hasattr(self, 'dis'):
disp_str += ' | dlr: {:.5f}'.format(self.dis_optimizer.param_groups[0]['lr'])
elif hasattr(self, 'gen'):
disp_str += ' | glr: {:.5f}'.format(self.gen_optimizer.param_groups[0]['lr'])
disp_str += '\n'
monitor = OrderedDict()
# timer # ta
time_str = self.calculate_remaining(start_time, time.time(), iter / batch_per_epoch)
self.logger.write(disp_str)
sys.stdout.write(disp_str)
sys.stdout.write(time_str) # ta
sys.stdout.flush()
# stop check
thres = 1 #0.4; 0.3
if hasattr(self, 'dis') and train_incorrect > sum(last_tr_inco)/len(last_tr_inco) + thres:
print("tr_inco encrease > {}!".format(thres))
break
elif hasattr(self, 'dis'):
last_tr_inco.append(train_incorrect)
if len(last_tr_inco) > 3:
last_tr_inco.pop(0)
epoch = iter / batch_per_epoch
if epoch >= config.max_epochs:
self.save(iter)
self.visualize(self.dev_loader)
break
if iter % config.vis_period == 0:
# save model # ta
self.save(iter)
self.visualize(self.dev_loader)
iter += 1
# self.iter_cnt += 1
def cal_lab_feat_cen(self):
config = self.config
self.lab_feat_cen = [None] * config.num_label
lab_num = [0] * config.num_label
# all lab feat sum
local_loader = self.labeled_loader.get_iter(shuffle=False)
for img, lbl in local_loader:
img, lbl = Variable(img.cuda()), Variable(lbl.cuda())
loc_feat = self.gen[0](img, encode=True).detach()
for i in range(config.num_label):
mask = (lbl == i).nonzero()
mask_num = mask.nelement()
if mask_num < 1:
continue
loc_feat2 = loc_feat[mask[:,0]]
if mask_num != 1:
loc_feat2 = torch.sum(loc_feat2, 0).unsqueeze(0)
if self.lab_feat_cen[i] is None:
self.lab_feat_cen[i] = loc_feat2
else:
self.lab_feat_cen[i] += loc_feat2
lab_num[i] += mask_num
# feat sum -> feat mean
for i in range(config.num_label):
self.lab_feat_cen[i] = self.lab_feat_cen[i] / lab_num[i]
if __name__ == '__main__':
cc = config.cifarmu_config()
parser = argparse.ArgumentParser(description='multi_decoder_trainer.py')
parser.add_argument('-suffix', default='md0', type=str, help="Suffix added to the save images.")
parser.add_argument('-r', dest='resume', action='store_true')
parser.add_argument('-num_label', default=cc.num_label, type=int,
help="label num")
parser.add_argument('-allowed_label', default=cc.allowed_label, type=str,
help="allowed label in dataset")
parser.add_argument('-dataset', default=cc.dataset, type=str,
help="dataset: cifar, stl10, coil20")
    parser.add_argument('-image_side', default=32, type=int,
help="cifar: 32, stl10: 96, coil20: 128, imagenet10: 256")
parser.add_argument('-train_step', default=cc.train_step, type=int,
help="train step: 1, 2")
parser.add_argument('-step1_epo', default=0, type=int,
help="load gen from train step 1 epo #")
parser.add_argument('-step1_epo_lbl', default=0, type=int,
help="load gen from train step 1 epo label #")
parser.add_argument('-dg_ratio', default=5, type=int,
help="update # d/g")
parser.add_argument('-eg_ratio', default=1, type=int,
help="update # g/enc-d")
parser.add_argument('-dis_channels', default=cc.dis_channels, type=str,
help="# of dis channels, r50: '1024,192'; r34: '512,192' ")
parser.add_argument('-max_epochs', default=cc.max_epochs, type=int,
help="max epoches")
parser.add_argument('-last_epochs', default=cc.last_epochs, type=int,
help="last epochs")
parser.add_argument('-last_epo_lbl', default=0, type=int,
help="last epochs label")
parser.add_argument('-dg_start', default=cc.dg_start, type=int,
help="start dis loss epoch")
parser.add_argument('-eval_period', default=cc.eval_period, type=int,
help="evaluate period, -1: per-epoch")
parser.add_argument('-vis_period', default=cc.vis_period, type=int,
help="visualize period, -1: per-epoch")
parser.add_argument('-ld', '--size_labeled_data', default=cc.size_labeled_data, type=int,
help="labeled data num")
parser.add_argument('-ud', '--size_unlabeled_data', default=cc.size_unlabeled_data, type=int,
help="unlabeled data num")
parser.add_argument('-train_batch_size', default=cc.train_batch_size, type=int,
help="labeled batch size")
parser.add_argument('-train_batch_size_2', default=cc.train_batch_size_2, type=int,
help="unlabeled batch size")
parser.add_argument('-dis_lr', default=cc.dis_lr, type=float,
help="discriminator learn rate")
parser.add_argument('-gen_lr', default=cc.gen_lr, type=float,
help="generator learn rate")
# parser.add_argument('-weight_decay', default=cc.weight_decay, type=float,
# help="generator weight decay")
parser.add_argument('-gop', default=cc.gop, type=str,
help="gen optim: Adam, SGD")
parser.add_argument('-con_coef', default=cc.con_coef, type=float,
help="Consistency loss content")
parser.add_argument('-nei_coef', default=cc.nei_coef, type=float,
help="neighbor loss content")
parser.add_argument('-nei_margin', default=cc.nei_margin, type=float,
help="neighbor margin content; less better")
parser.add_argument('-nei_top', default=cc.nei_top, type=int,
help="neighbor top-k")
parser.add_argument('-c_rampup', default=cc.c_rampup, type=int,
help="rampup period")
parser.add_argument('-ini_lr', default=cc.ini_lr, type=float,
help="lr rampup ini")
parser.add_argument('-lr_rampup', default=cc.lr_rampup, type=int,
help="lr rampup fin epoch")
parser.add_argument('-lr_rampdn', default=cc.lr_rampdn, type=int,
help="lr rampdn fin epoch")
parser.add_argument('-t_forget_coef', default=cc.t_forget_coef, type=float,
help="teacher corget content * c_r, 0: always forget, -1: no forget")
parser.add_argument('-t_start', default=cc.t_start, type=float,
help="teacher start calculate loss, -1: 2nd batch start")
parser.add_argument('-dl_weight', default=cc.dl_weight, type=float,
help="dis lab loss content")
parser.add_argument('-du_weight', default=cc.du_weight, type=float,
help="dis unlabeled loss content")
parser.add_argument('-dg_weight', default=cc.dg_weight, type=float,
help="dis gen loss content")
parser.add_argument('-dgl_weight', default=0, type=float,
help="dis gen lab loss content")
parser.add_argument('-dt_weight', default=cc.dt_weight, type=float,
help="dis triple loss content")
parser.add_argument('-ut_weight', default=cc.ut_weight, type=float,
help="dis triple gan loss content")
parser.add_argument('-ef_weight', default=0, type=float,
help="encode feat mean & std loss content")
parser.add_argument('-ef_ts', default=0.3, type=float,
help="encode feat threshold, def: 0.3")
parser.add_argument('-el_weight', default=0, type=float,
help="encode lab feat mean to clustering")
parser.add_argument('-tv_weight', default=cc.tv_weight, type=float,
help="tv loss weight")
parser.add_argument('-st_weight', default=cc.st_weight, type=float,
help="style loss weight")
parser.add_argument('-gf_weight', default=1, type=float,
help="gen feat measure loss content")
parser.add_argument('-gl_weight', default=cc.gl_weight, type=float,
help="gen lab feat loss content")
parser.add_argument('-gn_weight', default=0, type=float,
help="gen feat nei loss content")
parser.add_argument('-gr_weight', default=0, type=float,
help="gen reconstruct loss content")
parser.add_argument('-gc_weight', default=0, type=float,
help="gen code loss content")
parser.add_argument('-gen_mode', default=cc.gen_mode, type=str,
help="gen model mode: res '50', '34', non")
parser.add_argument('-f', dest='flip', action='store_true')
parser.add_argument('-dd', dest='dis_double', action='store_true',
help="double dis")
parser.add_argument('-dt', dest='dis_triple', action='store_true',
help="trible dis")
parser.add_argument('-hgn', dest='halfgnoise', action='store_true',
help="whether the whole E(img) is not the input of Decoder")
parser.add_argument('-rs', dest='rssim', action='store_true',
help="gr loss uses ssim or not")
parser.add_argument('-uc', dest='dis_uc', action='store_true',
help="dis uncertainty or not")
parser.set_defaults(resume=False)
parser.set_defaults(dis_double=False)
parser.set_defaults(dis_triple=False)
parser.set_defaults(halfgnoise=False)
parser.set_defaults(dis_uc=False)
parser.set_defaults(rssim=False)
parser.set_defaults(flip=cc.flip)
args = parser.parse_args()
trainer = Trainer(cc, args)
trainer.train()
``` |
{
"source": "jjyyxx/ray",
"score": 2
} |
#### File: serve/pipeline/deployment_node.py
```python
from typing import Any, Dict, Optional, List, Tuple, Union
from ray.experimental.dag import DAGNode, InputNode
from ray.serve.api import Deployment
from ray.serve.handle import RayServeSyncHandle, RayServeHandle
from ray.serve.pipeline.deployment_method_node import DeploymentMethodNode
from ray.serve.pipeline.constants import USE_SYNC_HANDLE_KEY
from ray.experimental.dag.format_utils import get_dag_node_str
class DeploymentNode(DAGNode):
"""Represents a deployment node in a DAG authored Ray DAG API."""
def __init__(
self,
deployment,
cls_args: Tuple[Any],
cls_kwargs: Dict[str, Any],
cls_options: Dict[str, Any],
other_args_to_resolve: Optional[Dict[str, Any]] = None,
):
self._body: Deployment = deployment
super().__init__(
cls_args,
cls_kwargs,
cls_options,
other_args_to_resolve=other_args_to_resolve,
)
self._deployment_handle: Union[
RayServeHandle, RayServeSyncHandle
] = self._get_serve_deployment_handle(deployment, other_args_to_resolve)
if self._contains_input_node():
raise ValueError(
"InputNode handles user dynamic input the the DAG, and "
"cannot be used as args, kwargs, or other_args_to_resolve "
"in the DeploymentNode constructor because it is not available "
"at class construction or binding time."
)
def _copy_impl(
self,
new_args: List[Any],
new_kwargs: Dict[str, Any],
new_options: Dict[str, Any],
new_other_args_to_resolve: Dict[str, Any],
):
return DeploymentNode(
self._body,
new_args,
new_kwargs,
new_options,
other_args_to_resolve=new_other_args_to_resolve,
)
def _execute_impl(self, *args):
"""Executor of DeploymentNode by ray.remote()"""
return self._deployment_handle.options(**self._bound_options).remote(
*self._bound_args, **self._bound_kwargs
)
def _get_serve_deployment_handle(
self,
deployment: Deployment,
bound_other_args_to_resolve: Dict[str, Any],
) -> Union[RayServeHandle, RayServeSyncHandle]:
"""
Return a sync or async handle of the encapsulated Deployment based on
config.
Args:
deployment (Deployment): Deployment instance wrapped in the DAGNode.
bound_other_args_to_resolve (Dict[str, Any]): Contains args used
to configure DeploymentNode.
Returns:
            RayServeHandle: Default and catch-all is to return a sync handle;
                an async handle is returned only if the user explicitly sets
                USE_SYNC_HANDLE_KEY to False.
"""
if USE_SYNC_HANDLE_KEY not in bound_other_args_to_resolve:
# Return sync RayServeSyncHandle
return deployment.get_handle(sync=True)
elif bound_other_args_to_resolve.get(USE_SYNC_HANDLE_KEY) is True:
# Return sync RayServeSyncHandle
return deployment.get_handle(sync=True)
elif bound_other_args_to_resolve.get(USE_SYNC_HANDLE_KEY) is False:
# Return async RayServeHandle
return deployment.get_handle(sync=False)
else:
raise ValueError(
f"{USE_SYNC_HANDLE_KEY} should only be set with a boolean value."
)
def _contains_input_node(self) -> bool:
"""Check if InputNode is used in children DAGNodes with current node
as the root.
"""
children_dag_nodes = self._get_all_child_nodes()
for child in children_dag_nodes:
if isinstance(child, InputNode):
return True
return False
def __getattr__(self, method_name: str):
# Raise an error if the method is invalid.
getattr(self._body.func_or_class, method_name)
call_node = DeploymentMethodNode(
self._body,
method_name,
(),
{},
{},
other_args_to_resolve=self._bound_other_args_to_resolve,
)
return call_node
def __str__(self) -> str:
return get_dag_node_str(self, str(self._body))
```
#### File: pipeline/tests/test_generate.py
```python
import pytest
import ray
from ray import serve
from ray.experimental.dag import InputNode
from ray.serve.pipeline.generate import (
transform_ray_dag_to_serve_dag,
extract_deployments_from_serve_dag,
)
from ray.serve.pipeline.tests.test_modules import Model, Combine
def _validate_consistent_output(
deployment, dag, handle_by_name, input=None, output=None
):
"""Assert same input lead to same outputs across the following:
1) Deployment handle returned from Deployment instance get_handle()
2) Original executable Ray DAG
3) Deployment handle return from serve public API get_deployment()
"""
deployment_handle = deployment.get_handle()
assert ray.get(deployment_handle.remote(input)) == output
assert ray.get(dag.execute(input)) == output
handle_by_name = serve.get_deployment(handle_by_name).get_handle()
assert ray.get(handle_by_name.remote(input)) == output
def test_simple_single_class(serve_instance):
# Assert converting both arg and kwarg
model = Model._bind(2, ratio=0.3)
ray_dag = model.forward._bind(InputNode())
serve_root_dag = ray_dag._apply_recursive(
lambda node: transform_ray_dag_to_serve_dag(node)
)
deployments = extract_deployments_from_serve_dag(serve_root_dag)
assert len(deployments) == 1
deployments[0].deploy()
_validate_consistent_output(deployments[0], ray_dag, "Model", input=1, output=0.6)
def test_single_class_with_valid_ray_options(serve_instance):
model = Model.options(num_cpus=1, memory=1000)._bind(2, ratio=0.3)
ray_dag = model.forward._bind(InputNode())
serve_root_dag = ray_dag._apply_recursive(
lambda node: transform_ray_dag_to_serve_dag(node)
)
deployments = extract_deployments_from_serve_dag(serve_root_dag)
assert len(deployments) == 1
deployments[0].deploy()
_validate_consistent_output(
deployments[0], ray_dag, deployments[0].name, input=1, output=0.6
)
deployment = serve.get_deployment(deployments[0].name)
assert deployment.ray_actor_options == {
"num_cpus": 1,
"memory": 1000,
"runtime_env": {},
}
def test_single_class_with_invalid_deployment_options(serve_instance):
model = Model.options(name="my_deployment")._bind(2, ratio=0.3)
ray_dag = model.forward._bind(InputNode())
serve_root_dag = ray_dag._apply_recursive(
lambda node: transform_ray_dag_to_serve_dag(node)
)
deployments = extract_deployments_from_serve_dag(serve_root_dag)
assert len(deployments) == 1
with pytest.raises(
ValueError, match="Specifying name in ray_actor_options is not allowed"
):
deployments[0].deploy()
def test_multi_instantiation_class_deployment_in_init_args(serve_instance):
"""
Test we can pass deployments as init_arg or init_kwarg, instantiated
multiple times for the same class, and we can still correctly replace
args with deployment handle and parse correct deployment instances.
"""
m1 = Model._bind(2)
m2 = Model._bind(3)
combine = Combine._bind(m1, m2=m2)
ray_dag = combine.__call__._bind(InputNode())
print(f"Ray DAG: \n{ray_dag}")
serve_root_dag = ray_dag._apply_recursive(
lambda node: transform_ray_dag_to_serve_dag(node)
)
print(f"Serve DAG: \n{serve_root_dag}")
deployments = extract_deployments_from_serve_dag(serve_root_dag)
assert len(deployments) == 3
for deployment in deployments:
deployment.deploy()
_validate_consistent_output(deployments[2], ray_dag, "Combine", input=1, output=5)
def test_shared_deployment_handle(serve_instance):
"""
Test we can re-use the same deployment handle multiple times or in
multiple places, without incorrectly parsing duplicated deployments.
"""
m = Model._bind(2)
combine = Combine._bind(m, m2=m)
ray_dag = combine.__call__._bind(InputNode())
print(f"Ray DAG: \n{ray_dag}")
serve_root_dag = ray_dag._apply_recursive(
lambda node: transform_ray_dag_to_serve_dag(node)
)
print(f"Serve DAG: \n{serve_root_dag}")
deployments = extract_deployments_from_serve_dag(serve_root_dag)
assert len(deployments) == 2
for deployment in deployments:
deployment.deploy()
_validate_consistent_output(deployments[1], ray_dag, "Combine", input=1, output=4)
def test_multi_instantiation_class_nested_deployment_arg(serve_instance):
"""
Test we can pass deployments with **nested** init_arg or init_kwarg,
instantiated multiple times for the same class, and we can still correctly
replace args with deployment handle and parse correct deployment instances.
"""
# TODO: (jiaodong) Support nested deployment args
pass
def test_simple_function(serve_instance):
# TODO: (jiaodong) Support function deployment node
pass
def test_multiple_functions(serve_instance):
# TODO: (jiaodong) Support function deployment node
pass
def test_mix_class_and_function(serve_instance):
# TODO: (jiaodong) Support function deployment node
pass
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
``` |
{
"source": "JJYYYY/policy_crawl",
"score": 2
} |
#### File: policy_crawl/common/fetch.py
```python
import time
import random
import requests
#from my_fake_useragent import UserAgent
from policy_crawl.common.logger import errorlog
user_agent_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
]
# randomly pick one user agent
user_agent = random.choice(user_agent_list)
# pass it to the request headers
headers = { 'User-Agent': user_agent }
#headers = {''}
def get(url,params=None,headers=headers,code="utf-8",timeout=160,**kwargs):
res=requests.get(url,params=params,headers=headers,timeout=timeout,**kwargs)
if res.status_code in [200,201,301]:
return res.content.decode(code)
else:
errorlog.logger.error("url status_code 错误:%s,status_code:%s" % (url, res.status_code))
raise ConnectionError("没有连接")
def post(url,data=None,headers=headers,code="utf-8",timeout=160,**kwargs):
res=requests.post(url,data=data,headers=headers,timeout=timeout,**kwargs)
if res.status_code in [200,201,301]:
return res.content.decode(code)
else:
errorlog.logger.error("url status_code 错误:%s,status_code:%s" %(url,res.status_code))
raise ConnectionError("没有连接")
```
#### File: mca/all/fujian.py
```python
import re
import time
import json
from pyquery import PyQuery as pq
from policy_crawl.common.fetch import get,post
from policy_crawl.common.save import save
from policy_crawl.common.logger import alllog,errorlog
def parse_index(html):
items=json.loads(html.replace("\r\n","").replace("\n",""))
for item in items["docs"]:
try:
data = {}
data["title"] = item["title"]
data["content"] = item["content"]
data["content_url"] = item["chnldocurl"]
data["publish_time"] = item["pubtime"]
data["classification"] = "福建省税务局"
data["url"] = item["url"]
print(data)
save(data)
except:
pass
def main():
for i in range(1,4):
print(i)
url="http://mzt.fujian.gov.cn/was5/web/search?channelid=229105&sortfield=-docreltime,-docorder&extension=&templet=docs.jsp&classsql=siteid%3D46*chnlid%3D18040&searchword=&prepage=20&page="+str(i)+"&r=0.26315070612868396"
html=get(url)
parse_index(html)
if __name__ == '__main__':
main()
```
#### File: npma/all/nation.py
```python
import re
import time
import random
from pyquery import PyQuery as pq
from policy_crawl.common.fetch import get,post
from policy_crawl.common.save import save
from policy_crawl.common.logger import alllog,errorlog
from policy_crawl.common.utils import get_cookie
def parse_detail(html,url):
alllog.logger.info("国家药品监督管理局: %s"%url)
doc=pq(html)
data={}
data["title"]=doc("title").text()
data["content"]=doc(".articlecontent3").text().replace("\n","")
data["content_url"]=[item.attr("href") for item in doc(".articlecontent3 a").items()]
try:
data["publish_time"]=re.findall("(\d{4}年\d{1,2}月\d{1,2}日)",html)[0]
# data["publish_time"]=re.findall("(\d{4}/\d{1,2}/\d{1,2})",html)[0]
# data["publish_time"]=re.findall("(\d{4}-\d{1,2}-\d{1,2})",html)[0]
except:
data["publish_time"]=""
errorlog.logger.error("url:%s 未找到publish_time"%url)
data["classification"]="国家药品监督管理局"
data["url"]=url
print(data)
save(data)
def parse_index(html,cookies):
doc=pq(html)
items=doc(".ListColumnClass15 a").items()
for item in items:
url=item.attr("href")
if "http" not in url:
url="http://www.nmpa.gov.cn/WS04" + url.replace("../","/")
try:
html=get(url,cookies=cookies,code="gbk")
            except:
                errorlog.logger.error("url error: %s" % url)
                continue
            parse_detail(html,url)
time.sleep(random.randint(1,3))
def main():
for i in range(100,152):
cookies=get_cookie("http://www.nmpa.gov.cn/WS04/CL2051/index_1.html",".ListColumnClass15","FSSBBIl1UgzbN7N80S","FSSBBIl1UgzbN7N80T")
print(cookies)
print(i)
if i==0:
url="http://www.nmpa.gov.cn/WS04/CL2051/index.html"
else:
url="http://www.nmpa.gov.cn/WS04/CL2051/index_"+str(i)+".html"
html=get(url,cookies=cookies,code="GB18030")
parse_index(html,cookies)
if __name__ == '__main__':
main()
```
#### File: sasac/all/anhui.py
```python
import re
import time
from pyquery import PyQuery as pq
from policy_crawl.common.fetch import get,post
from policy_crawl.common.save import save
from policy_crawl.common.logger import alllog,errorlog
def parse_detail(html,url):
alllog.logger.info("安徽省国有资产委员会: %s"%url)
doc=pq(html)
data={}
data["title"]=doc(".s_article_top h1").text()
if not data["title"]:
data["title"] = doc(".dicontent_bt h1").text()
data["content"]=doc(".h-content").text().replace("\n","")
data["content_url"]=[item.attr("href") for item in doc(".h-content a").items()]
try:
# data["publish_time"]=re.findall("(\d{4}年\d{1,2}月\d{1,2}日)",html)[0]
# data["publish_time"]=re.findall("(\d{4}/\d{1,2}/\d{1,2})",html)[0]
data["publish_time"]=re.findall("(\d{4}-\d{1,2}-\d{1,2})",html)[0]
except:
data["publish_time"]=""
errorlog.logger.error("url:%s 未找到publish_time"%url)
if not data["content"]:
data["content"] = doc(".dicontent_left").text().replace("\n", "")
data["content_url"] = [item.attr("href") for item in doc(".dicontent_left a").items()]
data["classification"]="安徽省国有资产委员会"
data["url"]=url
print(data)
save(data)
def parse_index(html):
doc=pq(html)
items=doc("#ul_list tr a").items()
for item in items:
url=item.attr("href")
if "http" not in url:
url="http://gzw.ah.gov.cn/xxgk/" + url
try:
html=get(url,code="gbk")
        except:
            errorlog.logger.error("url error: %s" % url)
            continue
        parse_detail(html,url)
time.sleep(1)
def main():
url="http://gzw.ah.gov.cn/xxgk/list.jsp"
for i in range(1,40):
print(i)
data={'strColId': '0ae0ae0d59cb48b38d86babb0edc8918', 'strTopicType': '', 'strThemeType': '', 'strWebSiteId': '1448866116912004', 'strPage': '', 'strMasTitle': '', 'year': '', '_index': '', 'PageSizeIndex': str(i), 'strIndex': '', 'strSearchContent': '', 'strTxtCnt': ''}
html=post(url,data=data,code="gbk")
parse_index(html)
if __name__ == '__main__':
main()
``` |
{
"source": "jjz17/FreeCell-Solitaire",
"score": 3
} |
#### File: mvc/model/deck.py
```python
from mvc.model.card import Card, Value, Suit
import random
class Deck:
def __init__(self):
self.cards = []
self.build_deck()
def build_deck(self):
for value in Value:
for suit in Suit:
self.cards.append(Card(value, suit))
def shuffle(self):
random.shuffle(self.cards)
``` |
{
"source": "jjz17/Natural-Strength",
"score": 3
} |
#### File: Natural-Strength/application/utils.py
```python
import numpy as np
import _pickle as cPickle
from application.user_metrics import UserMetrics, DummyUserMetrics
def choose_pred(current, pred):
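    # Return the larger of the current lift and the model prediction; if the
    # current lift already matches or exceeds the prediction, bump it by 5%
    # (presumably so the recommended target is always a progression).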
result = max(current, pred)
if result == current:
result *= 1.05
return result
def copy_metrics(user_metric: UserMetrics):
if user_metric != None:
return DummyUserMetrics(user_metric.weight, user_metric.squat, user_metric.bench, user_metric.deadlift, user_metric.date)
return None
def generate_null_metrics():
return {'data': {'user_metric': DummyUserMetrics(None, None, None, None, None)}, 'units': ''}
def handle_unit_conversion(input_unit_kg: bool = True, output_unit_kg: bool = False, **data):
if input_unit_kg and not output_unit_kg:
for key in data.keys():
# Check if data is a UserMetrics object
if isinstance(data[key], UserMetrics) or isinstance(data[key], DummyUserMetrics):
data[key] = metrics_kg_to_lbs(data[key])
else:
data[key] = kg_to_lbs(data[key])
units = 'Lbs'
elif not input_unit_kg and output_unit_kg:
for key in data.keys():
data[key] = lbs_to_kg(data[key])
units = 'Kg'
elif not output_unit_kg:
for key in data.keys():
# Check if data is a UserMetrics object
if isinstance(data[key], UserMetrics) or isinstance(data[key], DummyUserMetrics):
data[key] = metrics_kg_to_lbs(data[key])
units = 'Lbs'
else:
units = 'Kg'
return {'data': data, 'units': units}
def kg_to_lbs(kg):
return round(float(kg) * 2.20462, 2)
def lbs_to_kg(lbs):
return round(float(lbs) * 0.453592, 2)
def load_model(model_file: str):
return cPickle.load(open(model_file, 'rb'))
def metrics_kg_to_lbs(user_metric):
if user_metric == None:
return None
return DummyUserMetrics(kg_to_lbs(user_metric.weight), kg_to_lbs(user_metric.squat), kg_to_lbs(user_metric.bench), kg_to_lbs(user_metric.deadlift), user_metric.date)
def scale_stats(scaler, stats: list):
return scaler.transform(np.array(stats).reshape(1, -1))
```
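A minimal usage sketch of `handle_unit_conversion`, assuming the module is importable as `application.utils`; the lift names and numbers below are illustrative only:
```python
# Hypothetical example: the user logs kilograms, the page displays pounds.
from application.utils import handle_unit_conversion

result = handle_unit_conversion(True, False, squat=140.0, bench=100.0)
# -> {'data': {'squat': 308.65, 'bench': 220.46}, 'units': 'Lbs'}
```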
#### File: Natural-Strength/pages/example.py
```python
import streamlit as st
import numpy as np
import pandas as pd
# @st.cache
def app():
st.markdown("## Data Upload")
# Upload the dataset and save as csv
st.markdown("### Upload a csv file for analysis.")
st.write("\n")
# Code to read a single file
uploaded_file = st.file_uploader("Choose a file", type=['csv', 'xlsx'])
global data
if uploaded_file is not None:
try:
data = pd.read_csv(uploaded_file)
except Exception as e:
print(e)
data = pd.read_excel(uploaded_file)
# uploaded_files = st.file_uploader("Upload your CSV file here.", type='csv', accept_multiple_files=False)
# # Check if file exists
# if uploaded_files:
# for file in uploaded_files:
# file.seek(0)
# uploaded_data_read = [pd.read_csv(file) for file in uploaded_files]
# raw_data = pd.concat(uploaded_data_read)
# uploaded_files = st.file_uploader("Upload CSV", type="csv", accept_multiple_files=False)
# print(uploaded_files, type(uploaded_files))
# if uploaded_files:
# for file in uploaded_files:
# file.seek(0)
# uploaded_data_read = [pd.read_csv(file) for file in uploaded_files]
# raw_data = pd.concat(uploaded_data_read)
# read temp data
# data = pd.read_csv('data/2015.csv')
''' Load the data and save the columns with categories as a dataframe.
This section also allows changes in the numerical and categorical columns. '''
if st.button("Load Data"):
# Raw data
st.dataframe(data)
# utils.getProfile(data)
# st.markdown("<a href='output.html' download target='_blank' > Download profiling report </a>",unsafe_allow_html=True)
# HtmlFile = open("data/output.html", 'r', encoding='utf-8')
# source_code = HtmlFile.read()
# components.iframe("data/output.html")# Save the data to a new file
data.to_csv('data/main_data.csv', index=False)
# Generate a pandas profiling report
# if st.button("Generate an analysis report"):
# utils.getProfile(data)
# Open HTML file
# pass
# Collect the categorical and numerical columns
numeric_cols = data.select_dtypes(include=np.number).columns.tolist()
categorical_cols = list(set(list(data.columns)) - set(numeric_cols))
# Save the columns as a dataframe or dictionary
columns = []
# Iterate through the numerical and categorical columns and save in columns
# columns = utils.genMetaData(data)
# Save the columns as a dataframe with categories
# Here column_name is the name of the field and the type is whether it's numerical or categorical
columns_df = pd.DataFrame(columns, columns=['column_name', 'type'])
columns_df.to_csv('data/metadata/column_type_desc.csv', index=False)
# Display columns
st.markdown("**Column Name**-**Type**")
for i in range(columns_df.shape[0]):
st.write(f"{i + 1}. **{columns_df.iloc[i]['column_name']}** - {columns_df.iloc[i]['type']}")
st.markdown("""The above are the automated column types detected by the application in the data.
In case you wish to change the column types, head over to the **Column Change** section. """)
```
#### File: Natural-Strength/src/records_scraping.py
```python
import random
import re
import time
import os
from datetime import datetime
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
def get_soup(url):
""" Returns the BeautifulSoup object for website of the given category name in the
given CL webpage's homepage
Args:
url(String): the given URL
Returns:
soup(bs4.BeautifulSoup): the BeautifulSoup object representation of the desired category page
"""
# Open the target category page
# html = urllib.urlopen(url)
html = requests.get(url)
# Create a BeautifulSoup object after the HTML page is read
# soup = BeautifulSoup(html.read())
soup = BeautifulSoup(html.content, "html.parser")
# Close the urllib connection to avoid issues with the website
html.close()
return soup
def get_dict(sex='M'):
url = ''
min_class = 0
max_class = 0
label_val = ''
if sex == 'M':
url = 'https://usapl.liftingdatabase.com/records-default?recordtypeid=120365&categoryid=59&weightclassid=122663'
min_class = 52.0
max_class = 141.0
# ABSTRACT THIS
label_val = 'USAPL Nationals 2022 - Male'
else:
url = 'https://usapl.liftingdatabase.com/records-default?recordtypeid=120362&categoryid=59&weightclassid=122653'
min_class = 44.0
max_class = 101.0
label_val = 'USAPL Nationals 2022 - Female'
soup = get_soup(url)
choices = soup.find('optgroup', attrs={'label': label_val})
options = choices.find_all('option')
url_class_map = {}
for option in options:
w_class = ''
# Handle max class
if option.get_text()[-1] == '+':
w_class = max_class
else:
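            # non-"+" options appear to encode the class as a negative number
            # (e.g. "-93"), so negate the value to get the weight class in kg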
w_class = float(option.get_text()) * -1
if w_class >= min_class:
url = option['value']
url_class_map[url] = w_class
print(url_class_map)
return url_class_map
def get_records_df():
m_dict = get_dict('M')
f_dict = get_dict('F')
dicts = [(m_dict,
'https://usapl.liftingdatabase.com/records-default?recordtypeid=120365&categoryid=59&weightclassid=',
'M'),
(f_dict,
'https://usapl.liftingdatabase.com/records-default?recordtypeid=120362&categoryid=59&weightclassid=',
'F')]
# Create DataFrame
df = pd.DataFrame(columns=['Name', 'Weight Class', 'Lift', 'Weight (kg)', 'Date', 'Sex'])
for info in dicts:
info_dict = info[0]
target_url = info[1]
sex = info[2]
for url, w_class in info_dict.items():
page = str(target_url) + str(url)
soup = get_soup(page)
body = soup.find_all('tbody')[1]
# print(body)
# titles = body.find_all('th', {'colspan' : '9'})
records = body.find_all('tr', class_=None)
lift = ''
for count, record in enumerate(records):
# even tags are lift title
if count > 0 and (count - 1) % 2 == 0:
lift = record.get_text().strip()
# print(lift)
# print(record.get_text().strip())
# odd tags are info
if count > 0 and (count - 1) % 2 == 1:
infos = record.find_all('td')
name = infos[1]
# weight_class = infos[0]
weight = infos[2]
date = infos[3]
# Create a temporary dictionary to store the information of the current post
temp_dict = {'Name': name, 'Weight Class': w_class, 'Lift': lift, 'Weight (kg)': weight,
'Date': date, 'Sex': sex}
# Append the current post's information to the df DataFrame to create its respective row
df = df.append(temp_dict, ignore_index=True)
# for info in infos:
# except print(info.get_text().strip())
# print('Running...')
print('Test')
time.sleep(random.randint(2, 3))
# Wrangling
df['Name'] = df['Name'].apply(lambda x: x.get_text())
df['Weight (kg)'] = df['Weight (kg)'].apply(
lambda x: float(x.get_text().split()[0]) if len(x.get_text()) > 0 else np.nan)
df['Date'] = df['Date'].apply(lambda x: re.sub(r"[\n\t\s]*", "", x.get_text()))
df['Date'] = df['Date'].apply(
lambda x: datetime(1, 1, 1).date() if len(x) == 0 else datetime.strptime(x, '%m/%d/%Y').date())
return df
# f = open("test2.txt", "a")
# f.write('\nrecords: ' + str(datetime.now()))
# f.close()
df = get_records_df()
df.to_csv(f'..{os.path.sep}data{os.path.sep}current_usapl_american_raw_records.csv', index=False)
```
#### File: Natural-Strength/website/app.py
```python
from flask import Flask, render_template
from datetime import datetime
app = Flask(__name__)
@app.route('/')
def hello():
return 'Hello, world!'
@app.route('/user/<usr>')
def user_home(usr):
return f'Welcome to your home, {usr}!'
@app.route('/input')
def user_input():
pass
@app.route('/practice')
def practice():
return str(datetime.now())
# return render_template('practice.html', time=time)
if __name__ == '__main__':
app.run()
``` |
{
"source": "jjz17/Sport-Ball-Image-Classifier",
"score": 3
} |
#### File: Sport-Ball-Image-Classifier/src/download_images.py
```python
import os
import random
import time
import urllib.request as urllib
import pandas as pd
def url_to_jpg(index, url, file_path):
file_name = f'image{index}.jpg'
full_path = f'{file_path}{file_name}'
urllib.urlretrieve(url, full_path)
# print(f'{file_name} saved successfully')
#%%
import os
# main_dir = ["FolderA", "FolderB"]
# common_dir = ["SubFolder1", "SubFolder2", "SubFolder3"]
# Creating Directories
main_dir = 'sport_ball_images'
sub_dirs = ['basketball', 'soccer']
#
for sub_dir in sub_dirs:
try:
        os.makedirs(f'data{os.path.sep}{os.path.join(main_dir, sub_dir)}')
except OSError:
pass
# for dir in main_dir:
# for sub_dir in common_dir:
# try:
# os.makedirs(f'data{os.path.sep}{os.path.join(dir, sub_dir)}')
# except OSError:
# print('Failed')
#%%
df = pd.read_csv(f'..{os.path.sep}data{os.path.sep}image_urls.csv')
# print(df.head())
for i in df.index:
row = df.loc[i]
label = row['Type']
url = row['Image_URL']
print(url)
file_path = ''
if label == 'Basketball':
file_path = f'data{os.path.sep}sport_ball_images{os.path.sep}basketball{os.path.sep}'
else:
file_path = f'data{os.path.sep}sport_ball_images{os.path.sep}soccer{os.path.sep}'
try:
url_to_jpg(i + 1, url, file_path)
print(f'Saving image {i + 1}')
time.sleep(random.randint(2, 5))
except:
pass
# print(f'{label} {url}')
#%%
# url_to_jpg(500, 'http://www.pennracquet.com/images/balls_professional.png', f'data{os.path.sep}sport_ball_images{os.path.sep}soccer{os.path.sep}')
``` |
{
"source": "jjz369/Mask_RCNN",
"score": 2
} |
#### File: Twitter_button/models/button.py
```python
import os
import sys
import json
import random
import numpy as np
import skimage.draw
import cv2
import matplotlib
import matplotlib.pyplot as plt
ROOT_DIR = os.path.abspath("../")
sys.path.append(ROOT_DIR)
CURR_DIR = os.path.abspath("./")
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
# Path to pre-trained weights
MODEL_DIR = os.path.join(CURR_DIR, "button_logs")
COCO_MODEL_PATH = os.path.join(CURR_DIR, "mask_rcnn_coco.h5")
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
############################################################
# Configuration
############################################################
class ButtonConfig(Config):
"""Configuration for training on the Twitter following page screenshot
figures. Derives from the base Config class and overrides values specific
to the twitter following button dataset.
"""
# Give the configuration a recognizable name
NAME = "button"
# Train on 2 images per GPU. We can put multiple images on each
# GPU because the images are small.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 2 # background + 1 button class
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# Use a smaller anchor because some of images and objects are small
RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
############################################################
# Datasets
############################################################
class ButtonDataset(utils.Dataset):
"""
load pre-treated twitter button images and masks from annotations
"""
def load_button(self, dataset_dir, subset):
"""
Load a subset of the twitter following button image datasets.
Parameters
----------
dataset_dir : Root directory of the dataset
subset : subsets to load: train or val
"""
# Add classes. We have only one class to add.
self.add_class("button", 1, "button")
# Train or validation datasets?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# Add images
annotations = json.load(open(os.path.join(dataset_dir, "via_export_json.json")))
annotations = list(annotations.values())
annotations = [a for a in annotations if a['regions']]
# Get the x, y coordinaets of points of the rectangles that make up
# the outline of each object instance. These are stores in the
# shape_attributes (see json format above)
# The if condition is needed to support VIA versions 1.x and 2.x.
for a in annotations:
if type(a['regions']) is dict:
rectangles = [r['shape_attributes'] for r in a['regions'].values()]
else:
rectangles = [r['shape_attributes'] for r in a['regions']]
# load_mask() needs the image size to convert rectangles to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
        # the image. This is only manageable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"button",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
rectangles=rectangles)
def load_mask(self, image_id):
"""
Generate instance masks for an image.
This function is modified because instead of using polygons, I used a
rectangle for the annotations.
Parameters
----------
image_id : the loaded internel image_id
Returns
-------
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a button dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "button":
return super(self.__class__, self).load_mask(image_id)
# Convert rectangles to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["rectangles"])],
dtype=np.uint8)
for i, p in enumerate(info["rectangles"]):
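            # each VIA rectangle annotation gives the top-left corner (x, y)
            # plus its width and height in pixels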
start = (p['y'], p['x'])
extent = (p['y']+p['height'], p['x']+p['width'])
# Get indexes of pixels inside the rectangle and set them to 1
rr, cc = skimage.draw.rectangle(start, extent)
mask[rr, cc, i] = 1
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "button":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = ButtonDataset()
dataset_train.load_button(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = ButtonDataset()
dataset_val.load_button(args.dataset, "val")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=1,
layers='heads')
print("Training network all")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=2,
layers="all")
def detect(model, image_path = None):
    assert image_path, "Argument --image is required for detection"
    # Read image
    image = skimage.io.imread(image_path)
# Detect objects
r = model.detect([image], verbose=1)[0]
print("The bounding boxes are: " r[rois])
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN to detect following buttons.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'detect'")
parser.add_argument('--dataset', required=False,
metavar="/path/to/button/dataset/",
help='Directory of the Button dataset')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file ")
parser.add_argument('--logs', required=False,
                        default=MODEL_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Image to apply detect')
parser.add_argument('--video', required=False,
metavar="path or URL to video",
help='Video to apply the color splash effect on')
args = parser.parse_args()
# Validate arguments
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
elif args.command == "detect":
assert args.image, "Provide --image to detect"
print("Weights: ", args.weights)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
# Configurations
if args.command == "train":
config = ButtonConfig()
else:
class InferenceConfig(ButtonConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.weights.lower() == "coco":
        weights_path = COCO_MODEL_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
elif args.weights.lower() == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model)
elif args.command == "detect":
        detect(model, image_path=args.image)
else:
print("'{}' is not recognized. "
"Use 'train' or 'detect'".format(args.command))
``` |
{
"source": "jjz369/Steam-Game-Recommender",
"score": 3
} |
#### File: jjz369/Steam-Game-Recommender/run_web_app.py
```python
from flask import Flask, render_template
import random
import yaml
from sqlalchemy import create_engine
app = Flask(__name__)
#path_steam_user_id = './data/steam_user_id.txt'
config = yaml.safe_load(open('./src/config.yaml'))
db_username = config['mysql']['username']
db_password = config['mysql']['password']
db_endpoint = config['mysql']['endpoint']
db_database = config['mysql']['database']
engine = create_engine('mysql+pymysql://{}:{}@{}/{}?charset=utf8mb4'.format(db_username, db_password, db_endpoint, db_database))
lst_user_id = [i[0] for i in engine.execute('select user_id from game_steam_user').fetchall()]
lst_popular_games = engine.execute('''
SELECT
game_steam_app.app_id,
game_steam_app.name,
game_steam_app.initial_price,
game_steam_app.header_image
FROM game_steam_app
JOIN recommended_games_popularity_based
ON game_steam_app.app_id = recommended_games_popularity_based.app_id
AND game_steam_app.type = "game"
AND game_steam_app.release_date <= CURDATE()
AND game_steam_app.initial_price IS NOT NULL
ORDER BY recommended_games_popularity_based.peak_today DESC
LIMIT 5''').fetchall()
@app.route('/')
def recommender():
user_id = random.choice(lst_user_id)
# user_id = 76561197960323774 # no purchase info
lst_most_played_games = engine.execute('''
SELECT
game_steam_app.app_id,
game_steam_app.name,
game_steam_app.initial_price,
game_steam_app.header_image
FROM game_steam_app
JOIN game_steam_user
ON game_steam_app.app_id = game_steam_user.app_id
WHERE game_steam_user.user_id = {}
AND game_steam_user.playtime_forever > 0
AND game_steam_app.type = "game"
AND game_steam_app.release_date <= CURDATE()
AND game_steam_app.initial_price IS NOT NULL
ORDER BY game_steam_user.playtime_forever DESC
LIMIT 3'''.format(user_id)).fetchall()
if lst_most_played_games:
favorite_app_id = lst_most_played_games[0][0]
# get content based recommendation
lst_content_recommended = engine.execute('''
SELECT app_id, name, initial_price, header_image
FROM game_steam_app
WHERE type = "game"
AND release_date <= CURDATE()
AND initial_price IS NOT NULL
AND app_id IN ({})'''.format(','.join(
[str(i) for i in engine.execute('SELECT `0`,`1`,`2` FROM recommended_games_content_based WHERE app_id = {}'.format(favorite_app_id)).first()]
)
)
).fetchall()
# get item based recommendation
lst_item_recommended = engine.execute('''
SELECT app_id, name, initial_price, header_image
FROM game_steam_app
WHERE type = "game"
AND release_date <= CURDATE()
AND initial_price IS NOT NULL
AND app_id IN ({})'''.format(','.join(
[str(i) for i in engine.execute('SELECT `0`,`1`,`2` FROM recommended_games_item_based WHERE app_id = {}'.format(favorite_app_id)).first()]
)
)
).fetchall()
# get ALS based recommendation
lst_als_recommended = engine.execute('''
SELECT app_id, name, initial_price, header_image
FROM game_steam_app
WHERE type = "game"
AND release_date <= CURDATE()
AND initial_price IS NOT NULL
AND app_id IN ({})'''.format(','.join(
[str(i) for i in engine.execute('SELECT `0`,`1`,`2` FROM recommended_games_als_based WHERE user_id = {}'.format(user_id)).first()]
)
)
).fetchall()
else:
lst_content_recommended = []
lst_item_recommended = []
lst_als_recommended = []
return render_template('recommendation.html',
user_id = user_id,
lst_most_played_games = lst_most_played_games,
lst_content_recommended = lst_content_recommended,
lst_item_recommended = lst_item_recommended,
lst_als_recommended = lst_als_recommended,
lst_popular_games = lst_popular_games)
if __name__ == '__main__':
app.run(debug=True)
```
#### File: Steam-Game-Recommender/src/game_details.py
```python
import requests
import time
import json
import yaml
import pandas as pd
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.types import Integer
def get_app_details():
# The Steam API limits 200 data in 5 miniutes time. So after every 200 data, sleep for 5 minitues.
Ns = 38400
current_count = Ns
url = 'http://api.steampowered.com/ISteamApps/GetAppList/v2'
r = requests.get(url)
dic_steam_app = r.json()
lst_app_id = [i.get('appid') for i in dic_steam_app.get('applist').get('apps')]
with open('../data/steam_app_details_2.txt', 'w') as f:
for app_id in sorted(lst_app_id)[Ns:]:
for i in range(3):
try:
r = requests.get(
url = 'http://store.steampowered.com/api/appdetails/',
params = { 'appids' : app_id }
)
dic_app_data = r.json()
break
except Exception as e:
print(app_id, e)
time.sleep(.5)
f.write(json.dumps(dic_app_data))
f.write('\n')
if current_count > Ns and current_count % 200 == 0:
print("The number of games: {}, current id: {}".format(len(lst_app_id), current_count))
time.sleep(300)
current_count += 1
def save_app_details():
dic_app_details = {}
config = yaml.safe_load(open('config.yaml'))
db_username = config['mysql']['username']
db_password = config['mysql']['password']
db_endpoint = config['mysql']['endpoint']
db_database = config['mysql']['database']
engine = create_engine('mysql+pymysql://{}:{}@{}/{}?charset=utf8mb4'.format(db_username, db_password, db_endpoint, db_database))
with open('../data/steam_app_details.txt', 'r') as f:
for i in f.readlines():
try:
for app_id, dic_response in json.loads(i).items():
if dic_response.get('success'):
dic_app_details[app_id] = parse_steam_app_details(dic_response.get('data',{}))
except:
pass
df_steam_app = pd.DataFrame.from_dict(dic_app_details, 'index')
df_steam_app.index.name = 'app_id'
df_steam_app.reset_index(inplace=True)
df_steam_app.to_sql('game_steam_app', engine, if_exists='replace', index=False, chunksize = 10000, dtype={'app_id':Integer(), 'required_age':Integer()})
def parse_steam_app_details(app_data):
developers = ', '.join(app_data.get('developers', []))
if not developers:
developers = None
publishers = ', '.join(app_data.get('publishers', []))
if not publishers:
publishers = None
name = app_data.get('name')
required_age = app_data.get('required_age')
short_description = app_data.get('short_description')
if not short_description:
short_description = None
app_type = app_data.get('type')
header_image = app_data.get('header_image')
fullgame = app_data.get('fullgame',{}).get('appid')
lst_categories = app_data.get('categories',[])
if lst_categories:
categories = ', '.join([i.get('description') for i in lst_categories])
else:
categories = None
lst_genres = app_data.get('genres',[])
if lst_genres:
genres = ', '.join([i.get('description') for i in lst_genres])
else:
genres = None
supported_languages = app_data.get('supported_languages')
if supported_languages:
supported_languages = supported_languages.replace('<strong>*</strong>', '').replace('<br>languages with full audio support','')
if app_data.get('is_free') == True:
initial_price = 0
currency = 'USD'
else:
if app_data.get('price_overview',{}):
initial_price = app_data.get('price_overview',{}).get('initial', 0) / 100
currency = app_data.get('price_overview',{}).get('currency')
else:
initial_price = None
currency = None
if app_data.get('release_date',{}).get('coming_soon') == False:
release_date = app_data.get('release_date',{}).get('date')
if release_date:
try:
release_date = datetime.strptime(release_date, '%b %d, %Y').date()
except Exception as e:
try:
release_date = datetime.strptime(release_date, '%d %b, %Y').date()
except:
try:
release_date = datetime.strptime(release_date, '%b %Y').date()
except:
release_date = None
else:
release_date = None
else:
release_date = None
dic_steam_app = {
'name' : name,
'type' : app_type,
'release_date' : release_date,
'currency' : currency,
'initial_price' : initial_price,
'short_description' : short_description,
'header_image' : header_image,
'fullgame' : fullgame,
'developers' : developers,
'publishers' : publishers,
'required_age' : required_age,
'supported_languages' : supported_languages,
'categories' : categories,
'genres' : genres,
}
return dic_steam_app
```
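For reference, here is a minimal sketch of how `parse_steam_app_details` flattens one store API response. The input dict and its values are hypothetical, and the function is assumed to be importable from the module above:

```python
from game_details import parse_steam_app_details

sample = {
    'name': 'Example Game',   # hypothetical title, not real store data
    'type': 'game',
    'is_free': False,
    'price_overview': {'initial': 1999, 'currency': 'USD'},
    'release_date': {'coming_soon': False, 'date': 'Mar 5, 2019'},
    'genres': [{'description': 'Indie'}, {'description': 'Action'}],
}
row = parse_steam_app_details(sample)
# row['initial_price'] == 19.99 and row['currency'] == 'USD'
# row['release_date'] == datetime.date(2019, 3, 5)
# row['genres'] == 'Indie, Action'; absent fields come back as None
```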
#### File: Steam-Game-Recommender/src/recommendation.py
```python
import requests, re, os
import pandas as pd
import numpy as np
import yaml
from bs4 import BeautifulSoup
from sqlalchemy import create_engine
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from pyspark.ml.recommendation import ALS
from pyspark import SparkContext
from pyspark.sql import SparkSession
#####################################
##### Model 1: Popularity Based #####
#####################################
def recommendation_popularity_based(engine):
url = 'https://store.steampowered.com/stats'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'lxml')
dic_current_player = {}
for i in soup.find('div', {'id':'detailStats'}).find_all('tr', {'class':'player_count_row'}):
lst_data = i.find_all('td')
current_player = int(lst_data[0].span.string.replace(',',''))
peak_today = int(lst_data[1].span.string.replace(',',''))
app_id = re.findall(r'(\d+)', lst_data[-1].a.get('href'))[0]
dic_current_player[app_id] = {'current_player' : current_player, 'peak_today' : peak_today}
df_popularity_based_result = pd.DataFrame.from_dict(dic_current_player, 'index')
df_popularity_based_result.index.name = 'app_id'
df_popularity_based_result.reset_index(inplace=True)
df_popularity_based_result.to_sql('recommended_games_popularity_based', engine, if_exists='replace', index = False)
#####################################
##### Model 2: Content Based #####
#####################################
def recommendation_content_based(engine):
df_game_description = pd.read_sql_query(
'''
SELECT
app_id,
short_description
FROM game_steam_app
WHERE short_description IS NOT NULL
AND type = "game"
AND name IS NOT NULL
AND release_date <= CURDATE()
AND initial_price IS NOT NULL
''', engine)
tfidf = TfidfVectorizer(strip_accents='unicode',stop_words='english').fit_transform(df_game_description['short_description'].tolist())
lst_app_id = df_game_description['app_id'].tolist()
dic_recomended = {}
for row_index in range(tfidf.shape[0]):
cosine_similarities = linear_kernel(tfidf[row_index:row_index+1], tfidf).flatten()
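        # argsort is ascending, so [-1] is the item itself (self-similarity of 1);
        # the [-2:-22:-1] slice below keeps the 20 most similar other games, highest first.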
top_related_rows = cosine_similarities.argsort()[-2:-22:-1]
dic_recomended.update({lst_app_id[row_index]:[lst_app_id[i] for i in top_related_rows]})
df_content_based_results = pd.DataFrame.from_dict(dic_recomended, 'index')
df_content_based_results.index.name = 'app_id'
df_content_based_results.reset_index(inplace=True)
df_content_based_results.to_sql('recommended_games_content_based',engine,if_exists='replace', index = False)
# Model 3: item based
def recommendation_item_based(engine):
df_purchase = pd.read_sql_query(
'''
SELECT app_id, user_id
FROM game_steam_user
WHERE playtime_forever > 15
''', engine).pivot_table(values = 'user_id', index = ['app_id'], columns = ['user_id'], aggfunc = len, fill_value = 0)
purchase_matrix = df_purchase.values
lst_app_id = df_purchase.index
dic_recomended_item_based = {}
for index in range(purchase_matrix.shape[0]):
cosine_similarities = linear_kernel(purchase_matrix[index:index+1], purchase_matrix).flatten()
lst_related_app = np.argsort(-cosine_similarities)[1:101]
dic_recomended_item_based.update({lst_app_id[index]:[lst_app_id[i] for i in lst_related_app]})
df_item_based_result = pd.DataFrame.from_dict(dic_recomended_item_based, 'index')
df_item_based_result.index.name = 'app_id'
df_item_based_result.reset_index(inplace=True)
df_item_based_result.to_sql('recommended_games_item_based', engine, if_exists='replace', chunksize = 1000, index = False)
# Model 4: Collaborative Filtering
def recommendation_als_based(engine):
config = yaml.safe_load(open('{}/config.yaml'.format(os.path.dirname(os.path.realpath(__file__)))))
db_username = config['mysql']['username']
db_password = config['mysql']['password']
db_endpoint = config['mysql']['endpoint']
db_database = config['mysql']['database']
sc=SparkContext()
spark = SparkSession(sc)
    # If you run into driver problems, move your MySQL JDBC driver into pyspark's jars folder
# Ref: https://stackoverflow.com/questions/49011012/cant-connect-to-mysql-database-from-pyspark-getting-jdbc-error
spark.read.format("jdbc").option("url", "jdbc:mysql://{}/{}".format(db_endpoint, db_database))\
.option("user", db_username).option("password", db_password)\
.option("dbtable", "game_steam_user")\
.option("driver", "com.mysql.cj.jdbc.Driver")\
.load().createOrReplaceTempView('user_inventory')
spark.read.format("jdbc").option("url", "jdbc:mysql://{}/{}".format(db_endpoint, db_database))\
.option("user", db_username).option("password", db_password)\
.option("dbtable", "game_steam_app")\
.option("driver", "com.mysql.cj.jdbc.Driver")\
.load().createOrReplaceTempView('game_steam_app')
df_user_playtime = spark.sql('''
SELECT
DENSE_RANK() OVER (ORDER BY user_id) AS user,
user_id,
app_id AS item,
LOG(playtime_forever) AS rating
FROM user_inventory
WHERE playtime_forever >= 5
''')
df_valid_games = spark.sql('''
SELECT app_id
FROM game_steam_app
WHERE short_description IS NOT NULL
AND name IS NOT NULL
AND type = "game"
AND initial_price IS NOT NULL
''')
df_user_inventory = df_user_playtime.join(df_valid_games, df_user_playtime['item'] == df_valid_games['app_id'], 'inner').select('user','user_id','item','rating')
dic_real_user_id = df_user_inventory.select('user','user_id').toPandas().set_index('user')['user_id'].to_dict()
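    # Spark's ALS requires numeric user/item ids, hence the DENSE_RANK() index computed above;
    # dic_real_user_id maps those dense indices back to the real Steam user_ids afterwards.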
als = ALS(rank = 10)
model = als.fit(df_user_inventory)
recommended_games = model.recommendForAllUsers(10)
dic_recomended_als_based = {}
for user, lst_recommended_games in recommended_games.select('user', 'recommendations.item').toPandas().set_index('user')['item'].to_dict().items():
user_id = dic_real_user_id.get(user)
dic_recomended_als_based[user_id] = {}
for i, app_id in enumerate(lst_recommended_games):
dic_recomended_als_based[user_id].update({i:app_id})
df_als_based_result = pd.DataFrame.from_dict(dic_recomended_als_based, 'index')
df_als_based_result.index.name = 'user_id'
df_als_based_result.reset_index(inplace=True)
df_als_based_result.to_sql('recommended_games_als_based', engine, if_exists='replace', chunksize = 1000, index = False)
def build_recommendation():
config = yaml.safe_load(open('config.yaml'))
db_username = config['mysql']['username']
db_password = config['mysql']['password']
db_endpoint = config['mysql']['endpoint']
db_database = config['mysql']['database']
engine = create_engine('mysql+pymysql://{}:{}@{}/{}?charset=utf8mb4'.format(db_username, db_password, db_endpoint, db_database))
recommendation_popularity_based(engine)
recommendation_content_based(engine)
recommendation_item_based(engine)
recommendation_als_based(engine)
``` |
{
"source": "JJ/zap-api-python",
"score": 2
} |
#### File: src/zapv2/__init__.py
```python
__docformat__ = 'restructuredtext'
__version__ = '0.0.17'
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from .accessControl import accessControl
from .acsrf import acsrf
from .alert import alert
from .alertFilter import alertFilter
from .ascan import ascan
from .ajaxSpider import ajaxSpider
from .authentication import authentication
from .authorization import authorization
from .autoupdate import autoupdate
from .brk import brk
from .context import context
from .core import core
from .exportreport import exportreport
from .forcedUser import forcedUser
from .httpSessions import httpSessions
from .importLogFiles import importLogFiles
from .importurls import importurls
from .localProxies import localProxies
from .openapi import openapi
from .params import params
from .pnh import pnh
from .pscan import pscan
from .replacer import replacer
from .reveal import reveal
from .revisit import revisit
from .ruleConfig import ruleConfig
from .script import script
from .search import search
from .selenium import selenium
from .sessionManagement import sessionManagement
from .soap import soap
from .spider import spider
from .stats import stats
from .users import users
from .wappalyzer import wappalyzer
from .websocket import websocket
class ZAPv2(object):
"""
Client API implementation for integrating with ZAP v2.
"""
base = 'http://zap/JSON/'
base_other = 'http://zap/OTHER/'
def __init__(self, proxies=None, apikey=None, validate_status_code=False):
"""
Creates an instance of the ZAP api client.
:Parameters:
- `proxies`: dictionary of ZAP proxies to use.
        Note that all of the other classes in this directory are generated;
        new ones will need to be manually added to this file.
"""
self.__proxies = proxies or {
'http': 'http://127.0.0.1:8080',
'https': 'http://127.0.0.1:8080'
}
self.__apikey = apikey
self.__validate_status_code=validate_status_code
self.accessControl = accessControl(self)
self.acsrf = acsrf(self)
self.alert = alert(self)
self.alertFilter = alertFilter(self)
self.ajaxSpider = ajaxSpider(self)
self.ascan = ascan(self)
self.authentication = authentication(self)
self.authorization = authorization(self)
self.autoupdate = autoupdate(self)
self.brk = brk(self)
self.context = context(self)
self.core = core(self)
self.exportreport = exportreport(self)
self.forcedUser = forcedUser(self)
self.httpsessions = httpSessions(self)
self.importLogFiles = importLogFiles(self)
self.importurls = importurls(self)
self.localProxies = localProxies(self)
self.openapi = openapi(self)
self.params = params(self)
self.pnh = pnh(self)
self.pscan = pscan(self)
self.replacer = replacer(self)
self.reveal = reveal(self)
self.revisit = revisit(self)
self.ruleConfig = ruleConfig(self)
self.script = script(self)
self.search = search(self)
self.selenium = selenium(self)
self.sessionManagement = sessionManagement(self)
self.soap = soap(self)
self.spider = spider(self)
self.stats = stats(self)
self.users = users(self)
self.wappalyzer = wappalyzer(self)
self.websocket = websocket(self)
# not very nice, but prevents warnings when accessing the ZAP API via https
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Currently create a new session for each request to prevent request failing
# e.g. when polling the spider status
#self.session = requests.Session()
#if apikey is not None:
# self.session.headers['X-ZAP-API-Key'] = apikey
def urlopen(self, url, *args, **kwargs):
"""
Opens a url forcing the proxies to be used.
:Parameters:
- `args`: all non-keyword arguments.
- `kwargs`: all other keyword arguments.
"""
# Must never leak the API key via proxied requests
return requests.get(url, proxies=self.__proxies, verify=False, *args, **kwargs).text
def _request_api(self, url, query=None):
"""
Shortcut for an API request. Will always add the apikey (if defined)
:Parameters:
- `url`: the url to GET at.
"""
if not url.startswith('http://zap/'):
# Only allow requests to the API so that we never leak the apikey
raise ValueError('A non ZAP API url was specified ' + url)
# In theory we should be able to reuse the session,
# but there have been problems with that
self.session = requests.Session()
if self.__apikey is not None:
self.session.headers['X-ZAP-API-Key'] = self.__apikey
query = query or {}
if self.__apikey is not None:
# Add the apikey to get params for backwards compatibility
if not query.get('apikey'):
query['apikey'] = self.__apikey
response = self.session.get(url, params=query, proxies=self.__proxies, verify=False)
if (self.__validate_status_code and response.status_code >= 300 and response.status_code < 500):
raise Exception("Non-successfull status code returned from ZAP, which indicates a bad request: "
+ str(response.status_code)
+ "response: " + response.text )
elif (self.__validate_status_code and response.status_code >= 500):
raise Exception("Non-successfull status code returned from ZAP, which indicates a ZAP internal error: "
+ str(response.status_code)
+ "response: " + response.text )
return response
def _request(self, url, get=None):
"""
Shortcut for a GET request.
:Parameters:
- `url`: the url to GET at.
- `get`: the dictionary to turn into GET variables.
"""
data = self._request_api(url, get)
return data.json()
def _request_other(self, url, get=None):
"""
Shortcut for an API OTHER GET request.
:Parameters:
- `url`: the url to GET at.
- `get`: the dictionary to turn into GET variables.
"""
data = self._request_api(url, get)
return data.text
``` |
{
"source": "jjzhang166/chemfiles_cfiles",
"score": 3
} |
#### File: chemfiles_cfiles/tests/density.py
```python
from testrun import cfiles
import os
TRAJECTORY = os.path.join(os.path.dirname(__file__), "data", "nt.xyz")
OUTPUT = "tmp.dat"
def read_data(path):
data = []
with open(path) as fd:
for line in fd:
if line.startswith("#"):
continue
dist, value = map(float, line.split())
data.append((dist, value))
return data
def check_density(data):
for (radius, value) in data:
# Check the inside of the nanotube is empty
if (radius < 5.0):
assert(value == 0)
# Check density is zero between Oint and Si
if (radius > 7.1 and radius < 7.7):
assert(value == 0)
def check_max(data):
# Check the maximal value
max_value = max(data, key=lambda u: u[1])
assert(max_value[0] > 9.5)
assert(max_value[0] < 9.9)
def density(selection):
out, err = cfiles(
"density",
"-c", "24:24:25.458:90:90:120",
"--max=20",
"--points=200",
"--radial=Z",
"-s", selection,
TRAJECTORY, "-o", OUTPUT
)
assert(out == "")
assert(err == "")
data = read_data(OUTPUT)
check_density(data)
return data
if __name__ == '__main__':
tot = density("atoms: all")
al = density("atoms: type Al")
si = density("atoms: type Si")
o = density("atoms: type O")
h = density("atoms: type H")
# Check tot is the sum of all the elements
for radius in range(200):
assert(tot[radius][1] - (al[radius][1] + si[radius][1] + o[radius][1] + h[radius][1]) < 0.1)
os.unlink(OUTPUT)
``` |
{
"source": "jjzhang166/chemfiles",
"score": 2
} |
#### File: scripts/ci/check-public-headers.py
```python
import os
import sys
import re
ROOT = os.path.join(os.path.dirname(__file__), "..", "..")
GENERATED_HEADERS = ["chemfiles/config.hpp", "chemfiles/exports.hpp"]
ERRORS = 0
WHITELIST = [
# standard C99 headers
"stdbool.h", "stdint.h",
# standard C++11 headers
"iterator", "functional", "cstdint", "array", "utility", "cassert",
"string", "memory", "exception", "limits", "algorithm", "stdexcept",
"vector", "cmath", "type_traits", "unordered_map",
# chemfiles helper headers
"chemfiles/span.hpp",
"chemfiles/optional.hpp",
"chemfiles/exports.hpp",
"chemfiles/config.hpp",
"chemfiles/sorted_set.hpp",
"chemfiles/unreachable.hpp",
# chemfiles main headers
"chemfiles/generic.hpp",
"chemfiles/types.hpp",
"chemfiles/Atom.hpp",
"chemfiles/Frame.hpp",
"chemfiles/Error.hpp",
"chemfiles/Residue.hpp",
"chemfiles/Property.hpp",
"chemfiles/Topology.hpp",
"chemfiles/UnitCell.hpp",
"chemfiles/Trajectory.hpp",
"chemfiles/Selections.hpp",
"chemfiles/Connectivity.hpp",
# chemfiles capi headers
"chemfiles/capi/atom.h",
"chemfiles/capi/selection.h",
"chemfiles/capi/trajectory.h",
"chemfiles/capi/residue.h",
"chemfiles/capi/property.h",
"chemfiles/capi/cell.h",
"chemfiles/capi/frame.h",
"chemfiles/capi/types.h",
"chemfiles/capi/topology.h",
"chemfiles/capi/misc.h",
]
def error(message):
global ERRORS
ERRORS += 1
print(message)
def included_headers(path):
includes = set()
with open(path) as fd:
for line in fd:
if "#include" in line:
matched = re.match("#include\s*[\"<](.*)[\">]", line)
if not matched:
error("bad include in {}: {}".format(path, line))
header = matched.groups()[0]
includes.add(header)
if header.startswith("chemfiles"):
if header not in GENERATED_HEADERS:
path = os.path.join(ROOT, "include", header)
includes.update(included_headers(path))
return includes
def check_allowded(headers):
for header in headers:
if header not in WHITELIST:
error("private header {} is publicly reachable".format(header))
if __name__ == '__main__':
headers = included_headers(os.path.join(ROOT, "include", "chemfiles.h"))
check_allowded(headers)
headers = included_headers(os.path.join(ROOT, "include", "chemfiles.hpp"))
check_allowded(headers)
if ERRORS != 0:
sys.exit(1)
``` |
{
"source": "jjzhang166/dface",
"score": 2
} |
#### File: src/core/models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform(m.weight.data)
nn.init.constant(m.bias, 0.1)
class LossFn:
def __init__(self, cls_factor=1, box_factor=1, landmark_factor=1):
# loss function
self.cls_factor = cls_factor
self.box_factor = box_factor
self.land_factor = landmark_factor
self.loss_cls = nn.BCELoss()
self.loss_box = nn.MSELoss()
self.loss_landmark = nn.MSELoss()
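    # Assumed MTCNN-style label convention for gt_label (not defined in this file):
    # 1 = positive face, 0 = negative, -1 = part face, -2 = landmark sample.
    # cls_loss keeps only labels >= 0, box_loss uses every label except 0, and
    # landmark_loss keeps only label -2, matching the masking done in the methods below.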
def cls_loss(self,gt_label,pred_label):
pred_label = torch.squeeze(pred_label)
gt_label = torch.squeeze(gt_label)
        # build a mask of elements >= 0; only labels 0 and 1 affect the detection loss
mask = torch.ge(gt_label,0)
valid_gt_label = torch.masked_select(gt_label,mask)
valid_pred_label = torch.masked_select(pred_label,mask)
return self.loss_cls(valid_pred_label,valid_gt_label)*self.cls_factor
def box_loss(self,gt_label,gt_offset,pred_offset):
pred_offset = torch.squeeze(pred_offset)
gt_offset = torch.squeeze(gt_offset)
gt_label = torch.squeeze(gt_label)
#get the mask element which != 0
unmask = torch.eq(gt_label,0)
mask = torch.eq(unmask,0)
#convert mask to dim index
chose_index = torch.nonzero(mask.data)
chose_index = torch.squeeze(chose_index)
#only valid element can effect the loss
valid_gt_offset = gt_offset[chose_index,:]
valid_pred_offset = pred_offset[chose_index,:]
return self.loss_box(valid_pred_offset,valid_gt_offset)*self.box_factor
def landmark_loss(self,gt_label,gt_landmark,pred_landmark):
pred_landmark = torch.squeeze(pred_landmark)
gt_landmark = torch.squeeze(gt_landmark)
gt_label = torch.squeeze(gt_label)
mask = torch.eq(gt_label,-2)
chose_index = torch.nonzero(mask.data)
chose_index = torch.squeeze(chose_index)
valid_gt_landmark = gt_landmark[chose_index, :]
valid_pred_landmark = pred_landmark[chose_index, :]
return self.loss_landmark(valid_pred_landmark,valid_gt_landmark)*self.land_factor
class PNet(nn.Module):
''' PNet '''
def __init__(self, is_train=False, use_cuda=True):
super(PNet, self).__init__()
self.is_train = is_train
self.use_cuda = use_cuda
# backend
self.pre_layer = nn.Sequential(
nn.Conv2d(3, 10, kernel_size=3, stride=1), # conv1
nn.PReLU(), # PReLU1
nn.MaxPool2d(kernel_size=2, stride=2), # pool1
nn.Conv2d(10, 16, kernel_size=3, stride=1), # conv2
nn.PReLU(), # PReLU2
nn.Conv2d(16, 32, kernel_size=3, stride=1), # conv3
nn.PReLU() # PReLU3
)
# detection
self.conv4_1 = nn.Conv2d(32, 1, kernel_size=1, stride=1)
        # bounding box regression
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1, stride=1)
# landmark localization
self.conv4_3 = nn.Conv2d(32, 10, kernel_size=1, stride=1)
        # weight initialization with xavier
self.apply(weights_init)
def forward(self, x):
x = self.pre_layer(x)
label = F.sigmoid(self.conv4_1(x))
offset = self.conv4_2(x)
# landmark = self.conv4_3(x)
if self.is_train is True:
# label_loss = LossUtil.label_loss(self.gt_label,torch.squeeze(label))
# bbox_loss = LossUtil.bbox_loss(self.gt_bbox,torch.squeeze(offset))
return label,offset
#landmark = self.conv4_3(x)
return label, offset
class RNet(nn.Module):
''' RNet '''
def __init__(self,is_train=False, use_cuda=True):
super(RNet, self).__init__()
self.is_train = is_train
self.use_cuda = use_cuda
# backend
self.pre_layer = nn.Sequential(
nn.Conv2d(3, 28, kernel_size=3, stride=1), # conv1
nn.PReLU(), # prelu1
nn.MaxPool2d(kernel_size=3, stride=2), # pool1
nn.Conv2d(28, 48, kernel_size=3, stride=1), # conv2
nn.PReLU(), # prelu2
nn.MaxPool2d(kernel_size=3, stride=2), # pool2
nn.Conv2d(48, 64, kernel_size=2, stride=1), # conv3
nn.PReLU() # prelu3
)
self.conv4 = nn.Linear(64*2*2, 128) # conv4
self.prelu4 = nn.PReLU() # prelu4
# detection
self.conv5_1 = nn.Linear(128, 1)
# bounding box regression
self.conv5_2 = nn.Linear(128, 4)
        # landmark localization
self.conv5_3 = nn.Linear(128, 10)
        # weight initialization with xavier
self.apply(weights_init)
def forward(self, x):
# backend
x = self.pre_layer(x)
x = x.view(x.size(0), -1)
x = self.conv4(x)
x = self.prelu4(x)
# detection
det = torch.sigmoid(self.conv5_1(x))
box = self.conv5_2(x)
# landmark = self.conv5_3(x)
if self.is_train is True:
return det, box
#landmard = self.conv5_3(x)
return det, box
class ONet(nn.Module):
    ''' ONet '''
def __init__(self,is_train=False, use_cuda=True):
super(ONet, self).__init__()
self.is_train = is_train
self.use_cuda = use_cuda
# backend
self.pre_layer = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1), # conv1
nn.PReLU(), # prelu1
nn.MaxPool2d(kernel_size=3, stride=2), # pool1
nn.Conv2d(32, 64, kernel_size=3, stride=1), # conv2
nn.PReLU(), # prelu2
nn.MaxPool2d(kernel_size=3, stride=2), # pool2
nn.Conv2d(64, 64, kernel_size=3, stride=1), # conv3
nn.PReLU(), # prelu3
nn.MaxPool2d(kernel_size=2,stride=2), # pool3
nn.Conv2d(64,128,kernel_size=2,stride=1), # conv4
nn.PReLU() # prelu4
)
self.conv5 = nn.Linear(128*2*2, 256) # conv5
self.prelu5 = nn.PReLU() # prelu5
# detection
self.conv6_1 = nn.Linear(256, 1)
# bounding box regression
self.conv6_2 = nn.Linear(256, 4)
        # landmark localization
self.conv6_3 = nn.Linear(256, 10)
        # weight initialization with xavier
self.apply(weights_init)
def forward(self, x):
# backend
x = self.pre_layer(x)
x = x.view(x.size(0), -1)
x = self.conv5(x)
x = self.prelu5(x)
# detection
det = torch.sigmoid(self.conv6_1(x))
box = self.conv6_2(x)
landmark = self.conv6_3(x)
if self.is_train is True:
return det, box, landmark
#landmard = self.conv5_3(x)
return det, box, landmark
``` |
{
"source": "jjzhang166/FIT-Projects",
"score": 3
} |
#### File: IPP/2.CHA/cha.py
```python
import sys
import argparse
#CHA:xmikus15
import Errors
import os
from HeaderParser import HeaderParser
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
self.exit(Errors.BadParams, '%s: error: %s\n' % (self.prog, message))
class UniqueStore(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
if getattr(namespace, self.dest, self.default) is not None:
parser.exit(Errors.BadParams,option_string + " appears several times.\n")
setattr(namespace, self.dest, values)
argParser = ArgumentParser(add_help=False)
argParser.add_argument('--help', action=UniqueStore, nargs=0)
argParser.add_argument('--input', action=UniqueStore)
argParser.add_argument('--output', action=UniqueStore)
argParser.add_argument('--pretty-xml',action=UniqueStore, type=int, nargs='?', const = 4)
argParser.add_argument('--no-inline',action=UniqueStore, nargs=0)
argParser.add_argument('--max-par', action=UniqueStore, type=int)
argParser.add_argument('--no-duplicates',action=UniqueStore, nargs=0)
argParser.add_argument('--remove-whitespace',action=UniqueStore, nargs=0)
args = vars(argParser.parse_args())
if args['help'] != None and len([x for x in args if args[x] != None]) > 1:
sys.exit(Errors.BadParams)
elif args['help'] != None:
argParser.print_help()
sys.exit(Errors.NoError)
""" If no input was specified, traverse in current directory """
if args['input'] == None:
args['input'] = './'
if args['output'] != None:
try:
out=open(args['output'],'w',encoding='utf8')
except IOError:
sys.exit(Errors.OutFile)
else:
out = sys.stdout
filesName = []
try:
if os.path.isfile(args['input']):
_dir = ""
filesName.append(args['input'])
elif os.path.isdir(os.path.expanduser(args['input'])):
_dir = os.path.expanduser(args['input'])
for root, dirs,files in os.walk(_dir):
for name in files:
if name.endswith(".h"):
filesName.append(os.path.join(root,name))
else:
print("File/dir not found",file=sys.stderr)
sys.exit(Errors.FileNotFound)
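    # Newlines and indentation are only emitted when --pretty-xml is given:
    # "\n" * (args['pretty_xml'] != None) evaluates to "\n" or "" since bool is an int.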
out.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n"*(args['pretty_xml'] != None))
out.write("<functions dir=\"%s\">" % _dir + "\n"*(args['pretty_xml'] != None))
for _file in filesName:
header = HeaderParser(_file,args)
header.Parse()
header.ParseFunctions()
_file = _file[len(_dir):]
for function in header.functions:
if args['pretty_xml']:
out.write(" "*args['pretty_xml'])
out.write("<function file=\"%s\" name=\"%s\" varargs=\"%s\" rettype=\"%s\">" % (_file,function.name.strip(),function.vargs,function.returnType.strip()) + "\n"*(args['pretty_xml'] != None))
i = 1
for param in function.parameters:
if args['pretty_xml']:
out.write(" "*args['pretty_xml'])
out.write("<param number=\"%d\" type=\"%s\" />" % (i,param.strip()) + "\n"*(args['pretty_xml'] != None))
i += 1
if args['pretty_xml']:
out.write(" "*args['pretty_xml'])
out.write("</function>" + "\n"*(args['pretty_xml'] != None))
out.write("</functions>\n")
except (OSError) as e:
print(str(e),file=sys.stderr)
sys.exit(Errors.FileNotFound)
``` |
{
"source": "jjzhang166/libtorrent",
"score": 2
} |
#### File: bindings/python/test.py
```python
import libtorrent as lt
import unittest
import time
import os
import shutil
import binascii
# test torrent_info
class test_torrent_info(unittest.TestCase):
def test_bencoded_constructor(self):
info = lt.torrent_info({ 'info': {'name': 'test_torrent', 'length': 1234,
'piece length': 16 * 1024,
'pieces': 'aaaaaaaaaaaaaaaaaaaa'}})
self.assertEqual(info.num_files(), 1)
f = info.files()
self.assertEqual(f.file_path(0), 'test_torrent')
self.assertEqual(f.file_size(0), 1234)
self.assertEqual(info.total_size(), 1234)
class test_alerts(unittest.TestCase):
def test_alert(self):
ses = lt.session({'alert_mask': lt.alert.category_t.all_categories})
shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', 'base.torrent'), '.')
ti = lt.torrent_info('base.torrent');
h = ses.add_torrent({'ti': ti, 'save_path': os.getcwd()})
st = h.status()
time.sleep(1)
ses.remove_torrent(h)
ses.wait_for_alert(1000) # milliseconds
alerts = ses.pop_alerts()
for a in alerts:
print(a.message())
print(st.next_announce)
self.assertEqual(st.name, 'temp')
print(st.errc.message())
print(st.pieces)
print(st.last_seen_complete)
print(st.completed_time)
print(st.progress)
print(st.num_pieces)
print(st.distributed_copies)
print(st.paused)
print(st.info_hash)
self.assertEqual(st.save_path, os.getcwd())
class test_bencoder(unittest.TestCase):
def test_bencode(self):
encoded = lt.bencode({'a': 1, 'b': [1,2,3], 'c': 'foo'})
self.assertEqual(encoded, b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe')
def test_bdecode(self):
encoded = b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe'
decoded = lt.bdecode(encoded)
self.assertEqual(decoded, {b'a': 1, b'b': [1,2,3], b'c': b'foo'})
class test_sha1hash(unittest.TestCase):
def test_sha1hash(self):
h = 'a0'*20
s = lt.sha1_hash(binascii.unhexlify(h))
self.assertEqual(h, str(s))
class test_session(unittest.TestCase):
def test_post_session_stats(self):
s = lt.session({'alert_mask': lt.alert.category_t.stats_notification})
s.post_session_stats()
a = s.wait_for_alert(1000)
self.assertTrue(isinstance(a, lt.session_stats_alert))
self.assertTrue(isinstance(a.values, dict))
self.assertTrue(len(a.values) > 0)
if __name__ == '__main__':
print(lt.__version__)
unittest.main()
``` |
{
"source": "jjzhang166/minerva",
"score": 2
} |
#### File: apps/lstm/lstm-lm-example.py
```python
from collections import defaultdict
import sys
import math
import time
import numpy as np
from scipy import linalg
from scipy.special import expit # Vectorized sigmoid function
import owl
from owl.conv import *
import owl.elewise as ele
class LSTMModel:
def initw(n, d):
magic_number = 0.3
        npa = (np.random.rand(n, d) * 2 - 1) * magic_number # U[-0.3, 0.3]
return owl.from_numpy(npa).trans()
def __init__(self, vocab_size, input_size, hidden_size):
output_size = vocab_size
self.Layers = [input_size, hidden_size, output_size]
print 'Model size:', self.Layers
# Recurrent weights: take x_t, h_{t-1}, and bias unit
# and produce the 3 gates and the input to cell signal
# self.WIFOG = owl.randn([self.Layers[0] + self.Layers[1], self.Layers[1] * 4], 0.0, 0.1)
# self.BIFOG = owl.zeros([self.Layers[1] * 4, 1])
self.ig_weight_data = owl.randn([self.Layers[1], self.Layers[0]], 0.0, 0.1)
self.fg_weight_data = owl.randn([self.Layers[1], self.Layers[0]], 0.0, 0.1)
self.og_weight_data = owl.randn([self.Layers[1], self.Layers[0]], 0.0, 0.1)
self.ff_weight_data = owl.randn([self.Layers[1], self.Layers[0]], 0.0, 0.1)
self.ig_weight_prev = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.fg_weight_prev = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.og_weight_prev = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.ff_weight_prev = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.ig_weight_cell = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.fg_weight_cell = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.og_weight_cell = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.ff_weight_cell = owl.randn([self.Layers[1], self.Layers[1]], 0.0, 0.1)
self.ig_weight_bias = owl.zeros([self.Layers[1], 1])
self.fg_weight_bias = owl.zeros([self.Layers[1], 1])
self.og_weight_bias = owl.zeros([self.Layers[1], 1])
self.ff_weight_bias = owl.zeros([self.Layers[1], 1])
# Decoder weights (e.g. mapping to vocabulary)
self.decoder_weights = owl.randn([self.Layers[2], self.Layers[1]], 0.0, 0.1) # decoder
self.decoder_bias = owl.zeros([output_size, 1])
self.emb_weight = [None] * vocab_size
for i in range(vocab_size):
self.emb_weight[i] = owl.randn([input_size, 1], 0.0, 0.1)
def LSTM_init():
# First read in the input
wids = defaultdict(lambda: len(wids))
wids['<bos>'] = 0 # begin of sentence
wids['<eos>'] = 1 # end of sentence
train_sents = []
test_sents = []
train_words = 0
test_words = 0
fin_train = open("./train")
for line in fin_train:
wordlist = ("<bos> %s <eos>" % line.strip()).split(' ')
wordlist_id = [wids[w] for w in wordlist]
train_words += len(wordlist) - 2
train_sents.append(wordlist_id)
fin_test = open("./test")
for line in fin_test:
wordlist = ("<bos> %s <eos>" % line.strip()).split(' ')
wordlist_id = []
for w in wordlist:
if wids.has_key(w):
wordlist_id.append(wids[w])
test_words += 1
test_sents.append(wordlist_id)
# Define input-dependent variables
N = 100 # hidden units
D = N # embedding
vocab_size = len(wids) # Vocabulary size
print "K", vocab_size, "words", train_words, test_words
return LSTMModel(vocab_size, D, N), train_sents, test_sents, train_words, test_words
def LSTM_train(model, sents, words, learning_rate, EPOCH, tanhC_version = 1):
# Constants
N = model.Layers[1] # Number of units
K = model.Layers[2] # Vocabulary size
last_time = time.time()
# For each epoch
for epoch_id in range(1, EPOCH + 1):
epoch_ll = 0
# For each sentence
for sent_id, sent in enumerate(sents):
#print sent_id
#print "sent", sent
#print "sents", sents
##### Initialize activations #####
Tau = len(sent)
sent_ll = 0 # Sentence log likelihood
data = [None] * Tau
Hout = [None] * Tau
Hout[0] = owl.zeros([N, 1])
act_ig = [None] * Tau
act_fg = [None] * Tau
act_og = [None] * Tau
act_ff = [None] * Tau
C = [None] * Tau
C[0] = owl.zeros([N, 1])
dY = [None] * Tau
dBd = owl.zeros([model.Layers[2], 1]) #dY.sum(0)
dWd = owl.zeros([model.Layers[2], model.Layers[1]])
dHout = [None] * Tau #dY.dot(model.decoder_weights.transpose())
dEmb = [None] * Tau
##### Forward pass #####
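            # The loop below implements the peephole LSTM recurrences, matching the owl ops used:
            #   i_t = sigm(W_i x_t + U_i h_{t-1} + P_i c_{t-1} + b_i)   (input gate, "ig")
            #   f_t = sigm(W_f x_t + U_f h_{t-1} + P_f c_{t-1} + b_f)   (forget gate, "fg")
            #   g_t = tanh(W_g x_t + U_g h_{t-1} + b_g)                 (cell input, "ff")
            #   c_t = i_t * g_t + f_t * c_{t-1}
            #   o_t = sigm(W_o x_t + U_o h_{t-1} + P_o c_t + b_o)       (output gate, "og")
            #   h_t = o_t * tanh(c_t)                                   (tanhC_version)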
# For each time step
for t in range(1, Tau):
# predict the (t+1)'th word from the t'th word
data[t] = model.emb_weight[sent[t - 1]]
NVector = np.zeros((K, 1))
NVector[sent[t]] = 1
target = owl.from_numpy(NVector).trans()
act_ig[t] = model.ig_weight_data * data[t] + model.ig_weight_prev * Hout[t - 1] + model.ig_weight_cell * C[t - 1] + model.ig_weight_bias
act_ig[t] = ele.sigm(act_ig[t])
act_fg[t] = model.fg_weight_data * data[t] + model.fg_weight_prev * Hout[t - 1] + model.fg_weight_cell * C[t - 1] + model.fg_weight_bias
act_fg[t] = ele.sigm(act_fg[t])
act_ff[t] = model.ff_weight_data * data[t] + model.ff_weight_prev * Hout[t - 1] + model.ff_weight_bias
act_ff[t] = ele.tanh(act_ff[t])
C[t] = ele.mult(act_ig[t], act_ff[t]) + ele.mult(act_fg[t], C[t - 1])
act_og[t] = model.og_weight_data * data[t] + model.og_weight_prev * Hout[t - 1] + model.og_weight_cell * C[t] + model.og_weight_bias
act_og[t] = ele.sigm(act_og[t])
if tanhC_version:
Hout[t] = ele.mult(act_og[t], ele.tanh(C[t]))
else:
Hout[t] = ele.mult(act_og[t], C[t])
Y = softmax(model.decoder_weights * Hout[t] + model.decoder_bias)
# BP to Hout
dY[t] = Y - target
dBd += dY[t]
dWd += dY[t] * Hout[t].trans()
dHout[t] = model.decoder_weights.trans() * dY[t]
# evaluation
output = Y.to_numpy() # Can directly get a single element from Y
# print output[0, sent[t]]
sent_ll += math.log(max(output[0, sent[t]],1e-20), 2)
#print "Y_0[t]",Y_o[t]
#print "Y_o[t][sent[t]]",Y_o[t][sent[t]]
#print np.sum(output.to_numpy())
# output = Ym[t].trans() * data[t]
# sent_ll += math.log10( max(np.sum(output.to_numpy()),1e-20) )
##### Initialize gradient vectors #####
weight_update_ig_data = owl.zeros([model.Layers[1], model.Layers[0]])
weight_update_ig_prev = owl.zeros([model.Layers[1], model.Layers[1]])
weight_update_ig_cell = owl.zeros([model.Layers[1], model.Layers[1]])
weight_update_ig_bias = owl.zeros([model.Layers[1], 1])
weight_update_fg_data = owl.zeros([model.Layers[1], model.Layers[0]])
weight_update_fg_prev = owl.zeros([model.Layers[1], model.Layers[1]])
weight_update_fg_cell = owl.zeros([model.Layers[1], model.Layers[1]])
weight_update_fg_bias = owl.zeros([model.Layers[1], 1])
weight_update_og_data = owl.zeros([model.Layers[1], model.Layers[0]])
weight_update_og_prev = owl.zeros([model.Layers[1], model.Layers[1]])
weight_update_og_cell = owl.zeros([model.Layers[1], model.Layers[1]])
weight_update_og_bias = owl.zeros([model.Layers[1], 1])
weight_update_ff_data = owl.zeros([model.Layers[1], model.Layers[0]])
weight_update_ff_prev = owl.zeros([model.Layers[1], model.Layers[1]])
weight_update_ff_bias = owl.zeros([model.Layers[1], 1])
dC = [None] * Tau
for t in xrange(Tau):
dC[t] = owl.zeros(C[t].shape)
# Calculate the error and add it
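            # Backward pass (BPTT) through time. At each step the cell-state gradient dC[t]
            # picks up two kinds of contributions: from dHout[t] through h_t = o_t * tanh(c_t)
            # (plus the output gate's peephole weight), and from step t into dC[t-1]
            # through the forget gate f_t and the ig/fg peephole connections, as coded below.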
for t in reversed(range(1, Tau)):
#print "sent",sent
#print "t",t
# BP from og controled gate and og
if tanhC_version:
tanhC = ele.tanh(C[t])
dTanhC = ele.mult(dHout[t], act_og[t])
sen_og = ele.mult(dHout[t], tanhC)
dC[t] += ele.mult((1 - ele.mult(tanhC, tanhC)), dTanhC)
else:
sen_og = ele.mult(C[t], dHout[t])
dC[t] += ele.mult(act_og[t], dHout[t])
# BP from og
sen_og = ele.mult(ele.mult(act_og[t], (1.0 - act_og[t])), sen_og)
dHout[t - 1] = model.og_weight_prev.trans() * sen_og
dC[t] += model.og_weight_cell.trans() * sen_og
dEmb[t] = model.og_weight_data.trans() * sen_og
# BP from fg controled gate
sen_fg = ele.mult(C[t - 1], dC[t])
dC[t - 1] += ele.mult(act_fg[t], dC[t])
# BP from ig controled gate
sen_ig = ele.mult(act_ff[t], dC[t])
sen_ff = ele.mult(act_ig[t], dC[t])
sen_ff = ele.mult((1 - ele.mult(act_ff[t], act_ff[t])), sen_ff)
dEmb[t] += model.ff_weight_data.trans() * sen_ff
# BP from fg
sen_fg = ele.mult(ele.mult(act_fg[t], (1.0 - act_fg[t])), sen_fg)
dHout[t - 1] += model.fg_weight_prev.trans() * sen_fg
dC[t - 1] += model.fg_weight_cell.trans() * sen_fg
dEmb[t] += model.fg_weight_data.trans() * sen_fg
# BP from ig
sen_ig = ele.mult(ele.mult(act_ig[t], (1.0 - act_ig[t])), sen_ig)
dHout[t - 1] += model.ig_weight_prev.trans() * sen_ig
dC[t - 1] += model.ig_weight_cell.trans() * sen_ig
dEmb[t] += model.ig_weight_data.trans() * sen_ig
# derivatives on weight matrix and bias
weight_update_ig_data += sen_ig * data[t].trans()
weight_update_ig_prev += sen_ig * Hout[t - 1].trans()
weight_update_ig_cell += sen_ig * C[t - 1].trans()
weight_update_ig_bias += sen_ig
weight_update_fg_data += sen_fg * data[t].trans()
weight_update_fg_prev += sen_fg * Hout[t - 1].trans()
weight_update_fg_cell += sen_fg * C[t - 1].trans()
weight_update_fg_bias += sen_fg
weight_update_og_data += sen_og * data[t].trans()
weight_update_og_prev += sen_og * Hout[t - 1].trans()
weight_update_og_cell += sen_og * C[t].trans()
weight_update_og_bias += sen_og
weight_update_ff_data += sen_ff * data[t].trans()
weight_update_ff_prev += sen_ff * Hout[t - 1].trans()
weight_update_ff_bias += sen_ff
# normalize the gradients
rate = learning_rate / Tau
# weight update
model.ig_weight_prev -= rate * weight_update_ig_prev
model.ig_weight_data -= rate * weight_update_ig_data
model.ig_weight_cell -= rate * weight_update_ig_cell
model.ig_weight_bias -= rate * weight_update_ig_bias
model.fg_weight_prev -= rate * weight_update_fg_prev
model.fg_weight_data -= rate * weight_update_fg_data
model.fg_weight_cell -= rate * weight_update_fg_cell
model.fg_weight_bias -= rate * weight_update_fg_bias
model.og_weight_prev -= rate * weight_update_og_prev
model.og_weight_data -= rate * weight_update_og_data
model.og_weight_cell -= rate * weight_update_og_cell
model.og_weight_bias -= rate * weight_update_og_bias
model.ff_weight_prev -= rate * weight_update_ff_prev
model.ff_weight_data -= rate * weight_update_ff_data
model.ff_weight_bias -= rate * weight_update_ff_bias
model.decoder_weights -= rate * dWd
model.decoder_bias -= rate * dBd
for t in range(1, Tau):
model.emb_weight[sent[t - 1]] -= rate * dEmb[t]
# Print results
epoch_ll += sent_ll
# print(" Sentence %d LL: %f" % (sent_id, sent_ll))
epoch_ent = epoch_ll * (-1) / words
epoch_ppl = 2 ** epoch_ent
cur_time = time.time()
print("Epoch %d (alpha=%f) PPL=%f" % (epoch_id, learning_rate, epoch_ppl))
print " time consumed:", cur_time - last_time
last_time = cur_time
return model, learning_rate
def LSTM_test(model, sents, words, tanhC_version = 1):
N = model.Layers[1]
K = model.Layers[2]
test_ll = 0
# For each sentence
for sent_id, sent in enumerate(sents):
#print sent_id
#print "sent", sent
#print "sents", sents
##### Initialize activations #####
Tau = len(sent)
sent_ll = 0 # Sentence log likelihood
data = [None] * Tau
Hout = [None] * Tau
Hout[0] = owl.zeros([N, 1])
act_ig = [None] * Tau
act_fg = [None] * Tau
act_og = [None] * Tau
act_ff = [None] * Tau
C = [None] * Tau
C[0] = owl.zeros([N, 1])
##### Forward pass #####
# For each time step
for t in range(1, Tau):
# predict the (t+1)'th word from the t'th word
data[t] = model.emb_weight[sent[t - 1]]
act_ig[t] = model.ig_weight_data * data[t] + model.ig_weight_prev * Hout[t - 1] + model.ig_weight_cell * C[t - 1] + model.ig_weight_bias
act_ig[t] = ele.sigm(act_ig[t])
act_fg[t] = model.fg_weight_data * data[t] + model.fg_weight_prev * Hout[t - 1] + model.fg_weight_cell * C[t - 1] + model.fg_weight_bias
act_fg[t] = ele.sigm(act_fg[t])
act_ff[t] = model.ff_weight_data * data[t] + model.ff_weight_prev * Hout[t - 1] + model.ff_weight_bias
act_ff[t] = ele.tanh(act_ff[t])
C[t] = ele.mult(act_ig[t], act_ff[t]) + ele.mult(act_fg[t], C[t - 1])
act_og[t] = model.og_weight_data * data[t] + model.og_weight_prev * Hout[t - 1] + model.og_weight_cell * C[t] + model.og_weight_bias
act_og[t] = ele.sigm(act_og[t])
if tanhC_version:
Hout[t] = ele.mult(act_og[t], ele.tanh(C[t]))
else:
Hout[t] = ele.mult(act_og[t], C[t])
Y = softmax(model.decoder_weights * Hout[t] + model.decoder_bias)
# evaluation
output = Y.to_numpy() # Can directly get a single element from Y
# print output[0, sent[t]]
sent_ll += math.log(max(output[0, sent[t]],1e-20), 2)
test_ll += sent_ll
test_ent = test_ll * (-1) / words
test_ppl = 2 ** test_ent
print "Test PPL =", test_ppl
if __name__ == '__main__':
#gpu = owl.create_gpu_device(1)
cpu = owl.create_cpu_device()
owl.set_device(cpu)
model, train_sents, test_sents, train_words, test_words = LSTM_init()
learning_rate = 0.1
for i in range(5):
model, learning_rate = LSTM_train(model, train_sents, train_words, learning_rate, 1)
LSTM_test(model, test_sents, test_words)
```
#### File: apps/mnist/mnist_io.py
```python
import numpy as np
import scipy.io as si
def _extract(prefix, md, max_dig):
ret = []
for dig in range(max_dig):
samples = md[prefix + str(dig)]
labels = np.empty([samples.shape[0], 1], dtype=np.float32)
labels.fill(dig)
ret.append(np.hstack((samples.astype(np.float32) / 256, labels)))
return ret
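# Each row produced by _extract holds the flattened pixel values (scaled into [0, 1))
# with the digit label appended as the last column; the helper below splits them apart
# and converts the label into a one-hot row of length 10.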
def _split_sample_and_label(merged_mb):
[s, l] = np.hsplit(merged_mb, [merged_mb.shape[1] - 1])
# change label to sparse representation
n = merged_mb.shape[0]
ll = np.zeros([n, 10], dtype=np.float32)
ll[np.arange(n), l.astype(int).flat] = 1
return (s, ll)
def load_mb_from_mat(mat_file, mb_size):
# load from mat
md = si.loadmat(mat_file)
# merge all data
train_all = np.concatenate(_extract('train', md, 10))
test_all = np.concatenate(_extract('test', md, 10))
# shuffle
np.random.shuffle(train_all)
# make minibatch
train_mb = np.vsplit(train_all, range(mb_size, train_all.shape[0], mb_size))
train_data = map(_split_sample_and_label, train_mb)
test_data = _split_sample_and_label(test_all)
print 'Training data: %d mini-batches' % len(train_mb)
print 'Test data: %d samples' % test_all.shape[0]
print train_data[0][1].shape
return (train_data, test_data)
```
#### File: owl/owl/__init__.py
```python
import numpy as np
import libowl as _owl
NArray = _owl.NArray
_owl.initialize()
# def initialize():
# """ Initialize Minerva System with `sys.argv`
#
# .. note::
# Must be called before calling any owl's API
# """
# _owl.initialize()
def has_cuda():
""" Check if CUDA is enabled
:return: CUDA status
:rtype: int
"""
return _owl.has_cuda()
def wait_for_all():
""" Wait for all evaluation to complete
.. note::
The user thread (python) will be blocked until all previous operations are finished.
:return: None
"""
_owl.wait_for_all()
def create_cpu_device():
""" Create device for running on CPU cores
.. note::
At least one of :py:func:`create_cpu_device` or :py:func:`create_gpu_device` should be called
before using any ``owl`` APIs.
:return: A unique id for cpu device
:rtype: int
"""
return _owl.create_cpu_device()
def create_gpu_device(which):
""" Create device for running on GPU card
.. note::
At least one of :py:func:`create_cpu_device` or :py:func:`create_gpu_device` should be called
before using any ``owl`` APIs.
:param int which: which GPU card the code would be run on
:return: A unique id for the device on that GPU card
:rtype: int
"""
return _owl.create_gpu_device(which)
def get_gpu_device_count():
""" Get the number of compute-capable GPU devices
:return: Number of compute-capable GPU devices
:rtype: int
"""
return _owl.get_gpu_device_count()
def set_device(dev):
""" Switch to the given device for running computations
When ``set_device(dev)`` is called, all the subsequent codes will be run on ``dev``
till another ``set_device`` is called.
:param int dev: the id of the device (usually returned by create_xxx_device)
"""
_owl.set_device(dev)
def zeros(shape):
""" Create ndarray of zero values
:param shape: shape of the ndarray to create
:type shape: list int
:return: result ndarray
:rtype: owl.NArray
"""
return NArray.zeros(shape)
def ones(shape):
""" Create ndarray of one values
:param shape: shape of the ndarray to create
:type shape: list int
:return: result ndarray
:rtype: owl.NArray
"""
return NArray.ones(shape)
def randn(shape, mu, var):
""" Create a random ndarray using normal distribution
:param shape: shape of the ndarray to create
:type shape: list int
:param float mu: mu
:param float var: variance
:return: result ndarray
:rtype: owl.NArray
"""
return NArray.randn(shape, mu, var)
def randb(shape, prob):
""" Create a random ndarray using bernoulli distribution
:param shape: shape of the ndarray to create
:type shape: list int
:param float prob: probability for the value to be one
:return: result ndarray
:rtype: owl.NArray
"""
return NArray.randb(shape, prob)
def from_numpy(nparr):
""" Create an owl.NArray from numpy.ndarray
.. note::
        The content will be directly copied to Minerva's memory system. However, because
        numpy and Minerva order their dimensions differently, the resulting
        ``owl.NArray``'s dimensions will be *reversed*.
>>> a = numpy.zeros([200, 300, 50])
>>> b = owl.from_numpy(a)
>>> print b.shape
[50, 300, 200]
.. seealso::
:py:func:`owl.NArray.to_numpy`
:param numpy.ndarray nparr: numpy ndarray
:return: Minerva's ndarray
:rtype: owl.NArray
"""
return NArray.from_numpy(np.require(nparr, dtype=np.float32, requirements=['C']))
def concat(narrays, concat_dim):
""" Concatenate NArrays according to concat_dim
:param narrays: inputs for concatenation
    :type narrays: list owl.NArray
    :param concat_dim: the dimension to concatenate along
:type concat_dim: int
:return: result of concatenation
:rtype: owl.NArray
"""
return NArray.concat(narrays, concat_dim)
def slice(src, slice_dim, st_off, slice_count):
""" Slice NArrays according to slice_dim
:param src: inputs for slice
:type src: owl.NArray
:param slice_dim: the dimension to slice
:type slice_dim: int
:param st_off: where to start slice
:type st_off: int
:param slice_count: how many data_chunk on slice_dim
    :type slice_count: int
:return: result of slicer
:rtype: owl.NArray
"""
return NArray.slice(src, slice_dim, st_off, slice_count)
# def print_profiler_result():
# """ Print result from execution profiler
#
# :return: None
# """
# _owl.print_profiler_result()
#
# def reset_profiler_result():
# """ Reset execution profiler
#
# :return: None
# """
# _owl.reset_profiler_result()
#
# def print_dag_to_file(fname):
# """ Print the current generated dag into the give filename
#
# :param fname: filename for printing the dag
# :type fname: str
# :return: None
# """
# _owl.print_dag_to_file(fname)
#
# def print_dot_dag_to_file(fname):
# """ Print the current generated dag into the give filename in dot format
#
# :param fname: filename for printing the dag
# :type fname: str
# :return: None
# """
# _owl.print_dot_dag_to_file(fname)
```
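Taken together, the module above is the user-facing surface of Minerva's Python API. A minimal sketch of how the pieces compose (device ids and shapes are illustrative):

```python
import owl

# Choose a device before any array work; later calls run on the active device.
dev = owl.create_cpu_device()
owl.set_device(dev)

a = owl.zeros([10, 5])             # 10x5 array of zeros
b = owl.randn([10, 5], 0.0, 0.1)   # Gaussian-initialized array
c = a + b                          # evaluated asynchronously by Minerva's dataflow engine
owl.wait_for_all()                 # block until all pending operations finish
print c.to_numpy()                 # copy back to a numpy.ndarray for inspection
```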
#### File: owl/net/net.py
```python
import numpy as np
import math
import Queue
import owl
import owl.elewise as ele
import owl.conv as co
from caffe import *
from netio import LMDBDataProvider
from netio import ImageListDataProvider
from netio import ImageWindowDataProvider
class ComputeUnit(object):
''' Interface for each compute unit.
    In ``owl.net``, the network is a graph (in fact a DAG) that is composed of ``ComputeUnit`` s.
    ``ComputeUnit`` is a wrapper around Caffe's ``layer`` abstraction, but is more
    general and flexible in its function signature.
:ivar caffe.LayerParameter params: layer parameter in Caffe's proto structure
:ivar str name: name of the unit; the name must be unique
:ivar btm_names: names of the bottom units
:vartype btm_names: list str
:ivar top_names: names of the top units
:vartype top_names: list str
:ivar list int out_shape:
.. note::
``params``, ``name``, ``btm_names`` and ``top_names`` will be parsed from Caffe's network
description file. ``out_shape`` should be set in :py:meth:`compute_size`
'''
def __init__(self, params):
self.params = params
self.name = params.name
self.btm_names = []
self.top_names = []
self.out = None
self.out_shape = None
self.rec_on_ori = None
self.stride_on_ori = None
self.start_on_ori = None
def __str__(self):
return 'N/A unit'
def compute_size(self, from_btm, to_top):
''' Calculate the output size of this unit
This function will be called before training during the ``compute_size`` phase.
        The ``compute_size`` phase is a feed-forward-like phase during which each ``ComputeUnit`` calculates
        not the output tensor but the output size (list int) for the top units. The
size is usually used to calculate the weight and bias size for initialization.
:param dict from_btm: input size from bottom units
:param dict to_top: output size to top units
.. seealso::
:py:meth:`FullyConnection.compute_size`
:py:meth:`ConvConnection.compute_size`
:py:meth:`Net.compute_size`
'''
pass
def forward(self, from_btm, to_top, phase):
''' Function for forward propagation
This function will be called during forward-propagation. The function
should take input in ``from_btm``, perform customized computation, and then
put the result in ``to_top``. Both ``from_btm`` and ``to_top`` are ``dict`` type
where key is a ``str`` of name of the bottom/top units and value is an ``owl.NArray``
served as input or output of the function.
:param dict from_btm: input from bottom units
:param dict to_top: output for top units
:param str phase: name of the phase of the running. Currently either ``"TRAIN"`` or ``"TEST"``
'''
pass
def backward(self, from_top, to_btm, phase):
''' Function for backward propagation
This function will be called during backward-propagation. Similar to :py:meth:`forward`,
The function should take input in ``from_top``, perform customized computation, and then
        put the result in ``to_btm``. The function also needs to calculate the gradient (if any) and
        save it to the ``weightgrad`` field (see :py:meth:`WeightedComputeUnit.weight_update`).
:param dict from_top: input from top units
        :param dict to_btm: output for the bottom units
:param str phase: name of the phase of the running. Currently either ``"TRAIN"`` or ``"TEST"``
'''
pass
def weight_update(self, base_lr, base_weight_decay, momentum, batch_size):
''' Function for weight update
This function will be called during weight update.
:param float base_lr: base learning rate
:param float base_weight_decay: base weight decay
:param float momentum: momentum value
:param int batch_size: the size of the current minibatch
'''
pass
class ComputeUnitSimple(ComputeUnit):
''' An auxiliary class for :py:class:`ComputeUnit` that will only have one input unit and one output unit.
'''
def __init__(self, params):
super(ComputeUnitSimple, self).__init__(params)
def compute_size(self, from_btm, to_top):
''' Set the ``out_shape`` as the same shape of the input. Inherited classes could override this function.
'''
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = from_btm[self.btm_names[0]]['out_shape'][:]
to_top[self.top_names[0]]['rec_on_ori'] = from_btm[self.btm_names[0]]['rec_on_ori']
to_top[self.top_names[0]]['stride_on_ori'] = from_btm[self.btm_names[0]]['stride_on_ori']
to_top[self.top_names[0]]['start_on_ori'] = from_btm[self.btm_names[0]]['start_on_ori']
self.out_shape = to_top[self.top_names[0]]['out_shape'][:]
self.rec_on_ori = to_top[self.top_names[0]]['rec_on_ori']
self.stride_on_ori = to_top[self.top_names[0]]['stride_on_ori']
self.start_on_ori = to_top[self.top_names[0]]['start_on_ori']
def forward(self, from_btm, to_top, phase):
''' Transform the interface from multiple input/output to only one input/output function :py:meth:`ff`.
'''
to_top[self.top_names[0]] = self.ff(from_btm[self.btm_names[0]], phase)
self.out = to_top[self.top_names[0]]
def ff(self, act, phase):
''' Function for forward-propagation
:param owl.NArray act: the activation from the bottom unit
:param str phase: name of the phase of the running. Currently either ``"TRAIN"`` or ``"TEST"``
:return: the activation of this unit
:rtype: owl.NArray
'''
pass
def backward(self, from_top, to_btm, phase):
''' Transform the interface from multiple input/output to only one input/output function :py:meth:`bp`.
'''
to_btm[self.btm_names[0]] = self.bp(from_top[self.top_names[0]], phase)
def bp(self, sen, phase):
''' Function for backward-propagation
        :param owl.NArray sen: the sensitivity (the error derivative with respect to the input) from the top unit
:return: the sensitivity of this unit
:rtype: owl.NArray
'''
pass
class WeightedComputeUnit(ComputeUnitSimple):
''' An auxiliary class for :py:class:`ComputeUnit` with weights
:ivar owl.NArray weight: weight tensor
:ivar owl.NArray weightdelta: momentum of weight
:ivar owl.NArray weightgrad: gradient of weight
:ivar owl.NArray bias: bias tensor
:ivar owl.NArray biasdelta: momentum of bias
:ivar owl.NArray biasgrad: gradient of bias
:ivar float lr_mult_w: learning rate multiplier for the weight of this unit
:ivar float lr_mult_b: bias learning rate multiplier for the bias of this unit
:ivar float decay_mult_w: decay multiplier for the weight of this unit
:ivar float decay_mult_b: decay multiplier for the bias of this unit
'''
def __init__(self, params):
super(WeightedComputeUnit, self).__init__(params)
# weights and bias
self.weight = None
self.weightdelta = None
self.weightgrad = None
self.bias = None
self.biasdelta = None
self.biasgrad = None
self.in_shape = None
self.fan_in = None
self.fan_out = None
# blob learning rate and weight decay
if len(params.param) >= 1:
self.lr_mult_w = params.param[0].lr_mult
self.decay_mult_w = params.param[0].decay_mult
else:
self.lr_mult_w = 1
self.decay_mult_w = 1
if len(params.param) >= 2:
self.lr_mult_b = params.param[1].lr_mult
self.decay_mult_b = params.param[1].decay_mult
else:
self.lr_mult_b = 1
self.decay_mult_b = 0
#self.blobs_lr = params.blobs_lr
#self.weight_decay = params.weight_decay
#if len(self.blobs_lr) == 0:
#self.blobs_lr = [1,1]
#if len(self.weight_decay) == 0:
#self.weight_decay = [1, 0]
def compute_size(self, from_btm, to_top):
pass
def init_weights_with_filler(self):
''' Init weights & bias. The function will be called during weight initialization.
Currently, four types of initializers are supported: ``"constant", "gaussian", "uniform", "xavier"``.
'''
#init weight
npweights = None
if self.weight_filler.type == "constant":
npweights = np.ones(self.wshape, dtype = np.float32) * self.weight_filler.value
elif self.weight_filler.type == "gaussian":
npweights = np.random.normal(self.weight_filler.mean, self.weight_filler.std, self.wshape)
elif self.weight_filler.type == "uniform":
npweights = np.random.uniform(self.weight_filler.min, self.weight_filler.max, self.wshape)
elif self.weight_filler.type == "xavier":
scale = np.sqrt(float(3)/self.fan_in)
npweights = np.random.uniform(-scale, scale, self.wshape)
self.weight = owl.from_numpy(npweights.astype(np.float32)).reshape(self.wshape)
#init bias
        npbias = None
if self.bias_filler.type == "constant":
npbias = np.ones(self.bshape, dtype = np.float32) * self.bias_filler.value
elif self.bias_filler.type == "gaussian":
npbias = np.random.normal(self.bias_filler.mean, self.bias_filler.std, self.bshape)
elif self.bias_filler.type == "uniform":
npbias = np.random.uniform(self.bias_filler.min, self.bias_filler.max, self.bshape)
elif self.bias_filler.type == "xavier":
scale = np.sqrt(float(3)/self.fan_in)
npbias = np.random.uniform(-scale, scale, self.bshape)
self.bias = owl.from_numpy(npbias.astype(np.float32)).reshape(self.bshape)
def weight_update(self, base_lr, base_weight_decay, momentum, batch_size):
''' Update the weight & bias
Using following formula:
``$_delta = momentum * $_delta - (base_lr * $_lr / batch_size) * $_grad - (base_lr * $_lr * base_wd * $_wd) * $``
, where ``$`` could be either ``weight`` or ``bias``.
'''
if self.weightdelta == None:
self.weightdelta = owl.zeros(self.weightgrad.shape)
self.weightdelta = momentum * self.weightdelta \
- (base_lr * self.lr_mult_w / batch_size) * self.weightgrad \
- (base_lr * self.lr_mult_w * base_weight_decay * self.decay_mult_w) * self.weight
self.weight = self.weight + self.weightdelta
self.weightgrad = None
if self.biasdelta == None:
self.biasdelta = owl.zeros(self.biasgrad.shape)
self.biasdelta = momentum * self.biasdelta \
- (base_lr * self.lr_mult_b / batch_size) * self.biasgrad \
- (base_lr * self.lr_mult_b * base_weight_decay * self.decay_mult_b) * self.bias
self.bias = self.bias + self.biasdelta
self.biasgrad = None
class LinearUnit(ComputeUnitSimple):
''' Compute unit for linear transformation
'''
def ff(self, x, phase):
return x
def bp(self, y, phase):
return y
def __str__(self):
return 'linear'
class SigmoidUnit(ComputeUnitSimple):
''' Compute unit for Sigmoid non-linearity
'''
def ff(self, x, phase):
return ele.sigm(x)
def bp(self, y, phase):
return ele.sigm_back(y)
def __str__(self):
return 'sigmoid'
class ReluUnit(ComputeUnitSimple):
''' Compute unit for RELU non-linearity
'''
def ff(self, x, phase):
self.ff_x = x
return ele.relu(x)
def bp(self, y, phase):
return ele.relu_back(y, self.ff_x)
def __str__(self):
return 'relu'
class TanhUnit(ComputeUnitSimple):
''' Compute unit for hyperbolic tangent non-linearity
'''
def ff(self, x, phase):
return ele.tanh(x)
def bp(self, y, phase):
return ele.tanh_back(y)
def __str__(self):
return 'tanh'
class PoolingUnit(ComputeUnitSimple):
''' Compute unit for Pooling
.. note::
The input and output are of size ``[HWCN]``:
- ``H``: image height
- ``W``: image width
- ``C``: number of image channels (feature maps)
- ``N``: size of minibatch
'''
def __init__(self, params):
super(PoolingUnit, self).__init__(params)
self.ppa = params.pooling_param
if self.ppa.pool == PoolingParameter.PoolMethod.Value('MAX'):
pool_ty = co.pool_op.max
elif self.ppa.pool == PoolingParameter.PoolMethod.Value('AVE'):
pool_ty = co.pool_op.average
self.pooler = co.Pooler(self.ppa.kernel_size, self.ppa.kernel_size,
self.ppa.stride, self.ppa.stride,
self.ppa.pad, self.ppa.pad,
pool_ty)
def compute_size(self, from_btm, to_top):
self.out_shape = from_btm[self.btm_names[0]]['out_shape'][:]
ori_height = self.out_shape[0]
ori_width = self.out_shape[1]
self.out_shape[0] = int(np.ceil(float(self.out_shape[0] + 2 * self.ppa.pad - self.ppa.kernel_size) / self.ppa.stride)) + 1
self.out_shape[1] = int(np.ceil(float(self.out_shape[1] + 2 * self.ppa.pad - self.ppa.kernel_size) / self.ppa.stride)) + 1
if self.ppa.pad:
if (self.out_shape[0] - 1) * self.ppa.stride >= ori_height + self.ppa.pad:
self.out_shape[0] = self.out_shape[0] - 1
self.out_shape[1] = self.out_shape[1] - 1
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = self.out_shape[:]
to_top[self.top_names[0]]['stride_on_ori'] = from_btm[self.btm_names[0]]['stride_on_ori'] * self.ppa.stride
to_top[self.top_names[0]]['rec_on_ori'] = from_btm[self.btm_names[0]]['rec_on_ori'] + (self.ppa.kernel_size - 1) * from_btm[self.btm_names[0]]['stride_on_ori']
to_top[self.top_names[0]]['start_on_ori'] = from_btm[self.btm_names[0]]['start_on_ori'] - self.ppa.pad * from_btm[self.btm_names[0]]['stride_on_ori']
self.rec_on_ori = to_top[self.top_names[0]]['rec_on_ori']
self.stride_on_ori = to_top[self.top_names[0]]['stride_on_ori']
self.start_on_ori = to_top[self.top_names[0]]['start_on_ori']
def ff(self, x, phase):
self.ff_x = x
self.ff_y = self.pooler.ff(x)
return self.ff_y
def bp(self, y, phase):
return self.pooler.bp(y, self.ff_y, self.ff_x)
def __str__(self):
return 'pooling'
class DropoutUnit(ComputeUnitSimple):
''' Compute unit for dropout
'''
def __init__(self, params):
super(DropoutUnit, self).__init__(params)
self.scale = 1.0 / (1.0 - self.params.dropout_param.dropout_ratio)
self.keep_ratio = 1 - self.params.dropout_param.dropout_ratio
def ff(self, x, phase):
''' Forward function of dropout (inverted dropout).
The random mask is applied and the surviving activations are rescaled by ``1 / (1 - dropout_ratio)`` during training; under ``"TEST"`` mode the input is passed through unchanged.
'''
if phase == "TRAIN":
self.dropmask = owl.randb(x.shape, self.keep_ratio)
return ele.mult(x, self.dropmask)*self.scale
else:
return x
#for gradient test
#return x
def bp(self, y, phase):
if phase == "TRAIN":
return ele.mult(y, self.dropmask)*self.scale
else:
return y
def __str__(self):
return 'dropout'
class SoftmaxUnit(ComputeUnit):
''' Compute unit for softmax
'''
def __init__(self, params):
super(SoftmaxUnit, self).__init__(params)
self.loss_weight = params.loss_weight
def compute_size(self, from_btm, to_top):
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = from_btm[self.btm_names[0]]['out_shape'][:]
to_top[self.top_names[0]]['rec_on_ori'] = from_btm[self.btm_names[0]]['rec_on_ori']
to_top[self.top_names[0]]['stride_on_ori'] = from_btm[self.btm_names[0]]['stride_on_ori']
to_top[self.top_names[0]]['start_on_ori'] = from_btm[self.btm_names[0]]['start_on_ori']
self.out_shape = to_top[self.top_names[0]]['out_shape'][:]
self.stride_on_ori = to_top[self.top_names[0]]['stride_on_ori']
self.start_on_ori = to_top[self.top_names[0]]['start_on_ori']
self.rec_on_ori = to_top[self.top_names[0]]['rec_on_ori']
def forward(self, from_btm, to_top, phase):
to_top[self.top_names[0]] = co.softmax(from_btm[self.btm_names[0]], co.soft_op.instance)
self.ff_y = to_top[self.top_names[0]]
#turn label into matrix form
nplabel = np.zeros([self.ff_y.shape[1], self.ff_y.shape[0]], dtype=np.float32)
self.strlabel = from_btm[self.btm_names[1]]
for i in range(len(self.strlabel)):
nplabel[i, self.strlabel[i]] = 1
self.y = owl.from_numpy(nplabel)
self.out = self.ff_y
def backward(self, from_top, to_btm, phase):
if len(self.loss_weight) == 1:
to_btm[self.btm_names[0]] = (self.ff_y - self.y)*self.loss_weight[0]
else:
to_btm[self.btm_names[0]] = (self.ff_y - self.y)
def getloss(self):
''' Get the loss of the softmax (cross entropy)
'''
lossmat = ele.mult(ele.ln(self.ff_y), self.y)
res = lossmat.sum(0).sum(1).to_numpy()
return -res[0][0] / lossmat.shape[1]
def __str__(self):
return 'softmax'
class AccuracyUnit(ComputeUnit):
''' Compute unit for calculating accuracy
.. note::
In terms of Minerva's lazy evaluation, the unit is a **non-lazy** one since it gets the actual
contents (accuracy) out of an ``owl.NArray``.
'''
def __init__(self, params):
super(AccuracyUnit, self).__init__(params)
self.acc = 0
self.batch_size = 0
self.top_k = params.accuracy_param.top_k
def compute_size(self, from_btm, to_top):
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = from_btm[self.btm_names[0]]['out_shape'][:]
to_top[self.top_names[0]]['rec_on_ori'] = from_btm[self.btm_names[0]]['rec_on_ori']
to_top[self.top_names[0]]['stride_on_ori'] = from_btm[self.btm_names[0]]['stride_on_ori']
to_top[self.top_names[0]]['start_on_ori'] = from_btm[self.btm_names[0]]['start_on_ori']
self.out_shape = to_top[self.top_names[0]]['out_shape'][:]
self.stride_on_ori = to_top[self.top_names[0]]['stride_on_ori']
self.start_on_ori = to_top[self.top_names[0]]['start_on_ori']
self.rec_on_ori = to_top[self.top_names[0]]['rec_on_ori']
def forward(self, from_btm, to_top, phase):
if self.top_k == 1:
predict = from_btm[self.btm_names[0]].max_index(0)
ground_truth = owl.from_numpy(from_btm[self.btm_names[1]]).reshape(predict.shape)
self.batch_size = from_btm[self.btm_names[0]].shape[1]
correct = (predict - ground_truth).count_zero()
self.acc = correct * 1.0 / self.batch_size
elif self.top_k == 5:
predict = from_btm[self.btm_names[0]].to_numpy()
top_5 = np.argsort(predict, axis=1)[:,::-1]
ground_truth = from_btm[self.btm_names[1]]
self.batch_size = np.shape(ground_truth)[0]
correct = 0
for i in range(self.batch_size):
for t in range(5):
if ground_truth[i] == top_5[i,t]:
correct += 1
break
self.acc = correct * 1.0 / self.batch_size
else:
assert(False)
def backward(self, from_top, to_btm, phase):
pass
def __str__(self):
return 'accuracy'
class LRNUnit(ComputeUnitSimple):
''' Compute unit for LRN
'''
def __init__(self, params):
super(LRNUnit, self).__init__(params)
self.lrner = co.Lrner(params.lrn_param.local_size, params.lrn_param.alpha, params.lrn_param.beta)
self.scale = None
def ff(self, x, phase):
self.ff_x = x
self.scale = owl.zeros(x.shape)
self.ff_y = self.lrner.ff(x, self.scale)
return self.ff_y
def bp(self, y, phase):
return self.lrner.bp(self.ff_x, self.ff_y, self.scale, y)
def __str__(self):
return 'lrn'
class ConcatUnit(ComputeUnit):
''' Compute unit for concatenation
Concatenate input arrays along the dimension specified by Caffe's ``concat_dim_caffe``
'''
def __init__(self, params):
super(ConcatUnit, self).__init__(params)
self.concat_dim_caffe = params.concat_param.concat_dim
self.slice_count = []
def compute_size(self, from_btm, to_top):
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = from_btm[self.btm_names[0]]['out_shape'][:]
to_top[self.top_names[0]]['rec_on_ori'] = from_btm[self.btm_names[0]]['rec_on_ori']
to_top[self.top_names[0]]['stride_on_ori'] = from_btm[self.btm_names[0]]['stride_on_ori']
to_top[self.top_names[0]]['start_on_ori'] = from_btm[self.btm_names[0]]['start_on_ori']
self.concat_dim = len(from_btm[self.btm_names[0]]['out_shape']) - 1 - self.concat_dim_caffe
for i in range(1, len(self.btm_names)):
to_top[self.top_names[0]]['out_shape'][self.concat_dim] = to_top[self.top_names[0]]['out_shape'][self.concat_dim] + from_btm[self.btm_names[i]]['out_shape'][self.concat_dim]
self.out_shape = to_top[self.top_names[0]]['out_shape'][:]
self.stride_on_ori = to_top[self.top_names[0]]['stride_on_ori']
self.start_on_ori = to_top[self.top_names[0]]['start_on_ori']
self.rec_on_ori = to_top[self.top_names[0]]['rec_on_ori']
def forward(self, from_btm, to_top, phase):
narrays = []
self.concat_dim = len(from_btm[self.btm_names[0]].shape) - 1 - self.concat_dim_caffe
for i in range(len(self.btm_names)):
narrays.append(from_btm[self.btm_names[i]])
self.slice_count.append(from_btm[self.btm_names[i]].shape[self.concat_dim])
to_top[self.top_names[0]] = owl.concat(narrays, self.concat_dim)
self.out = to_top[self.top_names[0]]
def backward(self, from_top, to_btm, phase):
st_off = 0
for i in range(len(self.btm_names)):
to_btm[self.btm_names[i]] = owl.slice(from_top[self.top_names[0]],
self.concat_dim,
st_off,
self.slice_count[i])
st_off += self.slice_count[i]
def __str__(self):
return 'concat'
class FullyConnection(WeightedComputeUnit):
''' Compute unit for traditional fully connected layer
'''
def __init__(self, params):
super(FullyConnection, self).__init__(params)
self.inner_product_param = params.inner_product_param
self.weight_filler = params.inner_product_param.weight_filler
self.bias_filler = params.inner_product_param.bias_filler
def compute_size(self, from_btm, to_top):
''' Compute the output size and also weight and bias size
The weight size is ``[top_shape[0], btm_shape[0]]``; the bias size is ``[top_shape[0], 1]``
(assuming both ``top`` and ``btm`` are 2-dimensional arrays)
'''
shp = from_btm[self.btm_names[0]]['out_shape'][:]
if len(shp) > 2:
self.in_shape = [np.prod(shp[0:-1], dtype=np.int32), shp[-1]]
else:
self.in_shape = shp
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = self.in_shape[:]
to_top[self.top_names[0]]['out_shape'][0] = self.inner_product_param.num_output
to_top[self.top_names[0]]['out_shape'][1] = 1
self.out_shape = to_top[self.top_names[0]]['out_shape'][:]
self.wshape = [self.out_shape[0], self.in_shape[0]]
self.bshape = [self.out_shape[0], 1]
if len(shp) > 2:
#last layer is conv layer
self.rec_on_ori = from_btm[self.btm_names[0]]['rec_on_ori'] + (shp[0] - 1) * from_btm[self.btm_names[0]]['stride_on_ori']
self.stride_on_ori = self.rec_on_ori
else:
self.rec_on_ori = from_btm[self.btm_names[0]]['rec_on_ori']
self.stride_on_ori = from_btm[self.btm_names[0]]['stride_on_ori']
to_top[self.top_names[0]]['rec_on_ori'] = self.rec_on_ori
to_top[self.top_names[0]]['stride_on_ori'] = self.stride_on_ori
to_top[self.top_names[0]]['start_on_ori'] = from_btm[self.btm_names[0]]['start_on_ori']
self.start_on_ori = to_top[self.top_names[0]]['start_on_ori']
#set fan_in fan_out
self.fan_out = self.inner_product_param.num_output
self.fan_in = np.prod(from_btm[self.btm_names[0]]['out_shape'][0:len(from_btm[self.btm_names[0]]['out_shape'])])
def ff(self, act, phase):
shp = act.shape
if len(shp) > 2:
a = act.reshape([np.prod(shp[0:-1], dtype=np.int32), shp[-1]])
else:
a = act
self.ff_act = act # save ff value
if self.weight == None:
self.init_weights_with_filler()
return self.weight * a + self.bias
def bp(self, sen, phase):
shp = self.ff_act.shape
if len(shp) > 2:
a = self.ff_act.reshape([np.prod(shp[0:-1], dtype=np.int32), shp[-1]])
else:
a = self.ff_act
self.weightgrad = sen * a.trans()
self.biasgrad = sen.sum(1)
s = self.weight.trans() * sen
if len(shp) > 2:
s = s.reshape(shp)
return s
def __str__(self):
return 'fc'
class ConvConnection(WeightedComputeUnit):
''' Convolution operation
.. note::
The input and output are of size ``[HWCN]``:
- ``H``: image height
- ``W``: image width
- ``C``: number of image channels (feature maps)
- ``N``: size of minibatch
'''
def __init__(self, params):
super(ConvConnection, self).__init__(params)
self.conv_params = params.convolution_param
self.convolver = co.Convolver(self.conv_params.pad,
self.conv_params.pad, self.conv_params.stride, self.conv_params.stride)
self.num_output = params.convolution_param.num_output
self.group = params.convolution_param.group
#TODO: hack, we don't want to slice again just to pass it into bp as a parameter
self.group_data = []
self.group_filter = []
self.group_bias = []
self.weight_filler = params.convolution_param.weight_filler
self.bias_filler = params.convolution_param.bias_filler
def compute_size(self, from_btm, to_top):
''' Compute the output size and also weight and bias size
.. note::
The weight(kernel) size is ``[HWCiCo]``; bias shape is ``[Co]``:
- ``H``: kernel_height
- ``W``: kernel_width
- ``Ci``: number of input channels
- ``Co``: number of output channels
'''
self.in_shape = from_btm[self.btm_names[0]]['out_shape'][:]
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = from_btm[self.btm_names[0]]['out_shape'][:]
to_top[self.top_names[0]]['out_shape'][0] = (to_top[self.top_names[0]]['out_shape'][0] + 2 * self.conv_params.pad - self.conv_params.kernel_size) / self.conv_params.stride + 1
to_top[self.top_names[0]]['out_shape'][1] = (to_top[self.top_names[0]]['out_shape'][1] + 2 * self.conv_params.pad - self.conv_params.kernel_size) / self.conv_params.stride + 1
to_top[self.top_names[0]]['out_shape'][2] = self.num_output
self.out_shape = to_top[self.top_names[0]]['out_shape'][:]
self.wshape = [self.conv_params.kernel_size,
self.conv_params.kernel_size,
self.in_shape[2],
self.num_output]
self.bshape = [self.out_shape[2]]
to_top[self.top_names[0]]['stride_on_ori'] = from_btm[self.btm_names[0]]['stride_on_ori'] * self.conv_params.stride
to_top[self.top_names[0]]['rec_on_ori'] = from_btm[self.btm_names[0]]['rec_on_ori'] + (self.conv_params.kernel_size - 1) * from_btm[self.btm_names[0]]['stride_on_ori']
to_top[self.top_names[0]]['start_on_ori'] = from_btm[self.btm_names[0]]['start_on_ori'] - self.conv_params.pad * from_btm[self.btm_names[0]]['stride_on_ori']
self.stride_on_ori = to_top[self.top_names[0]]['stride_on_ori']
self.start_on_ori = to_top[self.top_names[0]]['start_on_ori']
self.rec_on_ori = to_top[self.top_names[0]]['rec_on_ori']
#set fan_in fan_out
self.fan_out = self.conv_params.kernel_size * self.conv_params.kernel_size * self.conv_params.num_output
self.fan_in = self.conv_params.kernel_size * self.conv_params.kernel_size * from_btm[self.btm_names[0]]['out_shape'][2]
def ff(self, act, phase):
''' Feed-forward of convolution
.. warning::
Currently multi-group convolution (as in the AlexNet paper) is not supported. One could work around it by
using a single bigger convolution with the number of feature maps doubled.
'''
if self.group == 1:
self.ff_act = act
if self.weight == None:
self.init_weights_with_filler()
return self.convolver.ff(act, self.weight, self.bias)
else:
#currently doesn't support multi-group
assert(False)
def bp(self, sen, phase):
''' Backward propagation of convolution
.. warning::
Currently multi-group convolution (as in the AlexNet paper) is not supported. One could work around it by
using a single bigger convolution with the number of feature maps doubled.
'''
if self.group == 1:
self.weightgrad = self.convolver.weight_grad(sen, self.ff_act, self.weight)
self.biasgrad = self.convolver.bias_grad(sen)
return self.convolver.bp(sen, self.ff_act, self.weight)
else:
#currently doesn't support multi-group
assert(False)
def __str__(self):
return 'conv'
class DataUnit(ComputeUnit):
''' The base class of dataunit.
:ivar dp: data provider; different kinds of dp load data from different formats
:ivar generator: the iterator produced by dataprovider
'''
def __init__(self, params, num_gpu):
super(DataUnit, self).__init__(params)
def compute_size(self, from_btm, to_top):
pass
def forward(self, from_btm, to_top, phase):
''' Feed-forward of the data unit fetches a batch of fixed batch_size from the data provider.
.. note::
Phase indicates whether it's training or testing. Usually, the data augmentation used for training involves some randomness, while testing doesn't.
'''
if self.generator == None:
self.generator = self.dp.get_mb(phase)
while True:
try:
(samples, labels) = next(self.generator)
if len(labels) == 0:
(samples, labels) = next(self.generator)
except StopIteration:
print 'Have scanned the whole dataset; starting from the beginning again'
self.generator = self.dp.get_mb(phase)
continue
break
to_top[self.top_names[0]] = owl.from_numpy(samples).reshape(
[self.crop_size, self.crop_size, 3, samples.shape[0]])
#may have multiple labels
for i in range (1, len(self.top_names)):
to_top[self.top_names[i]] = labels[:,i - 1]
#the output of the data layer is the data, not the label
self.out = to_top[self.top_names[0]]
def backward(self, from_top, to_btm, phase):
# no bp pass
pass
def __str__(self):
return 'data'
class LMDBDataUnit(DataUnit):
''' DataUnit load from LMDB.
:ivar caffe.LayerParameter params: lmdb data layer param defined by Caffe; params.data_param contains information about the data source, params.transform_param mainly defines data augmentation operations
'''
def __init__(self, params, num_gpu):
super(LMDBDataUnit, self).__init__(params, num_gpu)
if params.include[0].phase == Phase.Value('TRAIN'):
self.dp = LMDBDataProvider(params.data_param, params.transform_param, num_gpu)
else:
self.dp = LMDBDataProvider(params.data_param, params.transform_param, 1)
self.params = params
self.crop_size = params.transform_param.crop_size
self.generator = None
self.out = None
self.multiview = False
def compute_size(self, from_btm, to_top):
self.out_shape = [self.params.transform_param.crop_size,
self.params.transform_param.crop_size,
3, 1]
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = self.out_shape[:]
to_top[self.top_names[0]]['rec_on_ori'] = 1
to_top[self.top_names[0]]['stride_on_ori'] = 1
to_top[self.top_names[0]]['start_on_ori'] = 0
self.rec_on_ori = 1
self.stride_on_ori = 1
self.start_on_ori = 0
def forward(self, from_btm, to_top, phase):
''' Feed-forward operation may vary according to phase.
.. note::
The LMDB data provider supports multi-view testing: if multiview == True, it will produce 10 consecutive batches containing different views of the same original images
'''
if self.generator == None:
if self.multiview == False:
self.generator = self.dp.get_mb(phase)
#multiview test
else:
self.generator = self.dp.get_multiview_mb()
while True:
try:
(samples, labels) = next(self.generator)
if len(labels) == 0:
(samples, labels) = next(self.generator)
except StopIteration:
print 'Have scanned the whole dataset; starting from the beginning again'
if self.multiview == False:
self.generator = self.dp.get_mb(phase)
#multiview test
else:
self.generator = self.dp.get_multiview_mb()
continue
break
to_top[self.top_names[0]] = owl.from_numpy(samples).reshape(
[self.crop_size, self.crop_size, 3, samples.shape[0]])
for i in range (1, len(self.top_names)):
to_top[self.top_names[i]] = labels[:,i - 1]
#to_top[self.top_names[0]] = owl.zeros([self.crop_size, self.crop_size, 3, 256])
#for i in range (1, len(self.top_names)):
#to_top[self.top_names[i]] = np.ones(256)
self.out = to_top[self.top_names[0]]
def __str__(self):
return 'lmdb_data'
class ImageDataUnit(DataUnit):
''' DataUnit load from raw images.
:ivar caffe.LayerParameter params: image data layer param defined by Caffe; this is often used when data is limited. Loading from original images is slower than loading from LMDB
'''
def __init__(self, params, num_gpu):
super(ImageDataUnit, self).__init__(params, num_gpu)
if params.include[0].phase == Phase.Value('TRAIN'):
self.dp = ImageListDataProvider(params.image_data_param, params.transform_param, num_gpu)
else:
self.dp = ImageListDataProvider(params.image_data_param, params.transform_param, 1)
self.params = params
self.crop_size = params.transform_param.crop_size
self.generator = None
def compute_size(self, from_btm, to_top):
self.out_shape = [self.params.transform_param.crop_size,
self.params.transform_param.crop_size,
3, 1]
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = self.out_shape[:]
to_top[self.top_names[0]]['rec_on_ori'] = 1
to_top[self.top_names[0]]['stride_on_ori'] = 1
to_top[self.top_names[0]]['start_on_ori'] = 0
self.rec_on_ori = 1
self.stride_on_ori = 1
self.start_on_ori = 0
def __str__(self):
return 'image_data'
class ImageWindowDataUnit(DataUnit):
''' DataUnit load from image window patches.
:ivar caffe.LayerParameter params: image window data layer param defined by Caffe; this is often used when data is limited and object bounding boxes are given
'''
def __init__(self, params, num_gpu):
super(ImageWindowDataUnit, self).__init__(params, num_gpu)
if params.include[0].phase == Phase.Value('TRAIN'):
self.dp = ImageWindowDataProvider(params.window_data_param, num_gpu)
else:
self.dp = ImageWindowDataProvider(params.window_data_param, 1)
self.params = params
self.crop_size = params.window_data_param.crop_size
self.generator = None
#reset generator
def reset_generator(self):
if self.params.include[0].phase == Phase.Value('TRAIN'):
self.generator = self.dp.get_mb('TRAIN')
else:
self.generator = self.dp.get_mb('TEST')
def compute_size(self, from_btm, to_top):
self.out_shape = [self.params.window_data_param.crop_size,
self.params.window_data_param.crop_size,
3, 1]
to_top[self.top_names[0]] = dict()
to_top[self.top_names[0]]['out_shape'] = self.out_shape[:]
to_top[self.top_names[0]]['rec_on_ori'] = 1
to_top[self.top_names[0]]['stride_on_ori'] = 1
to_top[self.top_names[0]]['start_on_ori'] = 0
self.rec_on_ori = 1
self.stride_on_ori = 1
self.start_on_ori = 0
def __str__(self):
return 'window_data'
class Net:
''' The class for neural network structure
The Net is basically a graph (DAG) in which each node is a :py:class:`ComputeUnit`.
:ivar units: all the ``ComputeUnit`` s.
:vartype units: list owl.net.ComputeUnit
:ivar adjacent: the adjacent list (units are represented by their name)
:vartype adjacent: list list str
:ivar reverse_adjacent: the reverse adjacent list (units are represented by their name)
:vartype reverse_adjacent: list list str
:ivar dict name_to_uid: a map from units' name to the unit object
:ivar loss_uids: all the units for computing loss
:vartype loss_uids: list int
:ivar accuracy_uids: all the units for calculating accuracy
:vartype accuracy_uids: list int
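An illustrative way to wire a tiny graph by hand (unit construction details omitted; in practice the graph is built by ``CaffeNetBuilder``)::
>>> net = Net()
>>> uid_data = net.add_unit(data_unit)   # any ComputeUnit instance
>>> uid_fc = net.add_unit(fc_unit)
>>> net.connect(uid_data, uid_fc)
>>> net.compute_size('TRAIN')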
'''
def __init__(self):
self.units = []
self.adjacent = []
self.reverse_adjacent = []
self.base_lr = 0
self.base_weight_decay = 0
self.momentum = 0
self.name_to_uid = {}
self.loss_uids = []
self.accuracy_uids = []
def add_unit(self, unit):
''' Method for adding units into the graph
:param owl.net.ComputeUnit unit: the unit to add
'''
uid = len(self.units)
self.units.append(unit)
self.adjacent.append([])
self.reverse_adjacent.append([])
if not unit.name in self.name_to_uid:
self.name_to_uid[unit.name] = []
self.name_to_uid[unit.name].append(uid)
return uid
def connect(self, u1, u2):
''' Method for connecting two units
:param str u1: name of the bottom unit
:param str u2: name of the top unit
'''
self.adjacent[u1].append(u2)
self.reverse_adjacent[u2].append(u1)
def get_units_by_name(self, name):
''' Get ``ComputeUnit`` object by its name
:param str name: unit name
:return: the compute unit object of that name
:rtype: owl.net.ComputeUnit
'''
return [self.units[uid] for uid in self.name_to_uid[name]]
def get_loss_units(self):
''' Get all ``ComputeUnit`` object for loss
:return: all compute unit object for computing loss
:rtype: list owl.net.ComputeUnit
'''
return [self.units[uid] for uid in self.loss_uids]
def get_accuracy_units(self):
''' Get all ``ComputeUnit`` object for accuracy
:return: all compute unit object for computing accuracy
:rtype: list owl.net.ComputeUnit
'''
return [self.units[uid] for uid in self.accuracy_uids]
def get_data_unit(self, phase = 'TRAIN'):
''' Get the ``ComputeUnit`` object for data loading
:param str phase: phase name of the run
:return: the compute unit object for loading data
:rtype: owl.net.ComputeUnit
'''
data_units = self.name_to_uid['data']
for du in data_units:
if not self._is_excluded(du, phase):
return self.units[du]
def get_weighted_unit_ids(self):
''' Get ids of all :py:class:`owl.net.WeightedComputeUnit` units
:return: ids of all weighted compute unit
:rtype: list int
'''
weights_id = []
for i in xrange(len(self.units)):
if isinstance(self.units[i], WeightedComputeUnit):
weights_id.append(i)
return weights_id
def _is_excluded(self, unit, phase):
p = self.units[unit].params
return phase != None and len(p.include) != 0 and p.include[0].phase != Phase.Value(phase)
def _toporder(self, phase = None):
depcount = [len(inunits) for inunits in self.reverse_adjacent]
queue = Queue.Queue()
# remove dep from excluded units
for unit in range(len(depcount)):
if self._is_excluded(unit, phase):
for l in self.adjacent[unit]:
depcount[l] -= 1
# find start units
for unit in range(len(depcount)):
count = depcount[unit]
if count == 0:
queue.put(unit)
# run
while not queue.empty():
unit = queue.get()
if self._is_excluded(unit, phase):
continue
yield unit
for l in self.adjacent[unit]:
depcount[l] -= 1
if depcount[l] == 0:
queue.put(l)
def _reverse_toporder(self, phase = None):
depcount = [len(outunits) for outunits in self.adjacent]
queue = Queue.Queue()
# remove dep from excluded units
for unit in range(len(depcount)):
if self._is_excluded(unit, phase):
for l in self.reverse_adjacent[unit]:
depcount[l] -= 1
# find start units
for unit in range(len(depcount)):
count = depcount[unit]
if count == 0:
queue.put(unit)
# run
while not queue.empty():
unit = queue.get()
if self._is_excluded(unit, phase):
continue
yield unit
for l in self.reverse_adjacent[unit]:
depcount[l] -= 1
if depcount[l] == 0:
queue.put(l)
def compute_size(self, phase = 'TRAIN'):
''' Perform the compute_size phase before running
'''
unit_to_tops = [{} for name in self.units]
for u in self._toporder(phase):
from_btm = {}
for btm in self.reverse_adjacent[u]:
from_btm.update(unit_to_tops[btm])
self.units[u].compute_size(from_btm, unit_to_tops[u])
'''
for u in self._toporder(phase):
print self.units[u].name
print self.units[u].out_shape
print self.units[u].rec_on_ori
print self.units[u].stride_on_ori
print self.units[u].start_on_ori
exit(0)
'''
def forward(self, phase = 'TRAIN'):
''' Perform the forward pass
'''
unit_to_tops = [{} for name in self.units]
for u in self._toporder(phase):
from_btm = {}
for btm in self.reverse_adjacent[u]:
from_btm.update(unit_to_tops[btm])
self.units[u].forward(from_btm, unit_to_tops[u], phase)
def forward_check(self):
''' Check forward function, use the same batch of data, remove random
'''
unit_to_tops = [{} for name in self.units]
for u in self._toporder('TEST'):
from_btm = {}
for btm in self.reverse_adjacent[u]:
from_btm.update(unit_to_tops[btm])
self.units[u].forward(from_btm, unit_to_tops[u], 'CHECK')
def backward(self, phase = 'TRAIN'):
''' Perform the backward pass
'''
unit_to_btms = [{} for name in self.units]
for u in self._reverse_toporder(phase):
from_top = {}
for top in self.adjacent[u]:
for keys in unit_to_btms[top]:
if keys in from_top:
from_top[keys] += unit_to_btms[top][keys]
else:
from_top[keys] = unit_to_btms[top][keys]
self.units[u].backward(from_top, unit_to_btms[u], phase)
def update(self, uid):
''' Update weights of one compute unit of the given uid
:param int uid: id of the compute unit to update
'''
self.units[uid].weight_update(self.current_lr,
self.base_weight_decay,
self.momentum,
self.batch_size)
def weight_update(self):
''' Update weights for all units
'''
for i in range(len(self.units)):
self.update(i)
def __str__(self):
ret = 'digraph G {\n'
for uid in range(len(self.units)):
ret += 'n' + str(uid) + ' [label="' + self.units[uid].name + '"]\n'
for uid in range(len(self.units)):
for nuid in self.adjacent[uid]:
ret += 'n' + str(uid) + ' -> n' + str(nuid) + '\n'
return ret + '}\n'
```
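A side note on ``Net._toporder`` above: it is a Kahn-style topological sort over the unit graph, with phase-excluded units removed from the dependency counts up front. A minimal standalone sketch of the same idea, independent of owl (function and variable names here are illustrative, not part of owl.net):
```python
import collections

def toporder(adjacent, reverse_adjacent, excluded=frozenset()):
    # in-degree of every node
    depcount = {n: len(prev) for n, prev in reverse_adjacent.items()}
    # drop dependencies contributed by excluded nodes, as Net._toporder does
    for n in excluded:
        for succ in adjacent[n]:
            depcount[succ] -= 1
    queue = collections.deque(n for n, c in depcount.items() if c == 0)
    order = []
    while queue:
        n = queue.popleft()
        if n in excluded:
            continue
        order.append(n)
        for succ in adjacent[n]:
            depcount[succ] -= 1
            if depcount[succ] == 0:
                queue.append(succ)
    return order

# tiny example graph: data -> conv -> fc -> softmax
adj = {'data': ['conv'], 'conv': ['fc'], 'fc': ['softmax'], 'softmax': []}
radj = {'data': [], 'conv': ['data'], 'fc': ['conv'], 'softmax': ['fc']}
print(toporder(adj, radj))  # ['data', 'conv', 'fc', 'softmax']
```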
#### File: owl/net/tools.py
```python
import math
import sys
import time
import numpy as np
import owl
from net import Net
from net_helper import CaffeNetBuilder
from caffe import *
from PIL import Image
import scipy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class HeatmapVisualizer:
''' Class of heatmap visualizer.
A heat map can reveal which parts of the activation are important. This information is useful for detection and segmentation tasks.
:ivar str solver_file: name of the solver_file, it will tell Minerva the network configuration and model saving path
:ivar snapshot: saved model snapshot index
:ivar str layer_name: name of the layer whose activation will be visualized as a heatmap
:ivar str result_path: path for the visualization results; HeatmapVisualizer generates a heatmap jpg for each testing image and saves it under this path.
:ivar gpu: the gpu to run testing
'''
def __init__(self, solver_file, snapshot, gpu_idx = 0):
self.solver_file = solver_file
self.snapshot = snapshot
self.gpu = owl.create_gpu_device(gpu_idx)
owl.set_device(self.gpu)
def build_net(self):
self.owl_net = Net()
self.builder = CaffeNetBuilder(self.solver_file)
self.snapshot_dir = self.builder.snapshot_dir
self.builder.build_net(self.owl_net)
self.owl_net.compute_size('TEST')
self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)
def run(s, layer_name, result_path):
''' Run heatmap visualizer
:param str layer_name: the layer to visualize
:param str result_path: the path to save heatmap
'''
feature_unit = s.owl_net.units[s.owl_net.name_to_uid[layer_name][0]]
#We need the testing data unit
data_unit = None
for i in range(len(s.owl_net.name_to_uid['data'])):
if s.owl_net.units[s.owl_net.name_to_uid['data'][i]].params.include[0].phase == 1:
data_unit = s.owl_net.units[s.owl_net.name_to_uid['data'][i]]
assert(data_unit)
#get the mean data
bp = BlobProto()
#get mean file
if len(data_unit.params.transform_param.mean_file) == 0:
mean_data = np.ones([3, 256, 256], dtype=np.float32)
assert(len(data_unit.params.transform_param.mean_value) == 3)
mean_data[0] = data_unit.params.transform_param.mean_value[0]
mean_data[1] = data_unit.params.transform_param.mean_value[1]
mean_data[2] = data_unit.params.transform_param.mean_value[2]
h_w = 256
else:
with open(data_unit.params.transform_param.mean_file, 'rb') as f:
bp.ParseFromString(f.read())
mean_narray = np.array(bp.data, dtype=np.float32)
h_w = np.sqrt(np.shape(mean_narray)[0] / 3)
mean_data = np.array(bp.data, dtype=np.float32).reshape([3, h_w, h_w])
#get the cropped img
crop_size = data_unit.params.transform_param.crop_size
crop_h_w = (h_w - crop_size) / 2
mean_data = mean_data[:, crop_h_w:crop_h_w + crop_size, crop_h_w:crop_h_w + crop_size]
cur_img = 0
for testiteridx in range(s.owl_net.solver.test_iter[0]):
s.owl_net.forward('TEST')
feature = feature_unit.out.to_numpy()
feature_shape = np.shape(feature)
data = data_unit.out.to_numpy()
img_num = feature_shape[0]
#processing each image
for imgidx in range(img_num):
img_feature = feature[imgidx,:]
#get the image
gbr_img_data = data[imgidx,:] + mean_data
img_data = np.zeros([data_unit.crop_size, data_unit.crop_size, 3], dtype=np.float32)
img_data[:,:,0] = gbr_img_data[2,:,:]
img_data[:,:,1] = gbr_img_data[1,:,:]
img_data[:,:,2] = gbr_img_data[0,:,:]
img_data /= 256
#get the heatmap
f_h = feature_shape[2]
f_w = feature_shape[3]
f_c = feature_shape[1]
heatmap = np.zeros([f_h, f_w], dtype=np.float32)
for cidx in range(f_c):
feature_map = img_feature[cidx,:]
f = np.max(np.max(feature_map)) - np.mean(np.mean(feature_map))
heatmap = heatmap + f * f * feature_map
#resize
heatmap = scipy.misc.imresize(heatmap,[data_unit.crop_size, data_unit.crop_size])
#save
fig, ax = plt.subplots(1,2)
ax[0].axis('off')
ax[1].axis('off')
ax[0].imshow(img_data, aspect='equal')
ax[1].imshow(heatmap, aspect='equal')
#ax[1] = plt.pcolor(heatmap)
info = '%s/%d.jpg' % (result_path, cur_img)
print info
fig.savefig(info)
plt.close('all')
cur_img += 1
print "Finish One Batch %d" % (testiteridx)
feature_file.close()
```
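The heatmap above is essentially a weighted sum of per-channel activation maps, with each channel weighted by the square of (max - mean) of that channel. A NumPy-only sketch of that step on made-up shapes (mean subtraction and the final resize are omitted):
```python
import numpy as np

def channel_weighted_heatmap(feature):
    # feature: [C, H, W] activations of one image at the chosen layer
    heatmap = np.zeros(feature.shape[1:], dtype=np.float32)
    for cidx in range(feature.shape[0]):
        fmap = feature[cidx]
        f = fmap.max() - fmap.mean()   # how "peaked" this channel is
        heatmap += f * f * fmap        # peaked channels contribute more
    return heatmap

feature = np.random.rand(8, 13, 13).astype(np.float32)
print(channel_weighted_heatmap(feature).shape)  # (13, 13)
```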
#### File: owl/net/trainer.py
```python
import math
import sys
import time
import numpy as np
import owl
from net import Net
import net
from net_helper import CaffeNetBuilder
from caffe import *
from PIL import Image
class NetTrainer:
''' Class for training neural network
Allows the user to train using Caffe's network configuration format, but on multiple GPUs. One
could use NetTrainer as follows:
>>> trainer = NetTrainer(solver_file, snapshot, num_gpu)
>>> trainer.build_net()
>>> trainer.run()
:ivar str solver_file: path of the solver file in Caffe's proto format
:ivar int snapshot: the idx of snapshot to start with
:ivar int num_gpu: the number of gpu to use
:ivar int sync_freq: the frequency at which to stop lazy evaluation and print some information, i.e. after how many
minibatches the trainer calls ``owl.wait_for_all()``. Note that this influences the training
speed: the higher the value, the faster the training but the more memory is used
during execution.
'''
def __init__(self, solver_file, snapshot = 0, num_gpu = 1, sync_freq=1):
self.solver_file = solver_file
self.snapshot = snapshot
self.num_gpu = num_gpu
self.sync_freq = sync_freq
self.gpu = [owl.create_gpu_device(i) for i in range(num_gpu)]
def build_net(self):
''' Build network structure using Caffe's proto definition. It will also initialize
the network either from given snapshot or from scratch (using proper initializer).
During initialization, it will first try to load weight from snapshot. If failed, it
will then initialize the weight accordingly.
'''
self.owl_net = Net()
self.builder = CaffeNetBuilder(self.solver_file)
self.snapshot_dir = self.builder.snapshot_dir
self.builder.build_net(self.owl_net, self.num_gpu)
self.owl_net.compute_size()
self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)
def run(s):
''' Run the training algorithm on multiple GPUs
The basic logic is similar to the traditional single GPU training code as follows (pseudo-code)::
for epoch in range(MAX_EPOCH):
for i in range(NUM_MINI_BATCHES):
# load i^th minibatch
minibatch = loader.load(i, MINI_BATCH_SIZE)
net.ff(minibatch.data)
net.bp(minibatch.label)
grad = net.gradient()
net.update(grad, MINI_BATCH_SIZE)
With Minerva's lazy evaluation and dataflow engine, we are able to modify the above logic
to perform data parallelism on multiple GPUs (pseudo-code)::
for epoch in range(MAX_EPOCH):
for i in range(0, NUM_MINI_BATCHES, NUM_GPU):
gpu_grad = [None for i in range(NUM_GPU)]
for gpuid in range(NUM_GPU):
# specify which gpu following codes are running on
owl.set_device(gpuid)
# each minibatch is split among GPUs
minibatch = loader.load(i + gpuid, MINI_BATCH_SIZE / NUM_GPU)
net.ff(minibatch.data)
net.bp(minibatch.label)
gpu_grad[gpuid] = net.gradient()
net.accumulate_and_update(gpu_grad, MINI_BATCH_SIZE)
So each GPU will take charge of one *mini-mini batch* training, and since all their ``ff``, ``bp`` and ``gradient``
calculations are independent of each other, they can be parallelized naturally using Minerva's DAG engine.
The only problem left is ``accumulate_and_update`` of the gradients from all GPUs. If we do it on one GPU,
that GPU would become a bottleneck. The solution is to also partition this workload across the GPUs (pseudo-code)::
def accumulate_and_update(gpu_grad, MINI_BATCH_SIZE):
num_layers = len(gpu_grad[0])
for layer in range(num_layers):
upd_gpu = layer * NUM_GPU / num_layers
# specify which gpu to update the layer
owl.set_device(upd_gpu)
for gid in range(NUM_GPU):
if gid != upd_gpu:
gpu_grad[upd_gpu][layer] += gpu_grad[gid][layer]
net.update_layer(layer, gpu_grad[upd_gpu][layer], MINI_BATCH_SIZE)
Since the update of each layer is independent of the others, the updates can be parallelized efficiently. Minerva's
dataflow engine transparently handles the dependency resolving, scheduling and memory copying among different devices,
so users don't need to care about that.
'''
wgrad = [[] for i in range(s.num_gpu)]
bgrad = [[] for i in range(s.num_gpu)]
last = time.time()
wunits = s.owl_net.get_weighted_unit_ids()
last_start = time.time()
start_idx = s.snapshot * s.owl_net.solver.snapshot
end_idx = s.owl_net.solver.max_iter
for iteridx in range(start_idx, end_idx):
# get the learning rate
if s.owl_net.solver.lr_policy == "poly":
s.owl_net.current_lr = s.owl_net.base_lr * pow(1 - float(iteridx) / s.owl_net.solver.max_iter, s.owl_net.solver.power)
elif s.owl_net.solver.lr_policy == "step":
s.owl_net.current_lr = s.owl_net.base_lr * pow(s.owl_net.solver.gamma, iteridx / s.owl_net.solver.stepsize)
# train on multi-gpu
for gpuid in range(s.num_gpu):
owl.set_device(s.gpu[gpuid])
s.owl_net.forward('TRAIN')
s.owl_net.backward('TRAIN')
for wid in wunits:
wgrad[gpuid].append(s.owl_net.units[wid].weightgrad)
bgrad[gpuid].append(s.owl_net.units[wid].biasgrad)
# weight update
for i in range(len(wunits)):
wid = wunits[i]
upd_gpu = i * s.num_gpu / len(wunits)
owl.set_device(s.gpu[upd_gpu])
for gid in range(s.num_gpu):
if gid == upd_gpu:
continue
wgrad[upd_gpu][i] += wgrad[gid][i]
bgrad[upd_gpu][i] += bgrad[gid][i]
s.owl_net.units[wid].weightgrad = wgrad[upd_gpu][i]
s.owl_net.units[wid].biasgrad = bgrad[upd_gpu][i]
s.owl_net.update(wid)
if iteridx % s.sync_freq == 0:
owl.wait_for_all()
thistime = time.time() - last
speed = s.owl_net.batch_size * s.sync_freq / thistime
print "Finished training %d minibatch (time: %s; speed: %s img/s)" % (iteridx, thistime, speed)
last = time.time()
wgrad = [[] for i in range(s.num_gpu)] # reset gradients
bgrad = [[] for i in range(s.num_gpu)]
# decide whether to display loss
if (iteridx + 1) % (s.owl_net.solver.display) == 0:
lossunits = s.owl_net.get_loss_units()
for lu in lossunits:
print "Training Loss %s: %f" % (lu.name, lu.getloss())
# decide whether to test
if (iteridx + 1) % (s.owl_net.solver.test_interval) == 0:
acc_num = 0
test_num = 0
for testiteridx in range(s.owl_net.solver.test_iter[0]):
s.owl_net.forward('TEST')
all_accunits = s.owl_net.get_accuracy_units()
accunit = all_accunits[len(all_accunits)-1]
#accunit = all_accunits[0]
test_num += accunit.batch_size
acc_num += (accunit.batch_size * accunit.acc)
print "Accuracy the %d mb: %f" % (testiteridx, accunit.acc)
sys.stdout.flush()
print "Testing Accuracy: %f" % (float(acc_num)/test_num)
# decide whether to save model
if (iteridx + 1) % (s.owl_net.solver.snapshot) == 0:
print "Save to snapshot %d, current lr %f" % ((iteridx + 1) / (s.owl_net.solver.snapshot), s.owl_net.current_lr)
s.builder.save_net_to_file(s.owl_net, s.snapshot_dir, (iteridx + 1) / (s.owl_net.solver.snapshot))
sys.stdout.flush()
def gradient_checker(s, checklayer_name):
''' Numerically check backpropagation by comparing analytic and finite-difference gradients
'''
h = 1e-2
threshold = 1e-4
checklayer = s.owl_net.units[s.owl_net.name_to_uid[checklayer_name][0]]
losslayer = []
for i in xrange(len(s.owl_net.units)):
if isinstance(s.owl_net.units[i], net.SoftmaxUnit):
losslayer.append(i)
last = None
'''
wunits = []
for i in xrange(len(s.owl_net.units)):
if isinstance(s.owl_net.units[i], net.WeightedComputeUnit):
wunits.append(i)
'''
wunits = s.owl_net.get_weighted_unit_ids()
accunits = s.owl_net.get_accuracy_units()
owl.set_device(s.gpu[0])
for iteridx in range(100):
#disturb the weights
oriweight = checklayer.weight
npweight = checklayer.weight.to_numpy()
weightshape = np.shape(npweight)
npweight = npweight.reshape(np.prod(weightshape[0:len(weightshape)]))
position = np.random.randint(0, np.shape(npweight)[0])
disturb = np.zeros(np.shape(npweight), dtype = np.float32)
disturb[position] = h
oriposval = npweight[position]
npweight += disturb
newposval = npweight[position]
npweight = npweight.reshape(weightshape)
checklayer.weight = owl.from_numpy(npweight)
all_loss = 0
# train on multi-gpu
s.owl_net.forward_check()
for i in range(len(losslayer)):
if len(s.owl_net.units[losslayer[i]].loss_weight) == 1:
all_loss += (s.owl_net.units[losslayer[i]].getloss() * s.owl_net.units[losslayer[i]].loss_weight[0])
else:
all_loss += s.owl_net.units[losslayer[i]].getloss()
#get origin loss
checklayer.weight = oriweight
ori_all_loss = 0
# train on multi-gpu
s.owl_net.forward_check()
for i in range(len(losslayer)):
if len(s.owl_net.units[losslayer[i]].loss_weight) == 1:
ori_all_loss += (s.owl_net.units[losslayer[i]].getloss() * s.owl_net.units[losslayer[i]].loss_weight[0])
else:
ori_all_loss += s.owl_net.units[losslayer[i]].getloss()
s.owl_net.backward('TEST')
#get analytic gradient
npgrad = checklayer.weightgrad.to_numpy()
npgrad = npgrad.reshape(np.prod(weightshape[0:len(weightshape)]))
analy_grad = npgrad[position] / s.owl_net.units[losslayer[i]].out.shape[1]
num_grad = (all_loss - ori_all_loss) / h
info = "Gradient Check at positon: %d analy: %f num: %f ratio: %f" % (position, analy_grad, num_grad, analy_grad / num_grad)
print info
class NetTester:
''' Class for performing testing; it supports single-view or multi-view testing, with top-1 or top-5 accuracy
Run it as::
>>> tester = NetTester(solver_file, softmax_layer, accuracy_layer, snapshot, gpu_idx)
>>> tester.build_net()
>>> tester.run(multiview)
:ivar str solver_file: path of the solver file in Caffe's proto format
:ivar int snapshot: the snapshot for testing
:ivar str softmax_layer_name: name of the softmax layer that produce prediction
:ivar str accuracy_layer_name: name of the accuracy layer that produce prediction
:ivar int gpu_idx: which gpu to perform the test
:ivar bool multiview: whether to use multiview tester
'''
def __init__(self, solver_file, softmax_layer_name, accuracy_layer_name, snapshot, gpu_idx = 0):
self.solver_file = solver_file
self.softmax_layer_name = softmax_layer_name
self.accuracy_layer_name = accuracy_layer_name
self.snapshot = snapshot
self.gpu = owl.create_gpu_device(gpu_idx)
owl.set_device(self.gpu)
def build_net(self):
self.owl_net = Net()
self.builder = CaffeNetBuilder(self.solver_file)
self.snapshot_dir = self.builder.snapshot_dir
self.builder.build_net(self.owl_net)
self.owl_net.compute_size('TEST')
self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)
def run(s, multiview):
#multi-view test
acc_num = 0
test_num = 0
loss_unit = s.owl_net.units[s.owl_net.name_to_uid[s.softmax_layer_name][0]]
accunit = s.owl_net.units[s.owl_net.name_to_uid[s.accuracy_layer_name][0]]
data_unit = None
for data_idx in range(len(s.owl_net.data_layers)):
for i in range(len(s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]])):
if s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]].params.include[0].phase == 1:
data_unit = s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]]
assert(data_unit)
if multiview == True:
data_unit.multiview = True
for testiteridx in range(s.owl_net.solver.test_iter[0]):
if multiview == True:
for i in range(10):
s.owl_net.forward('TEST')
if i == 0:
softmax_val = loss_unit.ff_y
batch_size = softmax_val.shape[1]
softmax_label = loss_unit.y
else:
softmax_val = softmax_val + loss_unit.ff_y
test_num += batch_size
if accunit.top_k == 5:
predict = softmax_val.to_numpy()
top_5 = np.argsort(predict, axis=1)[:,::-1]
ground_truth = softmax_label.max_index(0).to_numpy()
correct = 0
for i in range(batch_size):
for t in range(5):
if ground_truth[i] == top_5[i,t]:
correct += 1
break
acc_num += correct
else:
predict = softmax_val.max_index(0)
truth = softmax_label.max_index(0)
correct = (predict - truth).count_zero()
acc_num += correct
else:
s.owl_net.forward('TEST')
all_accunits = s.owl_net.get_accuracy_units()
batch_size = accunit.batch_size
test_num += batch_size
acc_num += (batch_size * accunit.acc)
correct = batch_size * accunit.acc
print "Accuracy of the %d mb: %f, batch_size: %d, current mean accuracy: %f" % (testiteridx, (correct * 1.0)/batch_size, batch_size, float(acc_num)/test_num)
sys.stdout.flush()
print "Testing Accuracy: %f" % (float(acc_num)/test_num)
class FeatureExtractor:
''' Class for extracting trained features
Feature will be stored in a txt file as a matrix. The size of the feature matrix is [num_img, feature_dimension]
Run it as::
>>> extractor = FeatureExtractor(solver_file, snapshot, gpu_idx)
>>> extractor.build_net()
>>> extractor.run(layer_name, feature_path)
:ivar str solver_file: path of the solver file in Caffe's proto format
:ivar int snapshot: the snapshot for testing
:ivar str layer_name: name of the layer that produces the feature
:ivar int gpu_idx: which gpu to perform the test
'''
def __init__(self, solver_file, snapshot, gpu_idx = 0):
self.solver_file = solver_file
self.snapshot = snapshot
self.gpu = owl.create_gpu_device(gpu_idx)
owl.set_device(self.gpu)
def build_net(self):
self.owl_net = Net()
self.builder = CaffeNetBuilder(self.solver_file)
self.snapshot_dir = self.builder.snapshot_dir
self.builder.build_net(self.owl_net)
self.owl_net.compute_size('TEST')
self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)
def run(s, layer_name, feature_path):
''' Run feature extractor
:param str layer_name: the layer to extract feature from
:param str feature_path: feature output path
'''
feature_unit = s.owl_net.units[s.owl_net.name_to_uid[layer_name][0]]
feature_file = open(feature_path, 'w')
batch_dir = 0
for testiteridx in range(s.owl_net.solver.test_iter[0]):
s.owl_net.forward('TEST')
feature = feature_unit.out.to_numpy()
feature_shape = np.shape(feature)
img_num = feature_shape[0]
feature_length = np.prod(feature_shape[1:len(feature_shape)])
feature = np.reshape(feature, [img_num, feature_length])
for imgidx in range(img_num):
for feaidx in range(feature_length):
info ='%f ' % (feature[imgidx, feaidx])
feature_file.write(info)
feature_file.write('\n')
print "Finish One Batch %d" % (batch_dir)
batch_dir += 1
feature_file.close()
class FilterVisualizer:
''' Class of filter visualizer.
Find the patches that most strongly activate a filter to demonstrate the pattern that filter responds to. It first feeds several images forward and finds the patches with the largest activation values for each filter. Those patches usually contain the pattern of that filter.
:ivar str solver_file: name of the solver_file, it will tell Minerva the network configuration and model saving path
:ivar snapshot: saved model snapshot index
:ivar str layer_name: name of the layer that will be visualized; all the filters in that layer are visualized in one run
:ivar str result_path: path for the visualization results; FilterVisualizer generates a jpg containing the nine selected patches for each filter in layer_name and saves it under this path.
:ivar gpu: the gpu to run testing
'''
def __init__(self, solver_file, snapshot, layer_name, result_path, gpu_idx = 0):
self.solver_file = solver_file
self.snapshot = snapshot
self.layer_name = layer_name
self.result_path = result_path
self.gpu = owl.create_gpu_device(gpu_idx)
owl.set_device(self.gpu)
def build_net(self):
self.owl_net = Net()
self.builder = CaffeNetBuilder(self.solver_file)
self.snapshot_dir = self.builder.snapshot_dir
self.builder.build_net(self.owl_net)
self.owl_net.compute_size('TEST')
self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)
def run(s):
#Note: there may be multiple data layers; choose the TEST one
data_unit = None
for data_idx in range(len(s.owl_net.data_layers)):
for i in range(len(s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]])):
if s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]].params.include[0].phase == 1:
data_unit = s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]]
assert(data_unit)
bp = BlobProto()
#get mean file
if len(data_unit.params.transform_param.mean_file) == 0:
mean_data = np.ones([3, 256, 256], dtype=np.float32)
assert(len(data_unit.params.transform_param.mean_value) == 3)
mean_data[0] = data_unit.params.transform_param.mean_value[0]
mean_data[1] = data_unit.params.transform_param.mean_value[1]
mean_data[2] = data_unit.params.transform_param.mean_value[2]
h_w = 256
else:
with open(data_unit.params.transform_param.mean_file, 'rb') as f:
bp.ParseFromString(f.read())
mean_narray = np.array(bp.data, dtype=np.float32)
h_w = np.sqrt(np.shape(mean_narray)[0] / 3)
mean_data = np.array(bp.data, dtype=np.float32).reshape([3, h_w, h_w])
#get the cropped img
crop_size = data_unit.params.transform_param.crop_size
crop_h_w = (h_w - crop_size) / 2
mean_data = mean_data[:, crop_h_w:crop_h_w + crop_size, crop_h_w:crop_h_w + crop_size]
feature_unit = s.owl_net.units[s.owl_net.name_to_uid[s.layer_name][0]]
batch_dir = 0
#we use 10000 images to conduct visualization
all_data = np.zeros([10000, 3, crop_size, crop_size], dtype=np.float32)
feature_shape = feature_unit.out_shape
all_feature = np.zeros([10000, feature_shape[2], feature_shape[1], feature_shape[0]], dtype=np.float32)
print 'Begin Generating Activations from Testing Set'
curimg = 0
for testiteridx in range(s.owl_net.solver.test_iter[0]):
s.owl_net.forward('TEST')
feature = feature_unit.out.to_numpy()
batch_size = np.shape(feature)[0]
all_feature[curimg:curimg+batch_size,:] = feature
data = data_unit.out.to_numpy()
all_data[curimg:curimg+batch_size,:] = data
curimg += batch_size
#HACK TODO: only take 10000 images
if curimg >= 10000:
break
info = 'Now Processed %d images' % (curimg)
print info
print 'Begin Selecting Patches'
#get the result
patch_shape = feature_unit.rec_on_ori
min_val = -float('inf')
#add back the mean file
for i in range(np.shape(all_data)[0]):
all_data[i,:,:,:] += mean_data
if len(feature_shape) == 4:
#iter for each filter, for each filter, we choose nine patch from different image
for i in range(feature_shape[2]):
#create the result image for nine patches
res_img = np.zeros([feature_unit.rec_on_ori * 3, feature_unit.rec_on_ori * 3, 3])
filter_feature = np.copy(all_feature[:,i,:,:])
for patchidx in range(9):
maxidx = np.argmax(filter_feature)
colidx = maxidx % feature_shape[0]
maxidx = (maxidx - colidx) / feature_shape[0]
rowidx = maxidx % feature_shape[1]
maxidx = (maxidx - rowidx) / feature_shape[1]
imgidx = maxidx
info = '%d %d %d' % (imgidx, rowidx, colidx)
filter_feature[imgidx,:,:] = min_val
#get the patch place
patch_start_row = max(0,feature_unit.start_on_ori + rowidx * feature_unit.stride_on_ori)
patch_end_row = min(feature_unit.start_on_ori + rowidx * feature_unit.stride_on_ori + feature_unit.rec_on_ori, data_unit.crop_size)
if patch_start_row == 0:
patch_end_row = feature_unit.rec_on_ori
if patch_end_row == data_unit.crop_size:
patch_start_row = data_unit.crop_size - feature_unit.rec_on_ori
patch_start_col = max(0,feature_unit.start_on_ori + colidx * feature_unit.stride_on_ori)
patch_end_col = min(feature_unit.start_on_ori + colidx * feature_unit.stride_on_ori + feature_unit.rec_on_ori, data_unit.crop_size)
if patch_start_col == 0:
patch_end_col = feature_unit.rec_on_ori
if patch_end_col == data_unit.crop_size:
patch_start_col = data_unit.crop_size - feature_unit.rec_on_ori
patch = all_data[imgidx, :, patch_start_row:patch_end_row, patch_start_col:patch_end_col]
#save img to image
row_in_res = patchidx / 3
col_in_res = patchidx % 3
st_row = row_in_res * patch_shape
st_col = col_in_res * patch_shape
#turn bgr into rgb
res_img[st_row:st_row+patch_end_row - patch_start_row, st_col:st_col + patch_end_col - patch_start_col, 2] = patch[0,:,:]
res_img[st_row:st_row+patch_end_row - patch_start_row, st_col:st_col + patch_end_col - patch_start_col, 1] = patch[1,:,:]
res_img[st_row:st_row+patch_end_row - patch_start_row, st_col:st_col + patch_end_col - patch_start_col, 0] = patch[2,:,:]
#save img
res_img = Image.fromarray(res_img.astype(np.uint8))
res_path = '%s/%d.jpg' % (s.result_path, i)
print res_path
res_img.save(res_path, format = 'JPEG')
else:
#Fully Layers
#iter for each filter, for each filter, we choose nine patch from different image
print feature_shape
for i in range(feature_shape[0]):
#create the result image for nine patches
res_img = np.zeros([data_unit.crop_size * 3, data_unit.crop_size * 3, 3])
filter_feature = np.copy(all_feature[:,i])
for patchidx in range(9):
maxidx = np.argmax(filter_feature)
imgidx = maxidx
filter_feature[imgidx] = min_val
#save img to image
row_in_res = patchidx / 3
col_in_res = patchidx % 3
st_row = row_in_res * data_unit.crop_size
st_col = col_in_res * data_unit.crop_size
#turn bgr into rgb
patch = all_data[imgidx,:,:,:]
res_img[st_row:st_row+data_unit.crop_size,st_col:st_col+data_unit.crop_size, 2] = patch[0,:,:]
res_img[st_row:st_row+data_unit.crop_size,st_col:st_col+data_unit.crop_size, 1] = patch[1,:,:]
res_img[st_row:st_row+data_unit.crop_size,st_col:st_col+data_unit.crop_size, 0] = patch[2,:,:]
#save img
res_img = Image.fromarray(res_img.astype(np.uint8))
res_path = '%s/%d.jpg' % (s.result_path, i)
print res_path
res_img.save(res_path, format = 'JPEG')
```
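The ``gradient_checker`` above relies on a one-sided finite difference: perturb a single weight by ``h``, re-run the forward pass, and compare ``(loss(w + h) - loss(w)) / h`` against the analytic gradient. A self-contained sketch of the same check on a toy squared loss (the model and loss are stand-ins, not owl code):
```python
import numpy as np

def loss(w, x, y):
    # toy squared loss of a linear model
    return 0.5 * np.sum((w.dot(x) - y) ** 2)

def analytic_grad(w, x, y):
    # d(loss)/dw = (w.x - y) x^T
    return np.outer(w.dot(x) - y, x)

h = 1e-4
w = np.random.randn(3, 5)
x = np.random.randn(5)
y = np.random.randn(3)
position = (1, 2)                  # the single entry we perturb
w_disturbed = w.copy()
w_disturbed[position] += h
num_grad = (loss(w_disturbed, x, y) - loss(w, x, y)) / h
ana_grad = analytic_grad(w, x, y)[position]
print(abs(num_grad - ana_grad))    # should be on the order of h
```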
#### File: scripts/system/dag_pretty_print.py
```python
import sys
import Queue
from dag_utils import Dag
fname = sys.argv[1]
dag = Dag(fname)
dag.load()
name_to_comp = {n : '' for n in dag.node_attr}
remains = set(dag.node_attr.keys())
def get_device_id(n):
n_ty = dag.node_attr[n]['type']
if n_ty == 'd':
return dag.node_attr[n]['device_id']
else:
succ = dag.adj[n][0]
return dag.node_attr[succ]['device_id']
# topological order
visited = {n : False for n in dag.node_attr}
depcount = {n : len(prev) for n, prev in dag.rev_adj.iteritems()}
queue = Queue.Queue()
for n, c in depcount.iteritems():
if c == 0:
queue.put(n)
dev_to_comp = {}
while not queue.empty():
dev_to_names = {}
renew_dev = set()
while not queue.empty():
n = queue.get()
if visited[n]:
continue
visited[n] = True
dev = get_device_id(n)
if not dev in dev_to_names:
dev_to_names[dev] = []
dev_to_names[dev].append(n)
#renew_dev = set(dev_to_names.keys())
for prev in dag.rev_adj[n]:
prev_dev = get_device_id(prev)
if prev_dev != dev:
renew_dev.add(prev_dev)
renew_dev.add(dev)
'''
for succ in dag.adj[n]:
succ_dev = get_device_id(succ)
if succ_dev != dev:
renew_dev.add(succ_dev)
renew_dev.add(dev)
'''
for dev, names in dev_to_names.iteritems():
if not dev in dev_to_comp or dev in renew_dev:
dev_to_comp[dev] = names[0]
for n in names:
name_to_comp[n] = dev_to_comp[dev]
for succ in dag.adj[n]:
depcount[succ] -= 1
if depcount[succ] == 0:
queue.put(succ)
# comp graph
comp_to_names = {}
comp_adj = {}
comp_rev_adj = {}
for n, c in name_to_comp.iteritems():
if not c in comp_to_names:
comp_to_names[c] = []
comp_adj[c] = set()
comp_rev_adj[c] = set()
comp_to_names[c].append(n)
for n in dag.node_attr:
for succ in dag.adj[n]:
if name_to_comp[n] != name_to_comp[succ]:
comp_adj[name_to_comp[n]].add(name_to_comp[succ])
comp_rev_adj[name_to_comp[succ]].add(name_to_comp[n])
# do some merge to reduce #nodes
def merge_to(c1, c2): # there is an edge c2->c1. the func then merge c1 to c2
comp_adj[c2].remove(c1)
comp_adj[c2].update(comp_adj[c1])
comp_to_names[c2] += comp_to_names[c1]
for succ in comp_adj[c1]:
comp_rev_adj[succ].remove(c1)
comp_rev_adj[succ].add(c2)
# clear
comp_to_names[c1] = []
comp_adj[c1] = set()
comp_rev_adj[c1] = set()
'''
depcount = {n : len(prev) for n, prev in comp_rev_adj.iteritems()}
queue = Queue.Queue()
for n, c in depcount.iteritems():
if c == 0:
queue.put(n)
dev_to_comp = {}
while not queue.empty():
c = queue.get()
succ_lst = comp_adj[c]
dev = get_device_id(c)
cand_merge = None
for prev in comp_rev_adj[c]:
if get_device_id(prev) == dev:
succ_dev = dev
for succ in comp_adj[prev]:
if get_device_id(succ) != dev:
succ_dev = get_device_id(succ)
break
if succ_dev == dev:
cand_merge = prev
break
if cand_merge != None:
tmp_rev = set(comp_rev_adj[c])
tmp_rev.remove(cand_merge)
merge_to(c, cand_merge)
for p in tmp_rev:
comp_adj[p].remove(c)
break
#print c, '|', tmp_rev, ' v.s. ', prev, '|', comp_rev_adj[prev]
for succ in succ_lst:
depcount[succ] -= 1
if depcount[succ] == 0:
queue.put(succ)
'''
#print {c : len(ns) for c,ns in comp_to_names.iteritems()}
#print comp_adj
# draw
def get_device_color(d):
colors = ['red', 'blue', 'green', 'orange']
return colors[int(d)]
def one_node_string(n):
s = n + ' '
n_ty = dag.node_attr[n]['type']
if n_ty == 'd':
s += '[shape=box,style=filled,label=\"\",color=' + get_device_color(get_device_id(n)) + ']'
else:
s += '[style=filled,color=' + get_device_color(get_device_id(n)) + ',label=\"' + dag.node_attr[n]['name'][0:6] + '\"]'
return s
def get_size(n):
min_size = 1
max_size = 5
min_num = 1
max_num = 200
return min_size + (max_size - min_size) * float(n - min_num) / (max_num - min_num)
def comp_node_string(c, ns):
s = c + ' [shape=circle,style=bold'
op_names = set()
for n in ns:
n_ty = dag.node_attr[n]['type']
if n_ty == 'o':
op_names.add(dag.node_attr[n]['name'][0:6])
s += ',label=\"#' + str(len(ns)) + '\\n' + ';'.join(list(op_names)) + '\"'
size = get_size(len(ns))
s += ',height=' + str(size) + ',width=' + str(size)
s += ',color=' + get_device_color(get_device_id(c))
s += ']'
return s
num_comps = 0
with open(fname + '.dag', 'w') as f:
f.write('digraph G {\n')
for c, ns in comp_to_names.iteritems():
if len(ns) == 0:
continue
elif len(ns) == 1:
f.write(one_node_string(ns[0]) + '\n')
else:
f.write(comp_node_string(c, ns) + '\n')
num_comps += 1
for c, adj in comp_adj.iteritems():
for succ in adj:
f.write(c + ' -> ' + succ + '\n')
f.write('}\n')
print '#comp:', num_comps
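# Note: the file written above is Graphviz DOT text (despite the '.dag'
# extension), so, assuming Graphviz is installed, it can be rendered with,
# for example:
#   dot -Tpng out.dag -o out.png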
```
#### File: scripts/system/dag_utils.py
```python
import sys
class Dag:
def __init__(self, fname):
self.fname = fname
self.node_attr = {}
self.adj = {}
self.rev_adj = {}
def load(self):
with open(self.fname, 'r') as f:
line = f.readline() # line == 'Nodes:'
line = f.readline()
while not line.startswith('Edges:'):
[name, attr] = line.strip().split('>>>>')
self.node_attr[name] = {pair.split('===')[0] : pair.split('===')[1] for pair in attr.split(';;;')[0:-1]}
self.adj[name] = []
self.rev_adj[name] = []
line = f.readline()
line = f.readline()
while len(line.strip()) != 0:
[src, dst] = line.strip().split(' -> ')
self.adj[src].append(dst)
self.rev_adj[dst].append(src)
line = f.readline()
if __name__ == '__main__':
dag = Dag(sys.argv[1])
dag.load()
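# Input format sketch (inferred from load() above, not an official spec;
# node names and attribute values are illustrative):
#
#   Nodes:
#   node_a>>>>type===o;;;name===matmul;;;
#   node_b>>>>type===d;;;name===weight0;;;
#   Edges:
#   node_a -> node_b
#
# i.e. one "<name>>>><attributes>" line per node, where attributes are
# ";;;"-separated "key===value" pairs, followed by an "Edges:" section of
# "<src> -> <dst>" lines terminated by a blank line or end of file.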
``` |
{
"source": "jjzhang166/seahub",
"score": 2
} |
#### File: api2/endpoints/share_link_zip_task.py
```python
import logging
import os
import json
import posixpath
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.conf import settings
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.views.file import send_file_access_msg
from seahub.share.models import FileShare
from seahub.utils import is_windows_operating_system, \
is_pro_version
import seaserv
from seaserv import seafile_api
logger = logging.getLogger(__name__)
class ShareLinkZipTaskView(APIView):
throttle_classes = (UserRateThrottle,)
def get(self, request, format=None):
""" Only used for download dir when view dir share link from web.
Permission checking:
1. authenticated user OR anonymous user has passed email code check(if necessary);
"""
# permission check
if is_pro_version() and settings.ENABLE_SHARE_LINK_AUDIT:
if not request.user.is_authenticated() and \
not request.session.get('anonymous_email'):
# if anonymous user has passed email code check,
# then his/her email info will be in session.
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# argument check
share_link_token = request.GET.get('share_link_token', None)
if not share_link_token:
error_msg = 'share_link_token invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
req_path = request.GET.get('path', None)
if not req_path:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        # resource check
fileshare = FileShare.objects.get_valid_dir_link_by_token(share_link_token)
if not fileshare:
error_msg = 'share_link_token %s not found.' % share_link_token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if req_path[-1] != '/':
req_path += '/'
if req_path == '/':
real_path = fileshare.path
else:
real_path = posixpath.join(fileshare.path, req_path.lstrip('/'))
if real_path[-1] != '/':
real_path += '/'
repo_id = fileshare.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dir_id = seafile_api.get_dir_id_by_path(repo_id, real_path)
if not dir_id:
error_msg = 'Folder %s not found.' % real_path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# get file server access token
dir_name = repo.name if real_path == '/' else \
os.path.basename(real_path.rstrip('/'))
dir_size = seafile_api.get_dir_size(
repo.store_id, repo.version, dir_id)
if dir_size > seaserv.MAX_DOWNLOAD_DIR_SIZE:
error_msg = 'Unable to download directory "%s": size is too large.' % dir_name
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
seaserv.send_message('seahub.stats', 'dir-download\t%s\t%s\t%s\t%s' %
(repo_id, fileshare.username, dir_id, dir_size))
except Exception as e:
logger.error(e)
is_windows = 0
if is_windows_operating_system(request):
is_windows = 1
fake_obj_id = {
'obj_id': dir_id,
'dir_name': dir_name,
'is_windows': is_windows
}
username = request.user.username
try:
zip_token = seafile_api.get_fileserver_access_token(
repo_id, json.dumps(fake_obj_id), 'download-dir', username)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not zip_token:
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if request.session.get('anonymous_email'):
request.user.username = request.session.get('anonymous_email')
send_file_access_msg(request, repo, real_path, 'share-link')
return Response({'zip_token': zip_token})
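# Example exchange (illustrative values; the exact URL depends on the
# project's URL configuration):
#   GET <this-endpoint>?share_link_token=abc123&path=/docs/
#   -> 200 {"zip_token": "<token issued by the file server>"}
# The web client then uses the returned zip_token against the file server to
# fetch the zipped directory.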
```
#### File: seahub/api2/permissions.py
```python
from rest_framework.permissions import BasePermission
from django.conf import settings
from seaserv import check_permission, is_repo_owner, ccnet_api
from seahub.utils import is_pro_version
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
class IsRepoWritable(BasePermission):
"""
Allows access only for user who has write permission to the repo.
"""
def has_permission(self, request, view, obj=None):
if request.method in SAFE_METHODS:
return True
repo_id = view.kwargs.get('repo_id', '')
user = request.user.username if request.user else ''
if user and check_permission(repo_id, user) == 'rw':
return True
return False
class IsRepoAccessible(BasePermission):
"""
Check whether user has Read or Write permission to a repo.
"""
def has_permission(self, request, view, obj=None):
repo_id = view.kwargs.get('repo_id', '')
user = request.user.username if request.user else ''
return True if check_permission(repo_id, user) else False
class IsRepoOwner(BasePermission):
"""
Check whether user is the owner of a repo.
"""
def has_permission(self, request, view, obj=None):
repo_id = view.kwargs.get('repo_id', '')
user = request.user.username if request.user else ''
return True if is_repo_owner(user, repo_id) else False
class IsGroupMember(BasePermission):
"""
Check whether user is in a group.
"""
def has_permission(self, request, view, obj=None):
group_id = int(view.kwargs.get('group_id', ''))
username = request.user.username if request.user else ''
return True if ccnet_api.is_group_user(group_id, username) else False
class CanInviteGuest(BasePermission):
"""Check user has permission to invite a guest.
"""
def has_permission(self, request, *args, **kwargs):
return settings.ENABLE_GUEST_INVITATION and \
request.user.permissions.can_invite_guest()
class CanGenerateShareLink(BasePermission):
"""Check user has permission to generate share link.
"""
def has_permission(self, request, *args, **kwargs):
return request.user.permissions.can_generate_share_link()
class CanGenerateUploadLink(BasePermission):
"""Check user has permission to generate upload link.
"""
def has_permission(self, request, *args, **kwargs):
return request.user.permissions.can_generate_upload_link()
class CanSendShareLinkMail(BasePermission):
"""Check user has permission to generate upload link.
"""
def has_permission(self, request, *args, **kwargs):
return request.user.permissions.can_send_share_link_mail()
class IsProVersion(BasePermission):
"""
Check whether Seafile is pro version
"""
def has_permission(self, request, *args, **kwargs):
return is_pro_version()
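# Usage sketch (standard Django REST framework wiring; the view name is
# hypothetical):
#
#   class RepoDetailView(APIView):
#       permission_classes = (IsRepoAccessible,)
#       ...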
```
#### File: seahub/base/context_processors.py
```python
import re
import os
from django.conf import settings as dj_settings
from constance import config
from seahub.settings import SEAFILE_VERSION, SITE_TITLE, SITE_NAME, \
MAX_FILE_NAME, BRANDING_CSS, LOGO_PATH, LOGO_WIDTH, LOGO_HEIGHT,\
SHOW_REPO_DOWNLOAD_BUTTON, SITE_ROOT, ENABLE_GUEST_INVITATION, \
FAVICON_PATH, ENABLE_THUMBNAIL, THUMBNAIL_SIZE_FOR_ORIGINAL, \
MEDIA_ROOT, SHOW_LOGOUT_ICON
try:
from seahub.settings import SEACLOUD_MODE
except ImportError:
SEACLOUD_MODE = False
from seahub.utils import HAS_FILE_SEARCH, EVENTS_ENABLED, \
TRAFFIC_STATS_ENABLED, is_pro_version
try:
from seahub.settings import ENABLE_PUBFILE
except ImportError:
ENABLE_PUBFILE = False
try:
from seahub.settings import ENABLE_SYSADMIN_EXTRA
except ImportError:
ENABLE_SYSADMIN_EXTRA = False
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
from seahub.api2.endpoints.admin.logo import CUSTOM_LOGO_PATH
from seahub.api2.endpoints.admin.favicon import CUSTOM_FAVICON_PATH
def base(request):
"""
Add seahub base configure to the context.
"""
try:
org = request.user.org
except AttributeError:
org = None
    # extract the repo id from the request path, used in search
repo_id_patt = r".*/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12})/.*"
m = re.match(repo_id_patt, request.get_full_path())
search_repo_id = m.group(1) if m is not None else None
file_server_root = config.FILE_SERVER_ROOT
if not file_server_root.endswith('/'):
file_server_root += '/'
logo_path = LOGO_PATH
favicon_path = FAVICON_PATH
# filter ajax/api request out
if (not request.is_ajax()) and ("api2/" not in request.path) and \
("api/v2.1/" not in request.path):
# get logo path
custom_logo_file = os.path.join(MEDIA_ROOT, CUSTOM_LOGO_PATH)
if os.path.exists(custom_logo_file):
logo_path = CUSTOM_LOGO_PATH
# get favicon path
custom_favicon_file = os.path.join(MEDIA_ROOT, CUSTOM_FAVICON_PATH)
if os.path.exists(custom_favicon_file):
favicon_path = CUSTOM_FAVICON_PATH
return {
'seafile_version': SEAFILE_VERSION,
'site_title': SITE_TITLE,
'branding_css': BRANDING_CSS,
'favicon_path': favicon_path,
'logo_path': logo_path,
'logo_width': LOGO_WIDTH,
'logo_height': LOGO_HEIGHT,
'seacloud_mode': SEACLOUD_MODE,
'cloud_mode': request.cloud_mode,
'org': org,
'site_name': SITE_NAME,
'enable_signup': config.ENABLE_SIGNUP,
'max_file_name': MAX_FILE_NAME,
'has_file_search': HAS_FILE_SEARCH,
'enable_pubfile': ENABLE_PUBFILE,
'show_repo_download_button': SHOW_REPO_DOWNLOAD_BUTTON,
'share_link_password_min_length': config.SHARE_LINK_PASSWORD_MIN_LENGTH,
'repo_password_min_length': config.REPO_PASSWORD_MIN_LENGTH,
'events_enabled': EVENTS_ENABLED,
'traffic_stats_enabled': TRAFFIC_STATS_ENABLED,
'sysadmin_extra_enabled': ENABLE_SYSADMIN_EXTRA,
'multi_tenancy': MULTI_TENANCY,
'multi_institution': getattr(dj_settings, 'MULTI_INSTITUTION', False),
'search_repo_id': search_repo_id,
'SITE_ROOT': SITE_ROOT,
'constance_enabled': dj_settings.CONSTANCE_ENABLED,
'FILE_SERVER_ROOT': file_server_root,
'enable_thumbnail': ENABLE_THUMBNAIL,
'thumbnail_size_for_original': THUMBNAIL_SIZE_FOR_ORIGINAL,
'enable_guest_invitation': ENABLE_GUEST_INVITATION,
'enable_terms_and_conditions': dj_settings.ENABLE_TERMS_AND_CONDITIONS,
'show_logout_icon': SHOW_LOGOUT_ICON,
'is_pro': True if is_pro_version() else False,
}
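# Registration sketch (hedged: the dotted path is inferred from this file's
# location, and the exact settings layout depends on the Django version):
#
#   TEMPLATES = [{
#       ...
#       'OPTIONS': {
#           'context_processors': [
#               ...
#               'seahub.base.context_processors.base',
#           ],
#       },
#   }]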
```
#### File: seahub/onlyoffice/views.py
```python
import json
import logging
import os
import requests
import urllib2
from django.core.cache import cache
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from seaserv import seafile_api
from .settings import VERIFY_ONLYOFFICE_CERTIFICATE
from seahub.utils import gen_file_upload_url
# Get an instance of a logger
logger = logging.getLogger(__name__)
@csrf_exempt
def onlyoffice_editor_callback(request):
#request.body:
# {"key":"<KEY>","status":1,
# "users":["uid-1488351242769"],
# "actions":[{"type":1,"userid":"uid-1488351242769"}]}
# "key":"<KEY>","status":2,"url":"https://13.113.111.2/cache/files/Khirz6zTPdfd8_6379/output.docx/output.docx?md5=5oL0qGUqXw72D85f28JaFg==&expires=1488956681&disposition=attachment&ooname=output.docx","changesurl":"https://13.113.111.2/cache/files/Khirz6zTPdfd8_6379/changes.zip/changes.zip?md5=vx3VYwaPEOxtZDA_3yuVrg==&expires=1488956681&disposition=attachment&ooname=output.zip","history":{"serverVersion":"4.2.10","changes":[{"created":"2017-03-01 07:03:11","user":{"id":"uid-1488351774447","name":"Anonymous"}}]},"users":["uid-1488351774447"],"actions":[{"type":0,"userid":"uid-1488351774447"}]}
logger.debug(request.body)
if request.method != 'POST':
return HttpResponse('{"error": 0}')
post_data = json.loads(request.body)
status = int(post_data.get('status', -1))
if status == 2: # document is ready for saving
# the link to the edited document to be saved with the document storage
# service. The link is present when the status value is equal to 2 or 3 only.
url = post_data.get('url')
context = None
if VERIFY_ONLYOFFICE_CERTIFICATE is False:
import ssl
context = ssl._create_unverified_context()
try:
file_content = urllib2.urlopen(url, context=context).read()
except urllib2.URLError as e:
logger.error(e)
else:
# update file
doc_key = post_data.get('key')
doc_info = json.loads(cache.get("ONLYOFFICE_%s" % doc_key))
repo_id = doc_info['repo_id']
file_path = doc_info['file_path']
username = doc_info['username']
update_token = seafile_api.get_fileserver_access_token(repo_id,
'dummy', 'update', username)
if not update_token:
return HttpResponse('{"error": 0}')
update_url = gen_file_upload_url(update_token, 'update-api')
files = {
'file': file_content,
'file_name': os.path.basename(file_path),
'target_file': file_path,
}
requests.post(update_url, files=files)
logger.info('%s updated by %s' % (repo_id + file_path, username))
return HttpResponse('{"error": 0}')
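# Sketch of the cache entry consumed above (hedged: it is assumed to be
# written by the view that opens the OnlyOffice editor, keyed by the document
# key that comes back in this callback):
#
#   cache.set("ONLYOFFICE_" + doc_key, json.dumps({
#       "repo_id": "<library id>",
#       "file_path": "/example.docx",
#       "username": "user@example.com",
#   }))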
```
#### File: endpoints/admin/test_logo.py
```python
import os
from tests.api.apitestbase import ApiTestBase
from tests.common.utils import urljoin
from tests.common.common import BASE_URL
from django.core.urlresolvers import reverse
from seahub.settings import MEDIA_ROOT
from seahub.api2.endpoints.admin.logo import CUSTOM_LOGO_PATH
class AdminLogoTest(ApiTestBase):
def test_update_logo(self):
custom_symlink = os.path.join(MEDIA_ROOT, os.path.dirname(CUSTOM_LOGO_PATH))
if os.path.exists(custom_symlink):
os.remove(custom_symlink)
assert not os.path.exists(custom_symlink)
        # update logo
logo_url = reverse('api-v2.1-admin-logo')
logo_url = urljoin(BASE_URL, logo_url)
logo_file = os.path.join(os.getcwd(), 'media/img/seafile-logo.png')
with open(logo_file) as f:
json_resp = self.admin_post(logo_url, files={'logo': f}).json()
assert json_resp['success'] == True
assert os.path.exists(custom_symlink)
assert os.path.islink(custom_symlink)
def test_update_logo_with_invalid_user_permission(self):
        # update logo
logo_url = reverse('api-v2.1-admin-logo')
logo_url = urljoin(BASE_URL, logo_url)
logo_file = os.path.join(os.getcwd(), 'media/img/seafile-logo.png')
with open(logo_file) as f:
json_resp = self.post(logo_url, files={'logo': f}, expected=403).json()
```
#### File: api/endpoints/test_shared_repos.py
```python
import json
from django.core.urlresolvers import reverse
import seaserv
from seaserv import seafile_api
from seahub.profile.models import Profile
from seahub.test_utils import BaseTestCase
from tests.common.utils import randstring
class SharedReposTest(BaseTestCase):
def share_repo_to_user(self):
seafile_api.share_repo(
self.repo.id, self.user.username,
self.admin.username, 'rw')
def share_repo_to_group(self):
seafile_api.set_group_repo(
self.repo.id, self.group.id,
self.user.username, 'rw')
def share_repo_to_public(self):
seafile_api.add_inner_pub_repo(
self.repo.id, 'rw')
def setUp(self):
self.repo_id = self.repo.id
self.group_id = self.group.id
self.user_name = self.user.username
self.admin_name = self.admin.username
self.url = reverse('api-v2.1-shared-repos')
def tearDown(self):
seafile_api.remove_share(self.repo_id, self.user_name, self.admin_name)
seafile_api.unset_group_repo(self.repo_id, self.group_id, self.user_name)
seafile_api.remove_inner_pub_repo(self.repo_id)
self.remove_repo()
def test_can_get_when_share_to_user(self):
self.share_repo_to_user()
contact_email = <EMAIL>' % (randstring(6), randstring(6))
nickname = randstring(6)
p = Profile.objects.add_or_update(self.admin_name, nickname=nickname)
p.contact_email = contact_email
p.save()
self.login_as(self.user)
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp[0]['share_type'] == 'personal'
assert json_resp[0]['repo_id'] == self.repo_id
assert json_resp[0]['user_email'] == self.admin_name
assert json_resp[0]['user_name'] == nickname
assert json_resp[0]['contact_email'] == contact_email
assert len(json_resp[0]['modifier_email']) > 0
assert len(json_resp[0]['modifier_name']) > 0
assert len(json_resp[0]['modifier_contact_email']) > 0
def test_can_get_when_share_to_group(self):
self.share_repo_to_group()
self.login_as(self.user)
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp[0]['share_type'] == 'group'
assert json_resp[0]['repo_id'] == self.repo_id
assert json_resp[0]['group_id'] == self.group_id
def test_can_get_when_share_to_public(self):
self.share_repo_to_public()
self.login_as(self.user)
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp[0]['share_type'] == 'public'
def test_get_with_invalid_repo_permission(self):
self.share_repo_to_user()
self.share_repo_to_group()
self.share_repo_to_public()
# login with admin, then get user's share repo info
self.login_as(self.admin)
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp) == 0
def test_can_update_user_share_perm(self):
self.share_repo_to_user()
assert seafile_api.check_permission_by_path(
self.repo_id, '/', self.admin_name) == 'rw'
self.login_as(self.user)
url = reverse('api-v2.1-shared-repo', args=[self.repo_id])
data = 'permission=r&share_type=personal&user=%s' % self.admin_name
resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
assert seafile_api.check_permission_by_path(
self.repo_id, '/', self.admin_name) == 'r'
def test_can_update_group_share_perm(self):
self.share_repo_to_group()
# print seafile_api.get_folder_group_perm(self.repo_id, '/', int(self.group_id))
repos = seafile_api.get_group_repos_by_owner(self.user_name)
assert repos[0].permission == 'rw'
self.login_as(self.user)
url = reverse('api-v2.1-shared-repo', args=[self.repo_id])
data = 'permission=r&share_type=group&group_id=%s' % self.group_id
resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
repos = seafile_api.get_group_repos_by_owner(self.user_name)
assert repos[0].permission == 'r'
def test_can_update_public_share_perm(self):
for r in seaserv.seafserv_threaded_rpc.list_inner_pub_repos():
seafile_api.remove_inner_pub_repo(r.repo_id)
self.share_repo_to_public()
repos = seafile_api.list_inner_pub_repos_by_owner(self.user_name)
assert repos[0].permission == 'rw'
self.login_as(self.user)
url = reverse('api-v2.1-shared-repo', args=[self.repo_id])
data = 'permission=r&share_type=public'
resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
repos = seafile_api.list_inner_pub_repos_by_owner(self.user_name)
assert repos[0].permission == 'r'
def test_delete_user_share(self):
self.share_repo_to_user()
# admin user can view repo
assert seafile_api.check_permission_by_path(
self.repo_id, '/', self.admin_name) == 'rw'
self.login_as(self.user)
args = '?share_type=personal&user=%s' % self.admin_name
url = reverse('api-v2.1-shared-repo', args=[self.repo_id]) + args
resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
# admin user can NOT view repo
assert seafile_api.check_permission_by_path(
self.repo_id, '/', self.admin_name) == None
def test_delete_group_share(self):
self.share_repo_to_group()
# repo in group
repos = seafile_api.get_group_repos_by_owner(self.user_name)
assert repos[0].permission == 'rw'
self.login_as(self.user)
args = '?share_type=group&group_id=%s' % self.group_id
url = reverse('api-v2.1-shared-repo', args=[self.repo_id]) + args
resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
# repo NOT in group
repos = seafile_api.get_group_repos_by_owner(self.user_name)
assert len(repos) == 0
def test_delete_public_share(self):
for r in seaserv.seafserv_threaded_rpc.list_inner_pub_repos():
seafile_api.remove_inner_pub_repo(r.repo_id)
self.share_repo_to_public()
# repo in public
repos = seafile_api.list_inner_pub_repos_by_owner(
self.user_name)
assert repos[0].permission == 'rw'
self.login_as(self.user)
args = '?share_type=public'
url = reverse('api-v2.1-shared-repo', args=[self.repo_id]) + args
resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
# repo NOT in public
repos = seafile_api.list_inner_pub_repos_by_owner(
self.user_name)
assert len(repos) == 0
def test_update_perm_if_not_owner(self):
self.share_repo_to_user()
# admin can view repo but NOT owner
assert seafile_api.check_permission_by_path(
self.repo_id, '/', self.admin_name) == 'rw'
self.login_as(self.admin)
url = reverse('api-v2.1-shared-repo', args=[self.repo_id])
data = 'permission=r&share_type=personal'
resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
def test_delete_perm_if_not_owner(self):
self.share_repo_to_user()
# admin can view repo but NOT owner
assert seafile_api.check_permission_by_path(
self.repo_id, '/', self.admin_name) == 'rw'
self.login_as(self.admin)
args = '?share_type=personal&user=%s' % self.admin_name
url = reverse('api-v2.1-shared-repo', args=[self.repo_id]) + args
resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
``` |
{
"source": "jjzhang166/Smart-Home",
"score": 3
} |
#### File: Smart-Home/lib/command.py
```python
from datetime import datetime
from mysql import connector
from lib import log
from config import mysql as mysql_con
class Command:
    # Insert a new command into the database
@staticmethod
def insert(text):
try:
conn = ""
conn = connector.connect(host=mysql_con.HOST,
user=mysql_con.USERNAME,
password=<PASSWORD>,
port=mysql_con.PORT,
database=mysql_con.DATABASE,
charset=mysql_con.CHARSET)
cursor = conn.cursor()
cursor.execute("INSERT INTO command (`content`,`status`,`posttime`) VALUES (%s,%s,%s)",
(text, 0, datetime.timestamp(datetime.now())))
            # Commit the record
conn.commit()
log.normal("往数据库中插入新命令:" + text)
except Exception as e:
log.exp("往数据库中插入新命令:" + text, e)
finally:
if "" != conn:
conn.close()
    # After execution, mark the command status as completed
@staticmethod
def exec(command_id):
try:
conn = ""
conn = connector.connect(host=mysql_con.HOST,
user=mysql_con.USERNAME,
password=<PASSWORD>,
port=mysql_con.PORT,
database=mysql_con.DATABASE,
charset=mysql_con.CHARSET)
cursor = conn.cursor()
cursor.execute("UPDATE command SET `status`=%s WHERE id=%s",
('1', command_id))
            # Commit the record
conn.commit()
log.normal("更新命令状态 命令编号:%d" % command_id)
except Exception as e:
log.exp("更新命令状态 命令编号:%d" % command_id, e)
finally:
if "" != conn:
conn.close()
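# Usage sketch (illustrative command text and id):
#   Command.insert("turn on the living room light")  # queue a new command
#   Command.exec(42)                                  # mark command id 42 as completed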
```
#### File: jjzhang166/Smart-Home/recode.py
```python
import pyaudio
import numpy as np
from datetime import datetime
import wave
import time
import config.recode as config
from lib import log
# Save the audio frames in data to a WAV file named file_name
def save_wave_file(file_name, data):
try:
with wave.open(file_name, 'wb') as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(config.RATE)
wf.writeframes(b''.join(data))
except IOError as e:
log.exp("保存wav文件失败: " + file_name, e)
# Start recording
def start():
    # Open the audio input stream
pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16, channels=1, rate=config.RATE, input=True,
frames_per_buffer=config.NUM_BLOCK)
save_count = 0
save_buffer = []
start_recode = 0
log.normal("开始监听麦克风...")
while True:
        # Read NUM_BLOCK samples from the stream
string_audio_data = stream.read(config.NUM_BLOCK)
        # Convert the raw bytes into a numpy array
audio_data = np.fromstring(string_audio_data, dtype=np.short)
        # Count the samples that exceed LEVEL
large_sample_count = np.sum(audio_data > config.LEVEL)
if large_sample_count < config.COUNT_NUM:
            # Below the recording threshold
if save_count > 1:
save_count -= 1
save_buffer.append(string_audio_data)
else:
if 1 == start_recode:
start_recode = 0
save_buffer.append(string_audio_data)
log.normal("录音结束")
else:
save_buffer = [string_audio_data, ]
time.sleep(0.01)
else:
            # Recording threshold reached
            # Append the data to be saved to save_buffer
save_buffer.append(string_audio_data)
if 0 == start_recode:
save_count = config.SAVE_LENGTH
start_recode = 1
log.normal("开始录音")
if 0 == start_recode:
            # Write save_buffer to a WAV file named with the timestamp of when it was saved
if len(save_buffer) > 1:
filename = "cache/sound/before_" + datetime.now().strftime("%Y-%m-%d_%H_%M_%S") + ".wav"
save_wave_file(filename, save_buffer)
save_buffer = []
log.normal(filename + " 保存文件")
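# Worked example of the loop timing (hypothetical config values; the real ones
# live in config/recode.py): with RATE = 16000 and NUM_BLOCK = 2000, each
# stream.read() call covers 2000 / 16000 = 0.125 s of audio, and SAVE_LENGTH
# controls roughly how many quiet blocks are still appended after the level
# drops below LEVEL.
#
# Entry-point sketch, assuming the module is meant to be run directly:
#   if __name__ == '__main__':
#       start()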
```
#### File: Smart-Home/script/time.py
```python
import sys
from datetime import datetime
from lib import baidu_voice
# Current time
def time_now():
baidu_voice.Voice.tts("现在时间:%s点%s分" % (datetime.now().hour, datetime.now().minute))
# Today's date
def time_today():
baidu_voice.Voice.tts("今天是:%s月%s号" % (datetime.now().month, datetime.now().day))
if __name__ == '__main__':
param = sys.argv[1]
if "now" == param:
time_now()
if "today" == param:
time_today()
```
#### File: Smart-Home/script/weather.py
```python
import sys
from datetime import datetime
from lib import baidu_voice
from lib import log
from urllib import request, parse
import json
def get_weather():
url = 'https://api.thinkpage.cn/v3/weather/daily.json?'
data = {
"key": "<KEY>",
"location": "anyang",
"language": "zh-Hans",
"unit": "c",
"start": "0",
"days": "2",
}
url = url + parse.urlencode(data)
req = request.Request(url)
try:
with request.urlopen(req, timeout=5) as req:
content = req.read().decode("utf-8")
data = json.loads(content)
if data and isinstance(data, dict):
return data["results"][0]["daily"]
else:
baidu_voice.Voice.tts("天气获取失败")
except Exception as e:
log.exp("获取天气 %s" % url, e)
baidu_voice.Voice.tts("天气获取失败")
return url
# Tomorrow's weather
def weather_tomorrow():
data = get_weather()[1]
if "" == data["precip"]:
data["precip"] = "未知"
    # Compute the wind force level
if "" != data["wind_speed"]:
if 1 > int(data["wind_speed"]):
data["wind_level"] = 0
if 1 <= int(data["wind_speed"]) <= 5:
data["wind_level"] = 1
if 6 <= int(data["wind_speed"]) <= 11:
data["wind_level"] = 2
if 12 <= int(data["wind_speed"]) <= 19:
data["wind_level"] = 3
if 20 <= int(data["wind_speed"]) <= 28:
data["wind_level"] = 4
if 29 <= int(data["wind_speed"]) <= 38:
data["wind_level"] = 5
if 39 <= int(data["wind_speed"]) <= 49:
data["wind_level"] = 6
if 50 <= int(data["wind_speed"]) <= 61:
data["wind_level"] = 7
if 62 <= int(data["wind_speed"]) <= 74:
data["wind_level"] = 8
if 75 <= int(data["wind_speed"]) <= 88:
data["wind_level"] = 9
if 89 <= int(data["wind_speed"]) <= 102:
data["wind_level"] = 10
baidu_voice.Voice.tts("明天天气:白天%s 晚间%s 最高温度 %s度 最低温度 %s度 降水概率 %s %s风 %s级" %
(data["text_day"], data["text_night"], data["high"], data["low"],
data["precip"], data["wind_direction"], data["wind_level"]))
# Today's weather
def weather_today():
data = get_weather()[0]
if "" == data["precip"]:
data["precip"] = "未知"
    # Compute the wind force level
if "" != data["wind_speed"]:
if 1 > int(data["wind_speed"]):
data["wind_level"] = 0
if 1 <= int(data["wind_speed"]) <= 5:
data["wind_level"] = 1
if 6 <= int(data["wind_speed"]) <= 11:
data["wind_level"] = 2
if 12 <= int(data["wind_speed"]) <= 19:
data["wind_level"] = 3
if 20 <= int(data["wind_speed"]) <= 28:
data["wind_level"] = 4
if 29 <= int(data["wind_speed"]) <= 38:
data["wind_level"] = 5
if 39 <= int(data["wind_speed"]) <= 49:
data["wind_level"] = 6
if 50 <= int(data["wind_speed"]) <= 61:
data["wind_level"] = 7
if 62 <= int(data["wind_speed"]) <= 74:
data["wind_level"] = 8
if 75 <= int(data["wind_speed"]) <= 88:
data["wind_level"] = 9
if 89 <= int(data["wind_speed"]) <= 102:
data["wind_level"] = 10
baidu_voice.Voice.tts("今天天气:白天%s 晚间%s 最高温度 %s度 最低温度 %s度 降水概率 %s %s风 %s级" %
(data["text_day"], data["text_night"], data["high"], data["low"],
data["precip"], data["wind_direction"], data["wind_level"]))
if __name__ == '__main__':
param = sys.argv[1]
if "today" == param:
weather_today()
if "tomorrow" == param:
weather_tomorrow()
``` |
{
"source": "jjzhang166/telegram-app",
"score": 2
} |
#### File: telegram/tests/__init__.py
```python
from autopilot.testcase import AutopilotTestCase
from autopilot import platform
from ubuntuuitoolkit import emulators as toolkit_emulators
from autopilot.input import Mouse, Touch, Pointer
from telegram import emulators
from os.path import abspath, dirname, join
from autopilot.matchers import Eventually
from testtools.matchers import Equals
import fixtures
import ubuntuuitoolkit
import subprocess
class TelegramAppTestCase(AutopilotTestCase):
def __init__(self, *args):
super(TelegramAppTestCase, self).__init__(*args)
self._main_view = None
@property
def main_view(self):
if self._main_view is None:
self._main_view = self.app.wait_select_single(emulators.MainView)
return self._main_view
def setUp(self, parameter=""):
#self.pointing_device = Pointer(self.input_device_class.create())
super(TelegramAppTestCase, self).setUp()
subprocess.call(['pkill', 'telegram-app'])
#Preconditions: Logged In
if platform.model() == "Desktop":
self.app = self.launch_desktop_application(parameter)
else:
self.app = self.launch_mobile_application(parameter)
self.assertThat(self.main_view.visible, Eventually(Equals(True)))
# self.check_user_has_logged_in()
# if (self.check_user_has_logged_in()):
# self.assertThat(self.main_view.visible, Eventually(Equals(True)))
# else:
# raise RuntimeError("User must be logged in")
def launch_desktop_application(self, parameter):
#Setup the lib path environment variable using absolute path values, required by the app to access the necessary libs
library_path = abspath(join(dirname(__file__), '..', '..', '..', '..', 'build_desktop',))
envValue = library_path + ':$LD_LIBRARY_PATH'
self.useFixture(fixtures.EnvironmentVariable('LD_LIBRARY_PATH',envValue))
#Launch the test application using absolute path values
full_path = abspath(join(dirname(__file__), '..', '..', '..', '..', 'build_desktop','lib','x86_64-linux-gnu','bin','telegram'))
print(full_path + " " + parameter)
return self.launch_test_application(
full_path,
parameter,
app_type='qt',
emulator_base=toolkit_emulators.UbuntuUIToolkitEmulatorBase)
def launch_mobile_application(self, parameter):
return self.launch_click_package(
"com.ubuntu.telegram",
app_uris=['QT_LOAD_TESTABILITY=1'],
emulator_base=toolkit_emulators.UbuntuUIToolkitEmulatorBase)
def check_user_has_logged_in(self):
account_page = self.main_view.account_page
self.assertThat(account_page.visible, Eventually(Equals(True)))
# try:
# self.main_view.get_account_page()
# self.assertThat(self.main_view.account_page.visible, Eventually(Equals(True)))
# return True
# except:
# return False
```
#### File: telegram/tests/test_Offline.py
```python
from telegram.tests import TelegramAppTestCase
from telegram import emulators
# import subprocess
import ubuntuuitoolkit
from autopilot.matchers import Eventually
from testtools.matchers import Equals
from autopilot.display import Display
from autopilot import platform
from testtools import skipUnless
class BaseTelegramTestCase(TelegramAppTestCase):
def setUp(self):
super(BaseTelegramTestCase, self).setUp()
class OfflineTests(BaseTelegramTestCase):
@skipUnless(
platform.model() != "Desktop",
"Offline Test is only available on Device"
)
def test_check_for_offline_message(self):
display = Display.create()
screenWidth = display.get_screen_width()
screenHeight = display.get_screen_height()
#Pull menu from top
self.main_view.pointing_device.drag(screenWidth-202, 0, screenWidth-202, screenHeight)
#Tap Flight Mode switch
self.main_view.pointing_device.move(screenWidth-130,150)
self.main_view.pointing_device.click()
#Close menu
self.main_view.pointing_device.drag(0,screenHeight, 0,0)
account_page = self.main_view.account_page
# if account_page.get_default_header().title in ('Telegram', 'Connecting...'):
# print("WARNING - Please turn ON Airplane Mode")
self.assertThat(account_page.get_default_header().title, Eventually(Equals("Waiting for network...")))
#Pull menu from top
self.main_view.pointing_device.drag(screenWidth-180, 0, screenWidth-180, screenHeight)
#Tap Flight Mode switch
self.main_view.pointing_device.move(screenWidth-130,150)
self.main_view.pointing_device.click()
#Close menu
self.main_view.pointing_device.drag(0,screenHeight, 0,0)
# Check header no longer says offline message
self.assertThat(account_page.get_default_header().title, Eventually(Equals("Telegram")))
```
#### File: autopilot/telegram/utilities.py
```python
from autopilot.input import Keyboard
""" KEYBOARD UTILITIES """
global mainKeyboard
def enter_text_in_text_area(text_area, text):
#Assign keyboard type
mainKeyboard = Keyboard.create()
#Focus TextArea and begin typing
with mainKeyboard.focused_type(text_area) as kb:
kb.type(text)
def enter_text_in_text_area_and_press_enter(text_area, text):
#Assign keyboard type
mainKeyboard = Keyboard.create()
#Focus TextArea and begin typing
with mainKeyboard.focused_type(text_area) as kb:
kb.type(text)
#Simulate 'Enter' key being pressed
mainKeyboard.press_and_release('Enter')
``` |
{
"source": "jjzhang166/tensorflow",
"score": 2
} |
#### File: python/eager/backprop_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager import tensor
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
class BackpropTest(test.TestCase):
def testAggregateGradients(self):
def fn(x):
ind1 = tensor.Tensor(np.array([0, 1]))
ind2 = tensor.Tensor(np.array([2, 3]))
ind3 = tensor.Tensor(np.array([1, 3]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
g4 = math_ops.reduce_sum(x * tensor.Tensor(2.0))
return g1 * g2 * g3 * g4
var_np = np.random.rand(4, 2).astype(np.float32)
var = tensor.Tensor(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
with context.graph_mode(), self.test_session():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(
tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
self.assertAllClose(grad.numpy(), tf_dense_grad.eval())
def testImplicitGradWithResourceVariable(self):
x = resource_variable_ops.ResourceVariable(initial_value=tensor.Tensor(1.0),
name='x')
def fn():
tape.watch(x.handle)
b = tensor.Tensor(2.0)
c = math_ops.add(x.value(), b)
return math_ops.add(c, tensor.Tensor(3.0))
grad = backprop.implicit_grad(fn)()[0][1]
self.assertEqual(grad.numpy(), 1.0)
def testGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def fn(x):
with context.device('/gpu:0'):
b = tensor.Tensor(2.0)
c = math_ops.add(x.as_gpu_tensor(), b)
# TODO(apassos): remove as_cpu_tensor below by making TensorVSPace aware
# of devices.
return math_ops.add(c, tensor.Tensor(3.0)).as_cpu_tensor()
grad = backprop.gradients_function(fn, [0])(tensor.Tensor(1.0))[0]
self.assertEqual(grad.numpy(), 1.0)
def testCPU(self):
def fn(x):
b = tensor.Tensor(2.0)
c = math_ops.add(x, b)
return math_ops.add(c, tensor.Tensor(3.0))
grad = backprop.gradients_function(fn, [0])(tensor.Tensor(1.0))[0]
self.assertEqual(grad.numpy(), 1.0)
def testTensorCopyGPU2CPU2GPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def f(a, b):
return a.as_cpu_tensor() + b.as_cpu_tensor()
with context.device('/gpu:0'):
a = tensor.Tensor(1.0)
b = tensor.Tensor(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertEqual(grad.numpy(), 1.0)
def testEmptyParams(self):
def fn(a, b):
return a * b
x = tensor.Tensor(1.0)
y = tensor.Tensor(2.0)
dx, dy = backprop.gradients_function(fn)(x, y)
self.assertAllEqual(dx.numpy(), y.numpy())
self.assertAllEqual(dy.numpy(), x.numpy())
def testTensorCopyCPU2GPU2CPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
# back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
def f(a, b):
with context.device('/gpu:0'):
c = math_ops.add(a.as_gpu_tensor(0), b.as_gpu_tensor(0))
return math_ops.add(c.as_cpu_tensor(), tensor.Tensor(3.0))
with context.device('/cpu:0'):
a = tensor.Tensor(1.0)
b = tensor.Tensor(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertEqual(grad.numpy(), 1.0)
def testGetAttrType(self):
typ = backprop.op_attr_type('Add', 'T')
self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)
def testGetAttrList(self):
typ = backprop.op_attr_type('MaxPool', 'ksize')
self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])
def testMakeAttrType(self):
self.assertEqual(dtypes.float32,
backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))
def testMakeAttrTypeList(self):
self.assertEqual([dtypes.float32],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))
def testMakeAttrShape(self):
for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
expected = tensor_shape.TensorShape(s).as_proto()
actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)
self.assertEqual(
expected,
actual,
msg=('For shape %r, expected %r != %r actual' % (s, expected,
actual)))
def testMakeAttrShapeList(self):
shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
self.assertEqual(
[tensor_shape.TensorShape(s).as_proto() for s in shape_list],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))
if __name__ == '__main__':
test.main()
```
#### File: python/estimator/run_config.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from tensorflow.core.protobuf import config_pb2
_USE_DEFAULT = object()
# A list of the property names in RunConfig that the user is allowed to change.
_DEFAULT_REPLACEABLE_LIST = [
'model_dir',
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
'log_step_count_steps'
]
_SAVE_CKPT_ERR = (
'`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.'
)
def _validate_save_ckpt_with_replaced_keys(new_copy, replaced_keys):
"""Validates the save ckpt properties."""
# Ensure one (and only one) of save_steps and save_secs is not None.
# Also, if user sets one save ckpt property, say steps, the other one (secs)
# should be set as None to improve usability.
save_steps = new_copy.save_checkpoints_steps
save_secs = new_copy.save_checkpoints_secs
if ('save_checkpoints_steps' in replaced_keys and
'save_checkpoints_secs' in replaced_keys):
# If user sets both properties explicitly, we need to error out if both
# are set or neither of them are set.
if save_steps is not None and save_secs is not None:
raise ValueError(_SAVE_CKPT_ERR)
elif 'save_checkpoints_steps' in replaced_keys and save_steps is not None:
new_copy._save_checkpoints_secs = None # pylint: disable=protected-access
elif 'save_checkpoints_secs' in replaced_keys and save_secs is not None:
new_copy._save_checkpoints_steps = None # pylint: disable=protected-access
def _validate_properties(run_config):
"""Validates the properties."""
def _validate(property_name, cond, message):
property_value = getattr(run_config, property_name)
if property_value is not None and not cond(property_value):
raise ValueError(message)
_validate('model_dir', lambda dir: dir,
message='model_dir should be non-empty')
_validate('save_summary_steps', lambda steps: steps >= 0,
message='save_summary_steps should be >= 0')
_validate('save_checkpoints_steps', lambda steps: steps >= 0,
message='save_checkpoints_steps should be >= 0')
_validate('save_checkpoints_secs', lambda secs: secs >= 0,
message='save_checkpoints_secs should be >= 0')
_validate('session_config',
lambda sc: isinstance(sc, config_pb2.ConfigProto),
message='session_config must be instance of ConfigProto')
_validate('keep_checkpoint_max', lambda keep_max: keep_max >= 0,
message='keep_checkpoint_max should be >= 0')
_validate('keep_checkpoint_every_n_hours', lambda keep_hours: keep_hours > 0,
message='keep_checkpoint_every_n_hours should be > 0')
_validate('log_step_count_steps', lambda num_steps: num_steps > 0,
message='log_step_count_steps should be > 0')
_validate('tf_random_seed', lambda seed: isinstance(seed, six.integer_types),
message='tf_random_seed must be integer.')
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
def __init__(self,
model_dir=None,
tf_random_seed=1,
save_summary_steps=100,
save_checkpoints_steps=_USE_DEFAULT,
save_checkpoints_secs=_USE_DEFAULT,
session_config=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100):
"""Constructs a RunConfig.
Args:
model_dir: directory where model parameters, graph, etc are saved. If
`None`, will use a default value set by the Estimator.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
save_checkpoints_steps: Save checkpoints every this many steps. Can not be
specified with `save_checkpoints_secs`.
save_checkpoints_secs: Save checkpoints every this many seconds. Can not
be specified with `save_checkpoints_steps`. Defaults to 600 seconds.
If both `save_checkpoints_steps` and `save_checkpoints_secs` are None,
then checkpoints are disabled.
session_config: a ConfigProto used to set session parameters, or None.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec will be logged during training.
Raises:
ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
are set.
"""
if (save_checkpoints_steps == _USE_DEFAULT and
save_checkpoints_secs == _USE_DEFAULT):
save_checkpoints_steps = None
save_checkpoints_secs = 600
elif save_checkpoints_secs == _USE_DEFAULT:
save_checkpoints_secs = None
elif save_checkpoints_steps == _USE_DEFAULT:
save_checkpoints_steps = None
elif (save_checkpoints_steps is not None and
save_checkpoints_secs is not None):
raise ValueError(_SAVE_CKPT_ERR)
RunConfig._replace(
self,
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
model_dir=model_dir,
tf_random_seed=tf_random_seed,
save_summary_steps=save_summary_steps,
save_checkpoints_steps=save_checkpoints_steps,
save_checkpoints_secs=save_checkpoints_secs,
session_config=session_config,
keep_checkpoint_max=keep_checkpoint_max,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
log_step_count_steps=log_step_count_steps)
@property
def cluster_spec(self):
return None
@property
def evaluation_master(self):
return ''
@property
def is_chief(self):
return True
@property
def master(self):
return ''
@property
def num_ps_replicas(self):
return 0
@property
def num_worker_replicas(self):
return 1
@property
def task_id(self):
return 0
@property
def task_type(self):
return TaskType.WORKER
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def session_config(self):
return self._session_config
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
@property
def log_step_count_steps(self):
return self._log_step_count_steps
@property
def model_dir(self):
return self._model_dir
def replace(self, **kwargs):
"""Returns a new instance of `RunConfig` replacing specified properties.
Only the properties in the following list are allowed to be replaced:
- `model_dir`.
- `tf_random_seed`,
- `save_summary_steps`,
- `save_checkpoints_steps`,
- `save_checkpoints_secs`,
- `session_config`,
- `keep_checkpoint_max`,
- `keep_checkpoint_every_n_hours`,
- `log_step_count_steps`,
In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
can be set (should not be both).
Args:
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
return RunConfig._replace(
copy.deepcopy(self),
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
**kwargs)
@staticmethod
def _replace(config, allowed_properties_list=None, **kwargs):
"""See `replace`.
N.B.: This implementation assumes that for key named "foo", the underlying
property the RunConfig holds is "_foo" (with one leading underscore).
Args:
config: The RunConfig to replace the values of.
allowed_properties_list: The property name list allowed to be replaced.
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
allowed_properties_list = allowed_properties_list or []
for key, new_value in six.iteritems(kwargs):
if key in allowed_properties_list:
setattr(config, '_' + key, new_value)
continue
raise ValueError(
'Replacing {} is not supported. Allowed properties are {}.'.format(
key, allowed_properties_list))
_validate_save_ckpt_with_replaced_keys(config, kwargs.keys())
_validate_properties(config)
return config
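# Usage sketch (based on the constructor defaults and the replace() contract
# documented above):
#
#   config = RunConfig(model_dir='/tmp/my_model', save_summary_steps=200)
#   config = config.replace(save_checkpoints_steps=1000)
#   # replace() returns a new RunConfig; it raises ValueError for keys outside
#   # _DEFAULT_REPLACEABLE_LIST or if both checkpoint steps/secs are set.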
``` |
{
"source": "jjzhang166/Vulkan",
"score": 3
} |
#### File: jjzhang166/Vulkan/deploy_all_android.py
```python
import glob
import os
import shutil
import subprocess
####################
#
# Functions
#
####################
def copy(src, dst):
    # Copy an exact file, or every file matched by a wildcard pattern
for filename in glob.glob(src):
print("Copying asset '%s' to '%s'" % (os.path.basename(filename), dst))
shutil.copyfile(filename, dst)
####################
#
# Main
#
####################
print("Deploying all Android projects")
allExamples = os.listdir()
for example in allExamples:
if example.startswith("VKTS_Example"):
print("Processing '%s'" % (example))
copy(example + "/Android/bin/NativeActivity-debug.apk", example + "-debug.apk")
``` |
{
"source": "jjzhang166/Vulkan-LoaderAndValidationLayers",
"score": 2
} |
#### File: jjzhang166/Vulkan-LoaderAndValidationLayers/vk-generate.py
```python
import sys
import vulkan
def generate_get_proc_addr_check(name):
return " if (!%s || %s[0] != 'v' || %s[1] != 'k')\n" \
" return NULL;" % ((name,) * 3)
class Subcommand(object):
def __init__(self, argv):
self.argv = argv
self.headers = vulkan.headers
self.protos = vulkan.protos
self.outfile = None
def run(self):
if self.outfile:
with open(self.outfile, "w") as outfile:
outfile.write(self.generate())
else:
print(self.generate())
def generate(self):
copyright = self.generate_copyright()
header = self.generate_header()
body = self.generate_body()
footer = self.generate_footer()
contents = []
if copyright:
contents.append(copyright)
if header:
contents.append(header)
if body:
contents.append(body)
if footer:
contents.append(footer)
return "\n\n".join(contents)
def generate_copyright(self):
return """/* THIS FILE IS GENERATED. DO NOT EDIT. */
/*
* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: <NAME> <<EMAIL>>
*/"""
def generate_header(self):
return "\n".join(["#include <" + h + ">" for h in self.headers])
def generate_body(self):
pass
def generate_footer(self):
pass
class DispatchTableOpsSubcommand(Subcommand):
def __init__(self, argv):
self.argv = argv
self.headers = vulkan.headers_all
self.protos = vulkan.protos_all
self.outfile = None
def run(self):
if len(self.argv) < 1:
print("DispatchTableOpsSubcommand: <prefix> unspecified")
return
self.prefix = self.argv[0]
if len(self.argv) > 2:
print("DispatchTableOpsSubcommand: <prefix> [outfile]")
return
if len(self.argv) == 2:
self.outfile = self.argv[1]
super(DispatchTableOpsSubcommand, self).run()
def generate_header(self):
return "\n".join(["#include <vulkan/vulkan.h>",
"#include <vulkan/vk_layer.h>",
"#include <string.h>"])
def _generate_init_dispatch(self, type):
stmts = []
func = []
if type == "device":
# GPA has to be first one and uses wrapped object
stmts.append(" memset(table, 0, sizeof(*table));")
stmts.append(" // Core device function pointers")
stmts.append(" table->GetDeviceProcAddr = (PFN_vkGetDeviceProcAddr) gpa(device, \"vkGetDeviceProcAddr\");")
for proto in self.protos:
if proto.name == "CreateInstance" or proto.name == "EnumerateInstanceExtensionProperties" or \
proto.name == "EnumerateInstanceLayerProperties" or proto.params[0].ty == "VkInstance" or \
proto.params[0].ty == "VkPhysicalDevice" or proto.name == "GetDeviceProcAddr":
continue
if proto.name == "GetMemoryWin32HandleNV":
stmts.append("#ifdef VK_USE_PLATFORM_WIN32_KHR")
stmts.append(" table->%s = (PFN_vk%s) gpa(device, \"vk%s\");" %
(proto.name, proto.name, proto.name))
stmts.append("#endif // VK_USE_PLATFORM_WIN32_KHR")
else:
stmts.append(" table->%s = (PFN_vk%s) gpa(device, \"vk%s\");" %
(proto.name, proto.name, proto.name))
func.append("static inline void %s_init_device_dispatch_table(VkDevice device,"
% self.prefix)
func.append("%s VkLayerDispatchTable *table,"
% (" " * len(self.prefix)))
func.append("%s PFN_vkGetDeviceProcAddr gpa)"
% (" " * len(self.prefix)))
else:
stmts.append(" memset(table, 0, sizeof(*table));")
stmts.append(" // Core instance function pointers")
stmts.append(" table->GetInstanceProcAddr = (PFN_vkGetInstanceProcAddr) gpa(instance, \"vkGetInstanceProcAddr\");")
KHR_printed = False
EXT_printed = False
Win32_printed = False
XLIB_printed = False
XCB_printed = False
MIR_printed = False
WAY_printed = False
Android_printed = False
for proto in self.protos:
if proto.params[0].ty != "VkInstance" and proto.params[0].ty != "VkPhysicalDevice" or \
proto.name == "CreateDevice" or proto.name == "GetInstanceProcAddr":
continue
if Win32_printed and 'Win32' not in proto.name:
stmts.append("#endif // VK_USE_PLATFORM_WIN32_KHR")
Win32_printed = False
if XLIB_printed and 'Xlib' not in proto.name:
stmts.append("#endif // VK_USE_PLATFORM_XLIB_KHR")
XLIB_printed = False
if XCB_printed and 'Xcb' not in proto.name:
stmts.append("#endif // VK_USE_PLATFORM_XCB_KHR")
XCB_printed = False
if MIR_printed and 'Mir' not in proto.name:
stmts.append("#endif // VK_USE_PLATFORM_MIR_KHR")
MIR_printed = False
if WAY_printed and 'Wayland' not in proto.name:
stmts.append("#endif // VK_USE_PLATFORM_WAYLAND_KHR")
WAY_printed = False
if Android_printed and 'Android' not in proto.name:
stmts.append("#endif // VK_USE_PLATFORM_ANDROID_KHR")
Android_printed = False
if 'KHR' in proto.name and 'Win32' in proto.name:
if not Win32_printed:
stmts.append("#ifdef VK_USE_PLATFORM_WIN32_KHR")
Win32_printed = True
if 'KHR' in proto.name and 'Xlib' in proto.name:
if not XLIB_printed:
stmts.append("#ifdef VK_USE_PLATFORM_XLIB_KHR")
XLIB_printed = True
if 'KHR' in proto.name and 'Xcb' in proto.name:
if not XCB_printed:
stmts.append("#ifdef VK_USE_PLATFORM_XCB_KHR")
XCB_printed = True
if 'KHR' in proto.name and 'Mir' in proto.name:
if not MIR_printed:
stmts.append("#ifdef VK_USE_PLATFORM_MIR_KHR")
MIR_printed = True
if 'KHR' in proto.name and 'Wayland' in proto.name:
if not WAY_printed:
stmts.append("#ifdef VK_USE_PLATFORM_WAYLAND_KHR")
WAY_printed = True
if 'KHR' in proto.name and 'Android' in proto.name:
if not Android_printed:
stmts.append("#ifdef VK_USE_PLATFORM_ANDROID_KHR")
Android_printed = True
if 'KHR' in proto.name and not KHR_printed:
stmts.append(" // KHR instance extension function pointers")
KHR_printed = True
if 'EXT' in proto.name and not EXT_printed:
stmts.append(" // EXT instance extension function pointers")
EXT_printed = True
stmts.append(" table->%s = (PFN_vk%s) gpa(instance, \"vk%s\");" %
(proto.name, proto.name, proto.name))
func.append("static inline void %s_init_instance_dispatch_table(" % self.prefix)
func.append("%s VkInstance instance," % (" " * len(self.prefix)))
func.append("%s VkLayerInstanceDispatchTable *table," % (" " * len(self.prefix)))
func.append("%s PFN_vkGetInstanceProcAddr gpa)" % (" " * len(self.prefix)))
func.append("{")
func.append("%s" % "\n".join(stmts))
func.append("}")
return "\n".join(func)
def generate_body(self):
body = [self._generate_init_dispatch("device"),
self._generate_init_dispatch("instance")]
return "\n\n".join(body)
class WinDefFileSubcommand(Subcommand):
def run(self):
library_exports = {
"all": [],
"icd": [
"vk_icdGetInstanceProcAddr",
],
"layer": [
"vkGetInstanceProcAddr",
"vkGetDeviceProcAddr",
"vkEnumerateInstanceLayerProperties",
"vkEnumerateInstanceExtensionProperties"
],
"layer_multi": [
"multi2GetInstanceProcAddr",
"multi1GetDeviceProcAddr"
]
}
if len(self.argv) < 2 or len(self.argv) > 3 or self.argv[1] not in library_exports:
print("WinDefFileSubcommand: <library-name> {%s} [outfile]" %
"|".join(library_exports.keys()))
return
self.library = self.argv[0]
if self.library == "VkLayer_multi":
self.exports = library_exports["layer_multi"]
else:
self.exports = library_exports[self.argv[1]]
if len(self.argv) == 3:
self.outfile = self.argv[2]
super(WinDefFileSubcommand, self).run()
def generate_copyright(self):
return """; THIS FILE IS GENERATED. DO NOT EDIT.
;;;; Begin Copyright Notice ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Vulkan
;
; Copyright (c) 2015-2016 The Khronos Group Inc.
; Copyright (c) 2015-2016 Valve Corporation
; Copyright (c) 2015-2016 LunarG, Inc.
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; Author: <NAME> <<EMAIL>>
;;;; End Copyright Notice ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"""
def generate_header(self):
return "; The following is required on Windows, for exporting symbols from the DLL"
def generate_body(self):
body = []
body.append("LIBRARY " + self.library)
body.append("EXPORTS")
for proto in self.exports:
if self.library != "VkLayerSwapchain" or proto != "vkEnumerateInstanceExtensionProperties" and proto != "vkEnumerateInstanceLayerProperties":
body.append( proto)
return "\n".join(body)
def main():
wsi = {
"Win32",
"Android",
"Xcb",
"Xlib",
"Wayland",
"Mir",
"Display",
"AllPlatforms"
}
subcommands = {
"dispatch-table-ops": DispatchTableOpsSubcommand,
"win-def-file": WinDefFileSubcommand,
}
if len(sys.argv) < 3 or sys.argv[1] not in wsi or sys.argv[2] not in subcommands:
print("Usage: %s <wsi> <subcommand> [options]" % sys.argv[0])
        print("")
print("Available sucommands are: %s" % " ".join(subcommands))
exit(1)
subcmd = subcommands[sys.argv[2]](sys.argv[3:])
subcmd.run()
if __name__ == "__main__":
main()
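# Invocation sketches (arguments inferred from main() and the subcommands'
# run() methods above; library and output file names are illustrative):
#   python vk-generate.py AllPlatforms dispatch-table-ops vk_layer dispatch.h
#   python vk-generate.py Win32 win-def-file VkLayer_utils layer vk_layer.def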
``` |
{
"source": "jjzhang166/VulkanSamples",
"score": 2
} |
#### File: VulkanSamples/layers/spec.py
```python
import sys
import xml.etree.ElementTree as etree
import urllib2
#############################
# spec.py script
#
# Overview - this script is intended to generate validation error codes and message strings from the xhtml version of
# the specification. In addition to generating the header file, it provides a number of corollary services to aid in
# generating/updating the header.
#
# Ideal flow - Not there currently, but the ideal flow for this script would be that you run the script, it pulls the
# latest spec, compares it to the current set of generated error codes, and makes any updates as needed
#
# Current flow - the current flow achieves all of the ideal flow goals, but with more steps than are desired
# 1. Get the spec - right now spec has to be manually generated or pulled from the web
# 2. Generate header from spec - This is done in a single command line
# 3. Generate database file from spec - Can be done along with step #2 above, the database file contains a list of
# all error enums and message strings, along with some other info on if those errors are implemented/tested
# 4. Update header using a given database file as the root and a new spec file as goal - This makes sure that existing
# errors keep the same enum identifier while also making sure that new errors get a unique_id that continues on
# from the end of the previous highest unique_id.
#
# TODO:
# 1. Improve string matching to add more automation for figuring out which messages are changed vs. completely new
#
#############################
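# Example invocations (illustrative only; see printHelp() below for the authoritative option list):
#   python spec.py -gendb                 # parse the spec (online by default) and write the default header + database
#   python spec.py -spec vkspec.html -out vk_validation_error_messages.h   # parse a local spec file instead
#   python spec.py -update                # compare against the default database and regenerate header/database in place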
spec_filename = "vkspec.html" # can override w/ '-spec #' option
out_filename = "vk_validation_error_messages.h" # can override w/ '-out #' option
db_filename = "vk_validation_error_database.txt" # can override w/ '-gendb #' option
gen_db = False # set to True when '-gendb #' option provided
spec_compare = False # set to True with '-compare <db_filename>' option
# This is the root spec link that is used in error messages to point users to spec sections
#old_spec_url = "https://www.khronos.org/registry/vulkan/specs/1.0/xhtml/vkspec.html"
spec_url = "https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html"
# After the custom validation error message, this is the prefix for the standard message that includes the
# spec valid usage language as well as the link to nearest section of spec to that language
error_msg_prefix = "For more information refer to Vulkan Spec Section "
ns = {'ns': 'http://www.w3.org/1999/xhtml'}
validation_error_enum_name = "VALIDATION_ERROR_"
# Dict of new enum values that should be forced to remap to old ids, explicitly set by -remap option
remap_dict = {}
def printHelp():
print "Usage: python spec.py [-spec <specfile.html>] [-out <headerfile.h>] [-gendb <databasefile.txt>] [-compare <databasefile.txt>] [-update] [-remap <new_id-old_id,count>] [-help]"
print "\n Default script behavior is to parse the specfile and generate a header of unique error enums and corresponding error messages based on the specfile.\n"
print " Default specfile is from online at %s" % (spec_url)
print " Default headerfile is %s" % (out_filename)
print " Default databasefile is %s" % (db_filename)
print "\nIf '-gendb' option is specified then a database file is generated to default file or <databasefile.txt> if supplied. The database file stores"
print " the list of enums and their error messages."
print "\nIf '-compare' option is specified then the given database file will be read in as the baseline for generating the new specfile"
print "\nIf '-update' option is specified this triggers the master flow to automate updating header and database files using default db file as baseline"
print " and online spec file as the latest. The default header and database files will be updated in-place for review and commit to the git repo."
print "\nIf '-remap' option is specified it supplies forced remapping from new enum ids to old enum ids. This should only be specified along with -update"
print " option. Starting at newid and remapping to oldid, count ids will be remapped. Default count is '1' and use ':' to specify multiple remappings."
class Specification:
def __init__(self):
self.tree = None
self.val_error_dict = {} # string for enum is key that references 'error_msg' and 'api'
self.error_db_dict = {} # dict of previous error values read in from database file
self.delimiter = '~^~' # delimiter for db file
self.implicit_count = 0
self.copyright = """/* THIS FILE IS GENERATED. DO NOT EDIT. */
/*
* Vulkan
*
* Copyright (c) 2016 Google Inc.
* Copyright (c) 2016 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: <NAME> <<EMAIL>>
*/"""
def _checkInternetSpec(self):
"""Verify that we can access the spec online"""
try:
online = urllib2.urlopen(spec_url,timeout=1)
return True
except urllib2.URLError as err:
return False
return False
def loadFile(self, online=True, spec_file=spec_filename):
"""Load an API registry XML file into a Registry object and parse it"""
# Check if spec URL is available
if (online and self._checkInternetSpec()):
print "Using spec from online at %s" % (spec_url)
self.tree = etree.parse(urllib2.urlopen(spec_url))
else:
print "Using local spec %s" % (spec_file)
self.tree = etree.parse(spec_file)
#self.tree.write("tree_output.xhtml")
#self.tree = etree.parse("tree_output.xhtml")
self.parseTree()
def updateDict(self, updated_dict):
"""Assign internal dict to use updated_dict"""
self.val_error_dict = updated_dict
def parseTree(self):
"""Parse the registry Element, once created"""
print "Parsing spec file..."
unique_enum_id = 0
self.root = self.tree.getroot()
#print "ROOT: %s" % self.root
prev_heading = '' # Last seen section heading or sub-heading
prev_link = '' # Last seen link id within the spec
api_function = '' # API call that a check appears under
error_strings = set() # Flag any exact duplicate error strings and skip them
implicit_count = 0
for tag in self.root.iter(): # iterate down tree
# Grab most recent section heading and link
if tag.tag in ['h2', 'h3', 'h4']:
#if tag.get('class') != 'title':
# continue
print "Found heading %s" % (tag.tag)
prev_heading = "".join(tag.itertext())
# Insert a space between heading number & title
sh_list = prev_heading.rsplit('.', 1)
prev_heading = '. '.join(sh_list)
prev_link = tag.get('id')
print "Set prev_heading %s to have link of %s" % (prev_heading.encode("ascii", "ignore"), prev_link.encode("ascii", "ignore"))
elif tag.tag == 'a': # grab any intermediate links
if tag.get('id') != None:
prev_link = tag.get('id')
#print "Updated prev link to %s" % (prev_link)
elif tag.tag == 'div' and tag.get('class') == 'listingblock':
# Check and see if this is API function
code_text = "".join(tag.itertext()).replace('\n', '')
code_text_list = code_text.split()
if len(code_text_list) > 1 and code_text_list[1].startswith('vk'):
api_function = code_text_list[1].strip('(')
print "Found API function: %s" % (api_function)
#elif tag.tag == '{http://www.w3.org/1999/xhtml}div' and tag.get('class') == 'sidebar':
elif tag.tag == 'div' and tag.get('class') == 'content':
# parse down sidebar to check for valid usage cases
valid_usage = False
implicit = False
for elem in tag.iter():
if elem.tag == 'div' and None != elem.text and 'Valid Usage' in elem.text:
valid_usage = True
if '(Implicit)' in elem.text:
implicit = True
else:
implicit = False
elif valid_usage and elem.tag == 'li': # grab actual valid usage requirements
error_msg_str = "%s '%s' which states '%s' (%s#%s)" % (error_msg_prefix, prev_heading, "".join(elem.itertext()).replace('\n', ''), spec_url, prev_link)
# Some txt has multiple spaces so split on whitespace and join w/ single space
error_msg_str = " ".join(error_msg_str.split())
if error_msg_str in error_strings:
print "WARNING: SKIPPING adding repeat entry for string. Please review spec and file issue as appropriate. Repeat string is: %s" % (error_msg_str)
else:
error_strings.add(error_msg_str)
enum_str = "%s%05d" % (validation_error_enum_name, unique_enum_id)
# TODO : '\' chars in spec error messages are most likely bad spec txt that needs to be updated
self.val_error_dict[enum_str] = {}
self.val_error_dict[enum_str]['error_msg'] = error_msg_str.encode("ascii", "ignore").replace("\\", "/")
self.val_error_dict[enum_str]['api'] = api_function
self.val_error_dict[enum_str]['implicit'] = False
if implicit:
self.val_error_dict[enum_str]['implicit'] = True
self.implicit_count = self.implicit_count + 1
unique_enum_id = unique_enum_id + 1
#print "Validation Error Dict has a total of %d unique errors and contents are:\n%s" % (unique_enum_id, self.val_error_dict)
def genHeader(self, header_file):
"""Generate a header file based on the contents of a parsed spec"""
print "Generating header %s..." % (header_file)
file_contents = []
file_contents.append(self.copyright)
file_contents.append('\n#pragma once')
file_contents.append('\n// Disable auto-formatting for generated file')
file_contents.append('// clang-format off')
file_contents.append('\n#include <unordered_map>')
file_contents.append('\n// enum values for unique validation error codes')
file_contents.append('// Corresponding validation error message for each enum is given in the mapping table below')
file_contents.append('// When a given error occurs, these enum values should be passed as the messageCode')
file_contents.append('// parameter to the PFN_vkDebugReportCallbackEXT function')
enum_decl = ['enum UNIQUE_VALIDATION_ERROR_CODE {\n VALIDATION_ERROR_UNDEFINED = -1,']
error_string_map = ['static std::unordered_map<int, char const *const> validation_error_map{']
enum_value = 0
for enum in sorted(self.val_error_dict):
#print "Header enum is %s" % (enum)
enum_value = int(enum.split('_')[-1])
enum_decl.append(' %s = %d,' % (enum, enum_value))
error_string_map.append(' {%s, "%s"},' % (enum, self.val_error_dict[enum]['error_msg']))
enum_decl.append(' %sMAX_ENUM = %d,' % (validation_error_enum_name, enum_value + 1))
enum_decl.append('};')
error_string_map.append('};\n')
file_contents.extend(enum_decl)
file_contents.append('\n// Mapping from unique validation error enum to the corresponding error message')
file_contents.append('// The error message should be appended to the end of a custom error message that is passed')
file_contents.append('// as the pMessage parameter to the PFN_vkDebugReportCallbackEXT function')
file_contents.extend(error_string_map)
#print "File contents: %s" % (file_contents)
with open(header_file, "w") as outfile:
outfile.write("\n".join(file_contents))
def analyze(self):
"""Print out some stats on the valid usage dict"""
# Create dict for # of occurences of identical strings
str_count_dict = {}
unique_id_count = 0
for enum in self.val_error_dict:
err_str = self.val_error_dict[enum]['error_msg']
if err_str in str_count_dict:
print "Found repeat error string"
str_count_dict[err_str] = str_count_dict[err_str] + 1
else:
str_count_dict[err_str] = 1
unique_id_count = unique_id_count + 1
print "Processed %d unique_ids" % (unique_id_count)
repeat_string = 0
for es in str_count_dict:
if str_count_dict[es] > 1:
repeat_string = repeat_string + 1
print "String '%s' repeated %d times" % (es, repeat_string)
print "Found %d repeat strings" % (repeat_string)
print "Found %d implicit checks" % (self.implicit_count)
def genDB(self, db_file):
"""Generate a database of check_enum, check_coded?, testname, error_string"""
db_lines = []
# Write header for database file
db_lines.append("# This is a database file with validation error check information")
db_lines.append("# Comments are denoted with '#' char")
db_lines.append("# The format of the lines is:")
db_lines.append("# <error_enum>%s<check_implemented>%s<testname>%s<api>%s<errormsg>%s<note>" % (self.delimiter, self.delimiter, self.delimiter, self.delimiter, self.delimiter))
db_lines.append("# error_enum: Unique error enum for this check of format %s<uniqueid>" % validation_error_enum_name)
db_lines.append("# check_implemented: 'Y' if check has been implemented in layers, 'U' for unknown, or 'N' for not implemented")
db_lines.append("# testname: Name of validation test for this check, 'Unknown' for unknown, or 'None' if not implmented")
db_lines.append("# api: Vulkan API function that this check is related to")
db_lines.append("# errormsg: The unique error message for this check that includes spec language and link")
db_lines.append("# note: Free txt field with any custom notes related to the check in question")
for enum in sorted(self.val_error_dict):
# Default to unknown if check or test are implemented, then update below if appropriate
implemented = 'U'
testname = 'Unknown'
note = ''
implicit = self.val_error_dict[enum]['implicit']
# If we have an existing db entry for this enum, use its implemented/testname values
if enum in self.error_db_dict:
implemented = self.error_db_dict[enum]['check_implemented']
testname = self.error_db_dict[enum]['testname']
note = self.error_db_dict[enum]['note']
if implicit and 'implicit' not in note: # add implicit note
if '' != note:
note = "implicit, %s" % (note)
else:
note = "implicit"
#print "delimiter: %s, id: %s, str: %s" % (self.delimiter, enum, self.val_error_dict[enum])
# Write the db line, using the existing db values when present, otherwise the 'U'/'Unknown' defaults set above
db_lines.append("%s%s%s%s%s%s%s%s%s%s%s" % (enum, self.delimiter, implemented, self.delimiter, testname, self.delimiter, self.val_error_dict[enum]['api'], self.delimiter, self.val_error_dict[enum]['error_msg'], self.delimiter, note))
db_lines.append("\n") # newline at end of file
print "Generating database file %s" % (db_file)
with open(db_file, "w") as outfile:
outfile.write("\n".join(db_lines))
def readDB(self, db_file):
"""Read a db file into a dict, format of each line is <enum><implemented Y|N?><testname><errormsg>"""
db_dict = {} # This is a simple db of just enum->errormsg, the same as is created from spec
max_id = 0
with open(db_file, "r") as infile:
for line in infile:
line = line.strip()
if line.startswith('#') or '' == line:
continue
db_line = line.split(self.delimiter)
if len(db_line) != 6:
print "ERROR: Bad database line doesn't have 6 elements: %s" % (line)
error_enum = db_line[0]
implemented = db_line[1]
testname = db_line[2]
api = db_line[3]
error_str = db_line[4]
note = db_line[5]
db_dict[error_enum] = error_str
# Also read complete database contents into our class var for later use
self.error_db_dict[error_enum] = {}
self.error_db_dict[error_enum]['check_implemented'] = implemented
self.error_db_dict[error_enum]['testname'] = testname
self.error_db_dict[error_enum]['api'] = api
self.error_db_dict[error_enum]['error_string'] = error_str
self.error_db_dict[error_enum]['note'] = note
unique_id = int(db_line[0].split('_')[-1])
if unique_id > max_id:
max_id = unique_id
return (db_dict, max_id)
# Compare unique ids from original database to data generated from updated spec
# 1. If a new id and error code exactly match original, great
# 2. If new id is not in original, but exact error code is, need to use original error code
# 3. If new id and new error are not in original, make sure new id picks up from end of original list
# 4. If new id in original, but error strings don't match then:
# 4a. If error string has exact match in original, update new to use original
# 4b. If error string not in original, may be updated error message, manually address
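# Illustrative example of the rules above (ids and messages are hypothetical):
#   original db : VALIDATION_ERROR_00010 -> "msg A"
#   new spec id : VALIDATION_ERROR_00042 -> "msg A" => case 2, remapped back to ..._00010
#   new spec id : VALIDATION_ERROR_00043 -> "msg B" => case 3, assigned max_id+1 and flagged for manual review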
def compareDB(self, orig_error_msg_dict, max_id):
"""Compare orig database dict to new dict, report out findings, and return potential new dict for parsed spec"""
# First create reverse dicts of err_strings to IDs
next_id = max_id + 1
orig_err_to_id_dict = {}
# Create an updated dict in-place that will be assigned to self.val_error_dict when done
updated_val_error_dict = {}
for enum in orig_error_msg_dict:
orig_err_to_id_dict[orig_error_msg_dict[enum]] = enum
new_err_to_id_dict = {}
for enum in self.val_error_dict:
new_err_to_id_dict[self.val_error_dict[enum]['error_msg']] = enum
ids_parsed = 0
# Values to be used for the update dict
update_enum = ''
update_msg = ''
update_api = ''
# Now parse through new dict and figure out what to do with non-matching things
for enum in sorted(self.val_error_dict):
ids_parsed = ids_parsed + 1
enum_list = enum.split('_') # grab sections of enum for use below
# Default update values to be the same
update_enum = enum
update_msg = self.val_error_dict[enum]['error_msg']
update_api = self.val_error_dict[enum]['api']
implicit = self.val_error_dict[enum]['implicit']
# Any user-forced remap takes precedence
if enum_list[-1] in remap_dict:
enum_list[-1] = remap_dict[enum_list[-1]]
new_enum = "_".join(enum_list)
print "NOTE: Using user-supplied remap to force %s to be %s" % (enum, new_enum)
update_enum = new_enum
elif enum in orig_error_msg_dict:
if self.val_error_dict[enum]['error_msg'] == orig_error_msg_dict[enum]:
print "Exact match for enum %s" % (enum)
# Nothing to see here
if enum in updated_val_error_dict:
print "ERROR: About to overwrite entry for %s" % (enum)
elif self.val_error_dict[enum]['error_msg'] in orig_err_to_id_dict:
# Same value w/ different error id, need to anchor to original id
print "Need to switch new id %s to original id %s" % (enum, orig_err_to_id_dict[self.val_error_dict[enum]['error_msg']])
# Update id at end of new enum to be same id from original enum
enum_list[-1] = orig_err_to_id_dict[self.val_error_dict[enum]['error_msg']].split('_')[-1]
new_enum = "_".join(enum_list)
if new_enum in updated_val_error_dict:
print "ERROR: About to overwrite entry for %s" % (new_enum)
update_enum = new_enum
else:
# No error match:
# First check if only link has changed, in which case keep ID but update message
orig_msg_list = orig_error_msg_dict[enum].split('(', 1)
new_msg_list = self.val_error_dict[enum]['error_msg'].split('(', 1)
if orig_msg_list[0] == new_msg_list[0]: # Msg is the same but the link has changed, keep enum & update msg
print "NOTE: Found that only spec link changed for %s so keeping same id w/ new link" % (enum)
# This seems to be a new error so need to pick it up from end of original unique ids & flag for review
else:
enum_list[-1] = "%05d" % (next_id)
new_enum = "_".join(enum_list)
next_id = next_id + 1
print "MANUALLY VERIFY: Updated new enum %s to be unique %s. Make sure new error msg is actually unique and not just changed" % (enum, new_enum)
print " New error string: %s" % (self.val_error_dict[enum]['error_msg'])
if new_enum in updated_val_error_dict:
print "ERROR: About to overwrite entry for %s" % (new_enum)
update_enum = new_enum
else: # new enum is not in orig db
if self.val_error_dict[enum]['error_msg'] in orig_err_to_id_dict:
print "New enum %s not in orig dict, but exact error message matches original unique id %s" % (enum, orig_err_to_id_dict[self.val_error_dict[enum]['error_msg']])
# Update new unique_id to use original
enum_list[-1] = orig_err_to_id_dict[self.val_error_dict[enum]['error_msg']].split('_')[-1]
new_enum = "_".join(enum_list)
if new_enum in updated_val_error_dict:
print "ERROR: About to overwrite entry for %s" % (new_enum)
update_enum = new_enum
else:
enum_list[-1] = "%05d" % (next_id)
new_enum = "_".join(enum_list)
next_id = next_id + 1
print "Completely new id and error code, update new id from %s to unique %s" % (enum, new_enum)
if new_enum in updated_val_error_dict:
print "ERROR: About to overwrite entry for %s" % (new_enum)
update_enum = new_enum
updated_val_error_dict[update_enum] = {}
updated_val_error_dict[update_enum]['error_msg'] = update_msg
updated_val_error_dict[update_enum]['api'] = update_api
updated_val_error_dict[update_enum]['implicit'] = implicit
# Assign parsed dict to be the updated dict based on db compare
print "In compareDB parsed %d entries" % (ids_parsed)
return updated_val_error_dict
def validateUpdateDict(self, update_dict):
"""Compare original dict vs. update dict and make sure that all of the checks are still there"""
# Currently just make sure that the same # of checks as the original checks are there
#orig_ids = {}
orig_id_count = len(self.val_error_dict)
#update_ids = {}
update_id_count = len(update_dict)
if orig_id_count != update_id_count:
print "Original dict had %d unique_ids, but updated dict has %d!" % (orig_id_count, update_id_count)
return False
print "Original dict and updated dict both have %d unique_ids. Great!" % (orig_id_count)
return True
# TODO : include some more analysis
# User passes in arg of form <new_id1>-<old_id1>[,count1]:<new_id2>-<old_id2>[,count2]:...
# new_id# = the new enum id that was assigned to an error
# old_id# = the previous enum id that was assigned to the same error
# [,count#] = The number of ids to remap starting at new_id#=old_id# and ending at new_id[#+count#-1]=old_id[#+count#-1]
# If not supplied, then ,1 is assumed, which will only update a single id
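# For example (hypothetical ids): '-remap 00123-00100,2:00150-00140' produces
#   remap_dict = {'00123': '00100', '00124': '00101', '00150': '00140'}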
def updateRemapDict(remap_string):
"""Set up global remap_dict based on user input"""
remap_list = remap_string.split(":")
for rmap in remap_list:
count = 1 # Default count if none supplied
id_count_list = rmap.split(',')
if len(id_count_list) > 1:
count = int(id_count_list[1])
new_old_id_list = id_count_list[0].split('-')
for offset in range(count):
remap_dict["%05d" % (int(new_old_id_list[0]) + offset)] = "%05d" % (int(new_old_id_list[1]) + offset)
for new_id in sorted(remap_dict):
print "Set to remap new id %s to old id %s" % (new_id, remap_dict[new_id])
if __name__ == "__main__":
i = 1
use_online = True # Attempt to grab spec from online by default
update_option = False
while (i < len(sys.argv)):
arg = sys.argv[i]
i = i + 1
if (arg == '-spec'):
spec_filename = sys.argv[i]
# If user specifies local specfile, skip online
use_online = False
i = i + 1
elif (arg == '-out'):
out_filename = sys.argv[i]
i = i + 1
elif (arg == '-gendb'):
gen_db = True
# Set filename if supplied, else use default
if i < len(sys.argv) and not sys.argv[i].startswith('-'):
db_filename = sys.argv[i]
i = i + 1
elif (arg == '-compare'):
db_filename = sys.argv[i]
spec_compare = True
i = i + 1
elif (arg == '-update'):
update_option = True
spec_compare = True
gen_db = True
elif (arg == '-remap'):
updateRemapDict(sys.argv[i])
i = i + 1
elif (arg in ['-help', '-h']):
printHelp()
sys.exit()
if len(remap_dict) > 1 and not update_option:
print "ERROR: '-remap' option can only be used along with '-update' option. Exiting."
sys.exit()
spec = Specification()
spec.loadFile(use_online, spec_filename)
#spec.parseTree()
#spec.genHeader(out_filename)
spec.analyze()
if (spec_compare):
# Read in old spec info from db file
(orig_err_msg_dict, max_id) = spec.readDB(db_filename)
# New spec data should already be read into self.val_error_dict
updated_dict = spec.compareDB(orig_err_msg_dict, max_id)
update_valid = spec.validateUpdateDict(updated_dict)
if update_valid:
spec.updateDict(updated_dict)
else:
sys.exit()
if (gen_db):
spec.genDB(db_filename)
print "Writing out file (-out) to '%s'" % (out_filename)
spec.genHeader(out_filename)
##### Example dataset
# <div class="sidebar">
# <div class="titlepage">
# <div>
# <div>
# <p class="title">
# <strong>Valid Usage</strong> # When we get to this guy, we know we're under interesting sidebar
# </p>
# </div>
# </div>
# </div>
# <div class="itemizedlist">
# <ul class="itemizedlist" style="list-style-type: disc; ">
# <li class="listitem">
# <em class="parameter">
# <code>device</code>
# </em>
# <span class="normative">must</span> be a valid
# <code class="code">VkDevice</code> handle
# </li>
# <li class="listitem">
# <em class="parameter">
# <code>commandPool</code>
# </em>
# <span class="normative">must</span> be a valid
# <code class="code">VkCommandPool</code> handle
# </li>
# <li class="listitem">
# <em class="parameter">
# <code>flags</code>
# </em>
# <span class="normative">must</span> be a valid combination of
# <code class="code">
# <a class="link" href="#VkCommandPoolResetFlagBits">VkCommandPoolResetFlagBits</a>
# </code> values
# </li>
# <li class="listitem">
# <em class="parameter">
# <code>commandPool</code>
# </em>
# <span class="normative">must</span> have been created, allocated, or retrieved from
# <em class="parameter">
# <code>device</code>
# </em>
# </li>
# <li class="listitem">All
# <code class="code">VkCommandBuffer</code>
# objects allocated from
# <em class="parameter">
# <code>commandPool</code>
# </em>
# <span class="normative">must</span> not currently be pending execution
# </li>
# </ul>
# </div>
# </div>
##### Second example dataset
# <div class="sidebar">
# <div class="titlepage">
# <div>
# <div>
# <p class="title">
# <strong>Valid Usage</strong>
# </p>
# </div>
# </div>
# </div>
# <div class="itemizedlist">
# <ul class="itemizedlist" style="list-style-type: disc; ">
# <li class="listitem">The <em class="parameter"><code>queueFamilyIndex</code></em> member of any given element of <em class="parameter"><code>pQueueCreateInfos</code></em> <span class="normative">must</span> be unique within <em class="parameter"><code>pQueueCreateInfos</code></em>
# </li>
# </ul>
# </div>
# </div>
# <div class="sidebar">
# <div class="titlepage">
# <div>
# <div>
# <p class="title">
# <strong>Valid Usage (Implicit)</strong>
# </p>
# </div>
# </div>
# </div>
# <div class="itemizedlist"><ul class="itemizedlist" style="list-style-type: disc; "><li class="listitem">
#<em class="parameter"><code>sType</code></em> <span class="normative">must</span> be <code class="code">VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO</code>
#</li><li class="listitem">
#<em class="parameter"><code>pNext</code></em> <span class="normative">must</span> be <code class="literal">NULL</code>
#</li><li class="listitem">
#<em class="parameter"><code>flags</code></em> <span class="normative">must</span> be <code class="literal">0</code>
#</li><li class="listitem">
#<em class="parameter"><code>pQueueCreateInfos</code></em> <span class="normative">must</span> be a pointer to an array of <em class="parameter"><code>queueCreateInfoCount</code></em> valid <code class="code">VkDeviceQueueCreateInfo</code> structures
#</li>
``` |
{
"source": "jj-zhu/jadagger",
"score": 2
} |
#### File: jj-zhu/jadagger/run_dagger.py
```python
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
def main():
#===========================================================================
# generate expert data
#===========================================================================
# param
expert_policy_file = 'experts/Humanoid-v1.pkl'
envname = 'Humanoid-v1'
render = 1
num_rollouts = 25
max_timesteps = 0
# policy_fn contains expert policy
policy_fn = load_policy.load_policy(expert_policy_file)
with tf.Session():
tf_util.initialize()
import gym
env = gym.make(envname)
max_steps = max_timesteps or env.spec.timestep_limit
returns = []
observations = []
actions = []
for i in range(num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = policy_fn(obs[None, :])
# action using expert policy policy_fn
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if render:
env.render()
if steps % 100 == 0: print("%i/%i" % (steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
# pass observations, actions to imitation learning
obs_data = np.squeeze(np.array(observations))
act_data = np.squeeze(np.array(actions))
save_expert_mean = np.mean(returns)
save_expert_std = np.std(returns)
#===========================================================================
# set up the network structure for the imitation learning policy function
#===========================================================================
# dim for input/output
obs_dim = obs_data.shape[1]
act_dim = act_data.shape[1]
# architecture of the MLP policy function
x = tf.placeholder(tf.float32, shape=[None, obs_dim])
yhot = tf.placeholder(tf.float32, shape=[None, act_dim])
h1 = tf.layers.dense(inputs=x, units=128, activation=tf.nn.relu)
h2 = tf.layers.dense(inputs=h1, units=64, activation=tf.nn.relu)
h3 = tf.layers.dense(inputs=h2, units=32, activation=tf.nn.relu)
yhat = tf.layers.dense(inputs=h3, units=act_dim, activation=None)
loss_l2 = tf.reduce_mean(tf.square(yhot - yhat))
train_step = tf.train.AdamOptimizer().minimize(loss_l2)
#===========================================================================
# run DAgger alg
#===========================================================================
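# Each DAgger iteration below: (1) fit the MLP policy to the aggregated (obs_data, act_data),
# (2) roll out the learned policy to collect fresh observations, (3) query the expert policy_fn
# for actions on those observations, and (4) aggregate the newly labeled pairs into the dataset.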
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# record return and std for plotting
save_mean = []
save_std = []
save_train_size = []
# loop for dagger alg
for i_dagger in xrange(50):
print 'DAgger iteration ', i_dagger
# train a policy by fitting the MLP
batch_size = 25
for step in range(10000):
batch_i = np.random.randint(0, obs_data.shape[0], size=batch_size)
train_step.run(feed_dict={x: obs_data[batch_i, ], yhot: act_data[batch_i, ]})
if (step % 1000 == 0):
print 'optimization step ', step
print 'obj value is ', loss_l2.eval(feed_dict={x:obs_data, yhot:act_data})
print 'Optimization Finished!'
# use trained MLP to perform
max_steps = env.spec.timestep_limit
returns = []
observations = []
actions = []
for i in range(num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = yhat.eval(feed_dict={x:obs[None, :]})
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if render:
env.render()
if steps % 100 == 0: print("%i/%i" % (steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
# expert labeling
act_new = []
for i_label in xrange(len(observations)):
act_new.append(policy_fn(observations[i_label][None, :]))
# record training size
train_size = obs_data.shape[0]
# data aggregation
obs_data = np.concatenate((obs_data, np.array(observations)), axis=0)
act_data = np.concatenate((act_data, np.squeeze(np.array(act_new))), axis=0)
# record mean return & std
save_mean = np.append(save_mean, np.mean(returns))
save_std = np.append(save_std, np.std(returns))
save_train_size = np.append(save_train_size, train_size)
dagger_results = {'means': save_mean, 'stds': save_std, 'train_size': save_train_size,
'expert_mean':save_expert_mean, 'expert_std':save_expert_std}
print 'DAgger iterations finished!'
if __name__ == '__main__':
main()
``` |
{
"source": "jjzhunet9/mmdgm",
"score": 2
} |
#### File: mmdgm/conv-mmdgm/c_6layer_svhn_imputation_1000_for_test.py
```python
import os
import sys
import time
import math
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.shared_randomstreams
from util import datapy, color, paramgraphics
#from optimization import optimizer
from optimization import optimizer_separated
from layer import FullyConnected, nonlinearity
from layer import GaussianHidden, NoParamsGaussianVisiable,Pegasos
#from layer import ConvMaxPool_GauInit_DNN, UnpoolConvNon_GauInit_DNN
from layer import ConvMaxPool_GauInit_DNN, UnpoolConvNon_GauInit_DNN
def c_6layer_svhn_imputation(seed=0, ctype='cva',
pertub_type=5, pertub_prob=0, pertub_prob1=16, visualization_times=20,
denoise_times=200, predir=None, n_batch=900, batch_size=500):
"""
Missing data imputation
"""
'''
svhn
'''
n_channels = 3
colorImg = True
dim_w = 32
dim_h = 32
dim_input=(dim_h, dim_w)
n_classes = 10
first_drop=0.6
if os.environ.has_key('first_drop'):
first_drop = float(os.environ['first_drop'])
last_drop=1
if os.environ.has_key('last_drop'):
last_drop = float(os.environ['last_drop'])
nkerns_1=96
if os.environ.has_key('nkerns_1'):
nkerns_1 = int(os.environ['nkerns_1'])
nkerns_2=96
if os.environ.has_key('nkerns_2'):
nkerns_2 = int(os.environ['nkerns_2'])
opt_med='mom'
if os.environ.has_key('opt_med'):
opt_med = os.environ['opt_med']
train_logvar=True
if os.environ.has_key('train_logvar'):
train_logvar = bool(int(os.environ['train_logvar']))
dataset='svhnlcn'
if os.environ.has_key('dataset'):
dataset = os.environ['dataset']
n_z=256
if os.environ.has_key('n_z'):
n_z = int(os.environ['n_z'])
#cp->cd->cpd->cd->c
nkerns=[nkerns_1, nkerns_1, nkerns_1, nkerns_2, nkerns_2]
drops=[0, 1, 1, 1, 0, 1]
drop_p=[1, first_drop, first_drop, first_drop, 1, last_drop]
n_hidden=[n_z]
logdir = 'results/imputation/'+ctype+'/svhn/'+ctype+'_6layer_'+dataset+'_'
logdir += str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print predir
with open(logdir+'hook.txt', 'a') as f:
print >>f, predir
color.printRed('dataset '+dataset)
test_set_x, test_set_x_pertub, pertub_label, pertub_number = datapy.load_pertub_data_svhn(dirs='data_imputation/', dataset=dataset, pertub_type=pertub_type, pertub_prob=pertub_prob, pertub_prob1=pertub_prob1)
pixel_max, pixel_min = datapy.load_max_min(dirs='data_imputation/', dataset=dataset, pertub_prob=pertub_prob)
# compute number of minibatches for training, validation and testing
#n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
#n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
random_z = T.matrix('random_z')
x_pertub = T.matrix('x_pertub') # the data is presented as rasterized images
p_label = T.matrix('p_label')
drop = T.iscalar('drop')
activation = nonlinearity.relu
rng = np.random.RandomState(seed)
rng_share = theano.tensor.shared_randomstreams.RandomStreams(0)
input_x = x_pertub.reshape((batch_size, n_channels, dim_h, dim_w))
recg_layer = []
cnn_output = []
l = []
d = []
#1
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, n_channels, dim_h, dim_w),
filter_shape=(nkerns[0], n_channels, 5, 5),
poolsize=(2, 2),
border_mode='same',
activation=activation
))
if drops[0]==1:
cnn_output.append(recg_layer[-1].drop_output(input=input_x, drop=drop, rng=rng_share, p=drop_p[0]))
else:
cnn_output.append(recg_layer[-1].output(input=input_x))
l+=[1, 2]
d+=[1, 1]
#2
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[0], 16, 16),
filter_shape=(nkerns[1], nkerns[0], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation
))
if drops[1]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[1]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 1]
#3
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[1], 16, 16),
filter_shape=(nkerns[2], nkerns[1], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation
))
if drops[2]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[2]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 1]
#4
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[2], 8, 8),
filter_shape=(nkerns[3], nkerns[2], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation
))
if drops[3]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[3]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 1]
#5
'''
--------------------- (2,2) or (4,4)
'''
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[3], 8, 8),
filter_shape=(nkerns[4], nkerns[3], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation
))
if drops[4]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[4]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 1]
mlp_input_x = cnn_output[-1].flatten(2)
activations = []
activations.append(mlp_input_x)
#1
'''
---------------------No MLP
'''
'''
recg_layer.append(FullyConnected.FullyConnected(
rng=rng,
n_in= 4 * 4 * nkerns[-1],
n_out=n_hidden[0],
activation=activation
))
if drops[-1]==1:
activations.append(recg_layer[-1].drop_output(input=mlp_input_x, drop=drop, rng=rng_share, p=drop_p[-1]))
else:
activations.append(recg_layer[-1].output(input=mlp_input_x))
'''
#stochastic layer
recg_layer.append(GaussianHidden.GaussianHidden(
rng=rng,
input=activations[-1],
n_in=4 * 4 * nkerns[-1],
n_out=n_hidden[0],
activation=None
))
l+=[1, 2]
d+=[1, 1]
l+=[1, 2]
d+=[1, 1]
z = recg_layer[-1].sample_z(rng_share)
gene_layer = []
z_output = []
random_z_output = []
#1
gene_layer.append(FullyConnected.FullyConnected(
rng=rng,
n_in=n_hidden[0],
n_out = 4*4*nkerns[-1],
activation=activation
))
z_output.append(gene_layer[-1].output(input=z))
random_z_output.append(gene_layer[-1].output(input=random_z))
l+=[1, 2]
d+=[1, 1]
#2
'''
gene_layer.append(FullyConnected.FullyConnected(
rng=rng,
n_in=n_hidden[0],
n_out = 4*4*nkerns[-1],
activation=activation
))
if drop_inverses[0]==1:
z_output.append(gene_layer[-1].drop_output(input=z_output[-1], drop=drop_inverse, rng=rng_share))
random_z_output.append(gene_layer[-1].drop_output(input=random_z_output[-1], drop=drop_inverse, rng=rng_share))
else:
z_output.append(gene_layer[-1].output(input=z_output[-1]))
random_z_output.append(gene_layer[-1].output(input=random_z_output[-1]))
'''
input_z = z_output[-1].reshape((batch_size, nkerns[-1], 4, 4))
input_random_z = random_z_output[-1].reshape((n_batch, nkerns[-1], 4, 4))
#1
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-1], 4, 4),
filter_shape=(nkerns[-2], nkerns[-1], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 1]
z_output.append(gene_layer[-1].output(input=input_z))
random_z_output.append(gene_layer[-1].output_random_generation(input=input_random_z, n_batch=n_batch))
#2
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-2], 8, 8),
filter_shape=(nkerns[-3], nkerns[-2], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 1]
z_output.append(gene_layer[-1].output(input=z_output[-1]))
random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
#3
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-3], 8, 8),
filter_shape=(nkerns[-4], nkerns[-3], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 1]
z_output.append(gene_layer[-1].output(input=z_output[-1]))
random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
#4
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-4], 16, 16),
filter_shape=(nkerns[-5], nkerns[-4], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 1]
z_output.append(gene_layer[-1].output(input=z_output[-1]))
random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
#5-1 stochastic layer
# for this layer, the activation is None to get a Gaussian mean
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-5], 16, 16),
filter_shape=(n_channels, nkerns[-5], 5, 5),
poolsize=(2, 2),
border_mode='same',
activation=None
))
l+=[1, 2]
d+=[1, 1]
x_mean=gene_layer[-1].output(input=z_output[-1])
random_x_mean=gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch)
#5-2 stochastic layer
# for this layer, the activation is None to get logvar
if train_logvar:
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-5], 16, 16),
filter_shape=(n_channels, nkerns[-5], 5, 5),
poolsize=(2, 2),
border_mode='same',
activation=None
))
l+=[1, 2]
d+=[1, 1]
x_logvar=gene_layer[-1].output(input=z_output[-1])
random_x_logvar=gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch)
else:
x_logvar = theano.shared(np.ones((batch_size, n_channels, dim_h, dim_w), dtype='float32'))
random_x_logvar = theano.shared(np.ones((n_batch, n_channels, dim_h, dim_w), dtype='float32'))
gene_layer.append(NoParamsGaussianVisiable.NoParamsGaussianVisiable(
#rng=rng,
#mean=z_output[-1],
#data=input_x,
))
logpx = gene_layer[-1].logpx(mean=x_mean, logvar=x_logvar, data=input_x)
random_x = gene_layer[-1].sample_x(rng_share=rng_share, mean=random_x_mean, logvar=random_x_logvar)
x_denoised = p_label*x+(1-p_label)*x_mean.flatten(2)
mse = ((x - x_denoised)**2).sum() / pertub_number
params=[]
for g in gene_layer:
params+=g.params
for r in recg_layer:
params+=r.params
'''
train_activations = theano.function(
inputs=[index],
outputs=T.concatenate(activations, axis=1),
givens={
x_pertub: train_set_x[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
'''
'''
valid_activations = theano.function(
inputs=[index],
outputs=T.concatenate(activations, axis=1),
givens={
x_pertub: valid_set_x[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
'''
test_activations = theano.function(
inputs=[x_pertub],
outputs=T.concatenate(activations, axis=1),
givens={
drop: np.cast['int32'](0)
}
)
imputation_model = theano.function(
inputs=[index, x_pertub],
outputs=[x_denoised, mse],
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
p_label:pertub_label[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0),
#drop_inverse: np.cast['int32'](0)
}
)
##################
# Pretrain MODEL #
##################
model_epoch = 100
if os.environ.has_key('model_epoch'):
model_epoch = int(os.environ['model_epoch'])
if predir is not None:
color.printBlue('... setting parameters')
color.printBlue(predir)
if model_epoch == -1:
pre_train = np.load(predir+'best-model.npz')
else:
pre_train = np.load(predir+'model-'+str(model_epoch)+'.npz')
pre_train = pre_train['model']
if ctype == 'cva':
for (para, pre) in zip(params, pre_train):
para.set_value(pre)
elif ctype == 'cmmva':
for (para, pre) in zip(params, pre_train[:-2]):
para.set_value(pre)
else:
exit()
else:
exit()
###############
# TRAIN MODEL #
###############
print '... training'
scale = False
epoch = 0
n_visualization = 900
pixel_max = pixel_max[:n_visualization]
pixel_min = pixel_min[:n_visualization]
output = np.ones((n_visualization, visualization_times+2, n_channels*dim_input[0]*dim_input[1]))
output[:,0,:] = test_set_x.get_value()[:n_visualization,:]
output[:,1,:] = test_set_x_pertub.get_value()[:n_visualization,:]
image = paramgraphics.mat_to_img(paramgraphics.scale_max_min(output[:,0,:].T,pixel_max,pixel_min), dim_input, colorImg=colorImg, scale=scale)
image.save(logdir+'data.png', 'PNG')
image = paramgraphics.mat_to_img(paramgraphics.scale_max_min(output[:,1,:].T,pixel_max,pixel_min), dim_input, colorImg=colorImg, scale=scale)
image.save(logdir+'data_pertub.png', 'PNG')
tmp = test_set_x_pertub.get_value()
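# Iterative imputation: each pass keeps the observed pixels (where p_label == 1) and replaces the
# perturbed pixels with the model's reconstructed mean, then feeds the result back in as the next input.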
while epoch < denoise_times:
epoch = epoch + 1
for i in xrange(n_test_batches):
d, m = imputation_model(i, tmp[i * batch_size: (i + 1) * batch_size])
tmp[i * batch_size: (i + 1) * batch_size] = np.asarray(d)
if epoch<=visualization_times:
output[:,epoch+1,:] = tmp[:n_visualization,:]
image = paramgraphics.mat_to_img(paramgraphics.scale_max_min(tmp[:n_visualization,:].T,pixel_max,pixel_min), dim_input, colorImg=colorImg, scale=scale)
image.save(logdir+'procedure-'+str(epoch)+'.png', 'PNG')
np.savez(logdir+'procedure-'+str(epoch), tmp=tmp)
'''
image = paramgraphics.mat_to_img((output.reshape(-1,32*32*3)).T, dim_input, colorImg=colorImg, tile_shape=(n_visualization,22), scale=scale)
image.save(logdir+'output.png', 'PNG')
np.savez(logdir+'output', output=output)
'''
'''
# save original train features and denoise test features
for i in xrange(n_train_batches):
if i == 0:
train_features = np.asarray(train_activations(i))
else:
train_features = np.vstack((train_features, np.asarray(train_activations(i))))
for i in xrange(n_valid_batches):
if i == 0:
valid_features = np.asarray(valid_activations(i))
else:
valid_features = np.vstack((valid_features, np.asarray(valid_activations(i))))
for i in xrange(n_test_batches):
if i == 0:
test_features = np.asarray(test_activations(tmp[i * batch_size: (i + 1) * batch_size]))
else:
test_features = np.vstack((test_features, np.asarray(test_activations(tmp[i * batch_size: (i + 1) * batch_size]))))
np.save(logdir+'train_features', train_features)
#np.save(logdir+'valid_features', valid_features)
np.save(logdir+'test_features', test_features)
'''
if __name__ == '__main__':
ctype = sys.argv[1]
pertub_type = int(sys.argv[2])
pertub_prob = float(sys.argv[3])
pertub_prob1 = float(sys.argv[4])
denoise_times = int(sys.argv[5])
predir = sys.argv[6]
c_6layer_svhn_imputation(ctype=ctype, denoise_times=denoise_times,
pertub_type=pertub_type, pertub_prob=pertub_prob,
pertub_prob1=pertub_prob1, predir=predir)
```
#### File: mmdgm/conv-mmdgm/cnn_6layer_svhn.py
```python
import os
import sys
import time
import numpy as np
import theano
import theano.tensor as T
from layer import ConvMaxPool_GauInit_DNN
from layer import FullyConnected
from layer import LogisticRegression, Pegasos
from util import datapy, color
from layer import nonlinearity
from optimization import optimizer_separated
def deep_cnn_6layer_svhn_final_svm(learning_rate=0.01,
n_epochs=500,
dataset='svhngcn_var',
batch_size=500,
dropout_flag=1,
seed=0,
predir=None,
preepoch=10,
activation=None,
weight_decay=1e-4):
'''
svhn
'''
n_channels = 3
dim_w = 32
dim_h = 32
n_classes = 10
epoch_threshold = 200
if os.environ.has_key('epoch_threshold'):
epoch_threshold = int(os.environ['epoch_threshold'])
first_drop=0.6
if os.environ.has_key('first_drop'):
first_drop = float(os.environ['first_drop'])
last_drop=1
if os.environ.has_key('last_drop'):
last_drop = float(os.environ['last_drop'])
nkerns_1=96
if os.environ.has_key('nkerns_1'):
nkerns_1 = int(os.environ['nkerns_1'])
nkerns_2=96
if os.environ.has_key('nkerns_2'):
nkerns_2 = int(os.environ['nkerns_2'])
opt_med='adam'
if os.environ.has_key('opt_med'):
opt_med = os.environ['opt_med']
std = 2e-2
if os.environ.has_key('std'):
std = os.environ['std']
pattern = 'hinge'
if os.environ.has_key('pattern'):
pattern = os.environ['pattern']
Loss_L = 1
if os.environ.has_key('Loss_L'):
Loss_L = float(os.environ['Loss_L'])
#cp->cd->cpd->cd->c
nkerns=[nkerns_1, nkerns_1, nkerns_1, nkerns_2, nkerns_2]
drops=[0, 1, 1, 1, 0, 1]
drop_p=[1, first_drop, first_drop, first_drop, 1, last_drop]
#skerns=[5, 3, 3, 3, 3]
#pools=[2, 1, 2, 1, 1]
#modes=['same']*5
logdir = 'results/supervised/cnn/svhn/deep_cnn_6layer_'+pattern+'_'+dataset+str(nkerns)+str(drops)+'_'+str(weight_decay)+'_'+str(learning_rate)+'_'+str(std)+'_'+str(Loss_L)+'_'+str(int(time.time()))+'/'
if dropout_flag==1:
logdir = 'results/supervised/cnn/svhn/deep_cnn_6layer_'+pattern+'_'+dataset+str(drop_p)+str(nkerns)+str(drops)+'_'+str(weight_decay)+'_'+str(learning_rate)+'_'+str(std)+'_'+str(Loss_L)+'_dropout_'+str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print 'logdir:', logdir
print 'deep_cnn_6layer_svm', nkerns, drops, drop_p, seed, dropout_flag
print 'epoch_threshold', epoch_threshold, 'opt_med', opt_med
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'logdir:', logdir
print >>f, 'epoch_threshold', epoch_threshold, 'opt_med', opt_med
print >>f, 'deep_cnn_6layer_svm', nkerns, drops, drop_p, seed, dropout_flag
rng = np.random.RandomState(0)
rng_share = theano.tensor.shared_randomstreams.RandomStreams(0)
color.printRed('dataset '+dataset)
datasets = datapy.load_data_svhn(dataset, have_matrix=True)
train_set_x, train_set_y, train_y_matrix = datasets[0]
test_set_x, test_set_y, test_y_matrix = datasets[1]
valid_set_x, valid_set_y, valid_y_matrix = datasets[2]
#datasets = datapy.load_data_svhn(dataset, have_matrix=False)
#train_set_x, train_set_y = datasets[0]
#test_set_x, test_set_y = datasets[1]
#valid_set_x, valid_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_valid_batches /= batch_size
n_test_batches /= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
'''
dropout
'''
drop = T.iscalar('drop')
y_matrix = T.imatrix('y_matrix') # labels, presented as 2D matrix of int labels
print '... building the model'
layer0_input = x.reshape((batch_size, n_channels, dim_h, dim_w))
if activation =='nonlinearity.relu':
activation = nonlinearity.relu
elif activation =='nonlinearity.tanh':
activation = nonlinearity.tanh
elif activation =='nonlinearity.softplus':
activation = nonlinearity.softplus
recg_layer = []
cnn_output = []
l = []
d = []
#1
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, n_channels, dim_h, dim_w),
filter_shape=(nkerns[0], n_channels, 5, 5),
poolsize=(2, 2),
border_mode='same',
activation=activation,
std=std
))
if drops[0]==1:
cnn_output.append(recg_layer[-1].drop_output(layer0_input, drop=drop, rng=rng_share, p=drop_p[0]))
else:
cnn_output.append(recg_layer[-1].output(layer0_input))
l+=[1, 2]
d+=[1, 0]
#2
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[0], 16, 16),
filter_shape=(nkerns[1], nkerns[0], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation,
std=std
))
if drops[1]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[1]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
#3
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[1], 16, 16),
filter_shape=(nkerns[2], nkerns[1], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation,
std=std
))
if drops[2]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[2]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
#4
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[2], 8, 8),
filter_shape=(nkerns[3], nkerns[2], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation,
std=std
))
if drops[3]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[3]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
#5
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[3], 8, 8),
filter_shape=(nkerns[4], nkerns[3], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation,
std=std
))
if drops[4]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[4]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
feature = cnn_output[-1].flatten(2)
# classify the values of the fully-connected sigmoidal layer
'''
use a large weight initialization (std) for the Pegasos classifier to avoid vanishing gradients
'''
std_pegasos=std
weight_decay_pegasos=weight_decay
classifier = Pegasos.Pegasos(input=feature, rng=rng, n_in=nkerns[-1]*4*4, n_out=n_classes, weight_decay=0, loss=Loss_L, std=std_pegasos, pattern=pattern)
#classifier = LogisticRegression.LogisticRegression(
# input=feature,
# n_in=nkerns[-1],
# n_out=n_classes
# )
l+=[1, 2]
d+=[weight_decay_pegasos / weight_decay, 0]
# the cost we minimize during training is the NLL of the model
cost = classifier.hinge_loss(n_classes, y, y_matrix)
#cost = classifier.negative_log_likelihood(y)
params=[]
for r in recg_layer:
params+=r.params
params += classifier.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
l_r = theano.shared(np.asarray(learning_rate, dtype=np.float32))
if opt_med=='adam':
get_optimizer = optimizer_separated.get_adam_optimizer_min(learning_rate=l_r, decay1 = 0.1, decay2 = 0.001, weight_decay=weight_decay)
elif opt_med=='mom':
get_optimizer = optimizer_separated.get_momentum_optimizer_min(learning_rate=l_r, weight_decay=weight_decay)
updates = get_optimizer(w=params,g=grads, l=l, d=d)
pog = []
for (p,g) in zip(params, grads):
pog.append(p.max())
pog.append((p**2).mean())
pog.append((g**2).mean())
pog.append((T.sqrt(pog[-2] / pog[-1]))/ 1e3)
paramovergrad = theano.function(
inputs=[index],
outputs=pog,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size],
y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](dropout_flag)
}
)
parameters = theano.function(
inputs=[],
outputs=params,
)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
valid_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
train_activations = theano.function(
inputs=[index],
outputs=feature,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
test_activations = theano.function(
inputs=[index],
outputs=feature,
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
train_model = theano.function(
inputs=[index],
outputs=[cost, classifier.errors(y)],
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size],
y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](dropout_flag)
}
)
if predir is not None:
color.printBlue('... setting parameters')
color.printBlue(predir)
pre_train = np.load(predir+'svhn_model-'+str(preepoch)+'.npz')
pre_train = pre_train['model']
for (para, pre) in zip(params, pre_train):
para.set_value(pre)
this_test_losses = [test_model(i) for i in xrange(n_test_batches)]
this_test_score = np.mean(this_test_losses)
#print predir
print 'preepoch', preepoch, 'prescore', this_test_score
with open(logdir+'hook.txt', 'a') as f:
print >>f, predir
print >>f, 'preepoch', preepoch, 'prescore', this_test_score
print '... training'
validation_frequency = n_train_batches/10
best_train_loss = 10000.0
best_valid_score = 10000.0
best_epoch = 0
test_score = 0
start_time = time.clock()
epoch = 0
n_epochs = 100
test_epochs = 40
record = 0
'''
pog = [paramovergrad(i) for i in xrange(n_train_batches)]
pog = np.mean(pog, axis=0)
#print 'before train ----------pog', pog
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'before train ----------pog', pog
'''
while (epoch < n_epochs):
epoch = epoch + 1
tmp1 = time.clock()
preW = None
currentW = None
minibatch_avg_cost = 0
train_error = 0
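# Decay the learning rate by 3x whenever the best validation score has not improved for 7 epochs
# ('record' is reset both here and whenever a new best model is found below).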
if (epoch - record) >= 7:
record = epoch
l_r.set_value(np.cast['float32'](l_r.get_value()/3.0))
print '---------', epoch, l_r.get_value()
with open(logdir+'hook.txt', 'a') as f:
print >>f,'---------', epoch, l_r.get_value()
'''
decay_epoch = epoch - test_epochs
if decay_epoch > 0 and decay_epoch % 30==0:
l_r.set_value(np.cast['float32'](l_r.get_value()/3.0))
print '---------', epoch, l_r.get_value()
with open(logdir+'hook.txt', 'a') as f:
print >>f,'---------', epoch, l_r.get_value()
'''
if epoch%5==0:
'''
for i in xrange(n_train_batches):
if i == 0:
train_features = np.asarray(train_activations(i))
else:
train_features = np.vstack((train_features, np.asarray(train_activations(i))))
for i in xrange(n_test_batches):
if i == 0:
test_features = np.asarray(test_activations(i))
else:
test_features = np.vstack((test_features, np.asarray(test_activations(i))))
np.save(logdir+'train_features-'+str(epoch), train_features)
np.save(logdir+'test_features-'+str(epoch), test_features)
'''
model = parameters()
for i in xrange(len(model)):
model[i] = np.asarray(model[i]).astype(np.float32)
np.savez(logdir+'svhn_model-'+str(epoch), model=model)
for minibatch_index in xrange(n_train_batches):
if (minibatch_index <11):
preW = currentW
currentW = parameters()
for i in xrange(len(currentW)):
currentW[i] = np.asarray(currentW[i]).astype(np.float32)
if preW is not None:
for (c,p) in zip(currentW, preW):
#print minibatch_index, (c**2).mean(), ((c-p)**2).mean(), np.sqrt((c**2).mean()/((c-p)**2).mean())
with open(logdir+'delta_w.txt', 'a') as f:
print >>f,minibatch_index, (c**2).mean(), ((c-p)**2).mean(), np.sqrt((c**2).mean()/((c-p)**2).mean())
co, te = train_model(minibatch_index)
minibatch_avg_cost+=co
train_error+=te
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
print epoch, minibatch_index
with open(logdir+'hook.txt', 'a') as f:
print >>f, epoch, minibatch_index
print 'Stochastic hinge loss and training error', minibatch_avg_cost / float(minibatch_index), train_error / float(minibatch_index)
#print 'time', time.clock() - tmp1
with open(logdir+'hook.txt', 'a') as f:
# print >>f, 'pog', pog
print >>f,'Stochastic hinge loss and training error', minibatch_avg_cost / float(minibatch_index), train_error / float(minibatch_index)
#print >>f,'time', time.clock() - tmp1
this_valid_losses = [valid_model(i) for i in xrange(n_valid_batches)]
this_valid_score = np.mean(this_valid_losses)
print(
'epoch %i, minibatch %i/%i, valid error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
#this_validation_loss * 100,
this_valid_score *100.
)
)
with open(logdir+'hook.txt', 'a') as f:
print >>f, (
'epoch %i, minibatch %i/%i, valid error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
#this_validation_loss * 100,
this_valid_score *100.
)
)
if this_valid_score < best_valid_score:
this_test_losses = [test_model(i) for i in xrange(n_test_batches)]
this_test_score = np.mean(this_test_losses)
best_valid_score = this_valid_score
test_score = this_test_score
best_epoch = epoch
record = epoch
print 'Update best model', this_test_score
with open(logdir+'hook.txt', 'a') as f:
print >>f,'Update best model', this_test_score
print 'So far best model', best_epoch, test_score
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'So far best model', best_epoch, test_score
pogzero = np.asarray(paramovergrad(0))
#print 'pogzero', pogzero
with open(logdir+'pog.txt', 'a') as f:
print >>f, 'pogzero', pogzero
#pog = [paramovergrad(i) for i in xrange(n_train_batches)]
#pog = np.mean(pog, axis=0)
#print 'pog', pog
print 'So far best model', test_score
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'So far best model', test_score
end_time = time.clock()
print 'The code run for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time)))
if __name__ == '__main__':
activation = 'nonlinearity.'+sys.argv[1]
dropout_flag = int(sys.argv[2])
learning_rate = float(sys.argv[3])
weight_decay = float(sys.argv[4])
dataset = 'svhn'+sys.argv[5]
if len(sys.argv) > 6:
predir = sys.argv[6]
preepoch = int(sys.argv[7])
deep_cnn_6layer_svhn_final_svm(dataset=dataset,learning_rate=learning_rate,
activation=activation, dropout_flag=dropout_flag, predir=predir, preepoch=preepoch, weight_decay=weight_decay)
else:
deep_cnn_6layer_svhn_final_svm(dataset=dataset,learning_rate=learning_rate,
activation=activation, dropout_flag=dropout_flag, weight_decay=weight_decay)
```
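For orientation, the `__main__` block above parses five positional arguments plus an optional pretraining pair; a hedged invocation sketch follows (the script filename is an assumption, the argument order mirrors the `sys.argv` parsing above):
```python
# Illustrative invocation only; the script filename is assumed.
#   python cnn_6layer_svhn_svm.py relu 1 0.001 0.0 lcn
# resolves to activation='nonlinearity.relu', dropout_flag=1, learning_rate=0.001,
# weight_decay=0.0 and dataset='svhnlcn'; appending a directory and an epoch number,
# e.g. 'results/pretrain/ 40', would set predir and preepoch to resume from a saved model.
```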
#### File: conv-mmdgm/layer/nonlinearity.py
```python
import theano.tensor as T
import numpy as np
import theano
def dropout(rng, values, p):
mask = rng.binomial(n=1, p=p, size=values.shape, dtype=theano.config.floatX)
output = values * mask
return np.cast[theano.config.floatX](1.0/p) * output
def sigmoid(x):
return T.nnet.sigmoid(x)
def tanh(x):
return T.tanh(x)
def softplus(x):
return T.log(T.exp(x) + 1)
def relu(x):
return x*(x>0)
def relu2(x):
return x*(x>0) + 0.01 * x
def initialize_vector(rng, n_out, std=1e-2):
z = rng.normal(0, std, size=(n_out,))
return np.asarray(z, dtype=theano.config.floatX)
def initialize_matrix(rng, n_in, n_out):
z = rng.normal(0, 1, size=(n_in, n_out)) / np.sqrt(n_in)
return np.asarray(z, dtype=theano.config.floatX)
```
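A minimal sketch of how these helpers could be wired into a Theano graph (illustrative only; the random stream, layer sizes and dropout probability below are assumptions, not values taken from the repository):
```python
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

rng = np.random.RandomState(0)
srng = RandomStreams(seed=1234)
x = T.matrix('x')
W = theano.shared(initialize_matrix(rng, 784, 256), name='W')
b = theano.shared(initialize_vector(rng, 256), name='b')
h = relu(T.dot(x, W) + b)         # hidden activation using the relu defined above
h_drop = dropout(srng, h, 0.5)    # inverted dropout: output is rescaled by 1/p
predict = theano.function([x], h_drop)
```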
#### File: mmdgm/conv-mmdgm/lcn_svhn.py
```python
import scipy.io as io
import numpy as np
import cPickle
import os, sys
import scipy.io as sio
import theano
import theano.tensor as T
import pylearn2.expr.preprocessing as pypp
import pylearn2.datasets.norb as norb
import PIL.Image
import pylab
from util import datapy, color, paramgraphics
from random import shuffle
from util import lcn
def perform_lcn(saveDir,strl,x, y):
n_channels=(0,1,2)
dim_input = (32, 32)
colorImg = True
x = x.astype(np.float32)
print x.shape
print x.max()
print x.min()
print np.max(np.mean(x, axis=1))
print np.min(np.mean(x, axis=1))
print strl
print y[:10]
print y[40:50]
image = paramgraphics.mat_to_img(x[:100,:].T, dim_input, colorImg=colorImg, scale=True)
image.save(saveDir+'svhn_before_lcn_gcn_norm_'+strl+'.png', 'PNG')
#flatten->'b,c,0,1'->'b,0,1,c'
x = x.reshape(-1,3,32,32)
x = np.swapaxes(x, 1, 2)
x = np.swapaxes(x, 2, 3)
lcn.transform(x=x,channels=n_channels,img_shape=dim_input)
#'b,0,1,c'->'b,c,0,1'->flatten
print x.shape
x = np.swapaxes(x, 2, 3)
x = np.swapaxes(x, 1, 2)
x = x.reshape((-1,32*32*3))
print x.max()
print x.min()
print np.max(np.mean(x, axis=1))
print np.min(np.mean(x, axis=1))
image = paramgraphics.mat_to_img(x[:100,:].T, dim_input, colorImg=colorImg, scale=True)
image.save(saveDir+'svhn_after_lcn_gcn_norm_'+strl+'.png', 'PNG')
return x
saveDir = 'data/SVHN/MYDATA/'
f = file("data/SVHN/MYDATA/svhngcn_norm.bin","rb")
train_x = np.load(f)
train_y = np.load(f)
valid_x = np.load(f)
valid_y = np.load(f)
test_x = np.load(f)
test_y = np.load(f)
f.close()
valid_x = perform_lcn(saveDir,'valid', valid_x, valid_y)
test_x = perform_lcn(saveDir,'test', test_x, test_y)
train_x = perform_lcn(saveDir,'train', train_x, train_y)
f = file(saveDir+"svhnlcn.bin","wb")
np.save(f,train_x)
np.save(f,train_y)
np.save(f,valid_x)
np.save(f,valid_y)
np.save(f,test_x)
np.save(f,test_y)
f.close()
f = file(saveDir+"svhnlcn_only_test_for_imputation.bin","wb")
np.save(f,test_x)
np.save(f,test_y)
f.close()
``` |
{
"source": "jjzhuwu/Warfarin-dosing",
"score": 3
} |
#### File: jjzhuwu/Warfarin-dosing/baseline.py
```python
import numpy as np
class Baseline:
def __init__(self):
pass
def fit(self, X, y):
pass
class Fixed_Dose(Baseline):
def predict(self, X):
return np.array([35 for i in range(X.shape[0])])
class Clinical_Dosing_Alg(Baseline):
coef = np.array([[4.0376, -0.2546, 0.0118, 0.0134, -0.6752, 0.4060, 0.0443,\
0, 1.2799, -0.5695]])
def predict(self, X):
"""
        Requires the columns to be the same as load_data.Load_Data.choosen_columns
"""
ones_X = np.concatenate((np.ones((X.shape[0], 1)), X.iloc[:, 0:9]), axis=1)
        return np.square(ones_X @ self.coef.T)
class Pharma_Dosing_Alg(Baseline):
def predict(self, X):
        squared_dosage = 5.6044-0.2614*X['Age_in_decade']\
+0.0087*X['Height (cm)'] +0.0128*X['Weight (kg)'] \
-0.8677*X['VKORC1_A/G'] -1.6974*X['VKORC1_A/A'] \
-0.4854*X['VKORC1_Unknown']-0.5211*X['CYP2C9_*1/*2'] \
-0.9357*X['CYP2C9_*1/*3']-1.0616*X['CYP2C9_*2/*2'] \
-1.9206*X['CYP2C9_*2/*3']-2.3312*X['CYP2C9_*3/*3'] \
-0.2188*X['CYP2C9_Unknown']-0.1092*X['Race_Asian'] \
-0.2760*X['Race_Black or African American'] \
-0.1032*X['Race_Unknown']+1.1816*X['enzyme_inducer']\
-0.5503*X['Amiodarone (Cordarone)']
        return np.square(squared_dosage)
```
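A hedged usage sketch of the three baselines; here `X` is assumed to be the preprocessed feature DataFrame produced elsewhere in the project (columns ordered as `load_data.Load_Data.choosen_columns`) and `y` the observed weekly doses:
```python
import numpy as np
# X, y are assumptions: the project's preprocessed features and true weekly doses.
for model in (Fixed_Dose(), Clinical_Dosing_Alg(), Pharma_Dosing_Alg()):
    model.fit(X, y)                                  # a no-op for these baselines
    pred = np.asarray(model.predict(X)).reshape(-1)  # predicted weekly dose in mg
    print(type(model).__name__, pred[:5])
```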
#### File: jjzhuwu/Warfarin-dosing/linrel.py
```python
import numpy as np
class LinRel:
def __init__(self, K, reward, delta=0.1):
"""
Parameters:
K: number of actions
reward: matrix of size K*K, reward[i][j] gives the deterministic
reward when the true label is i and the prediction is j
"""
self.K = K
self.A_s = np.array(range(K))
self.orig_reward = reward
self.r = self.normalize_reward(reward)
self.delta = delta
self.model_name = "SupLinRel"
def data_load(self, X, y, shuffle=True):
self.X = X.values
self.y = y
X_max = np.max(X, axis=0)
self.Z_max = np.append(X_max, np.ones(self.K-1))
self.T = X.shape[0]
self.d = X.shape[1]
self.Z = np.zeros((self.d+self.K-1, 0))
self.past_reward = np.zeros(0)
if shuffle:
indexes = np.random.permutation(self.T)
self.X = self.X[indexes, :]
self.y = self.y[indexes]
def normalize_reward(self, reward):
"""
Normalize reward such that rewards are in [0, 1]
"""
reward_max = np.max(reward)
reward_min = np.min(reward)
normed_reward = (reward-reward_min)/(reward_max-reward_min)
return normed_reward
def create_zi(self, x_j, i):
"""
Create and normalize z_i so that normed_z_i has L2(Euclidean) norm at most 1.
Assuming z_i = [x_j, 1_{i=1}, ..., 1_{i=K-1}]
"""
z_i = np.zeros(self.d+self.K-1)
z_i[0:x_j.shape[0]] = x_j
if i > 0:
z_i[x_j.shape[0]+i-1]=1
normed_z_i = z_i/self.Z_max/np.sqrt(self.d+self.K-1)
return normed_z_i
def compute_ucb(self, j):
"""
A_s is a subset of {0, 1, ..., K-1}
"""
x_j = self.X[j, :].T
if self.Z.shape[1] < self.d:
return np.array([]), np.array([])
else:
D, U = np.linalg.eig(self.Z @ self.Z.T)
U = U.T
mask_geq_1 = D >= 1
D[D < 0.5] = 0.5
D_inv_masked = np.diag(1/D * mask_geq_1)
"""
W contains the vectors z_i
U_tilde contains the vectors u_i tilde
            V_tilde contains the vectors v_i tilde
"""
W = np.zeros((self.d+self.K-1, self.A_s.shape[0]))
for i in range(self.A_s.shape[0]):
W[:, i] = self.create_zi(x_j, self.A_s[i])
UW = U @ W
U_tilde = (UW.T * mask_geq_1).T
V_tilde = UW - U_tilde
"""
A contains the transpose of a_i
"""
A = U_tilde.T @ D_inv_masked @ U @ self.Z
width = np.sqrt(np.sum(A**2, axis=1)*np.log(2*self.T*self.K/self.delta)) + np.sqrt(np.sum(V_tilde**2, axis=0))
ucb = (self.past_reward @ A.T).reshape(-1) + width
return ucb, width
```
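The file above defines the feature construction and the UCB computation but stops before any driver code, so the loop below is an assumption about how it might be exercised: `X` is a pandas DataFrame of patient features, `y` a NumPy array of true dose buckets in {0, 1, 2}, and the identity reward matrix is just an example choice.
```python
import numpy as np

bandit = LinRel(K=3, reward=np.eye(3))
bandit.data_load(X, y)                      # X: DataFrame, y: np.ndarray (assumed)
for t in range(bandit.T):
    ucb, width = bandit.compute_ucb(t)
    if ucb.size == 0:                       # not enough history yet: pick at random
        action = np.random.randint(bandit.K)
    else:
        action = int(bandit.A_s[np.argmax(ucb)])
    r = bandit.r[bandit.y[t], action]
    # record the chosen arm's feature vector and reward for later rounds
    z_t = bandit.create_zi(bandit.X[t, :].T, action).reshape(-1, 1)
    bandit.Z = np.hstack((bandit.Z, z_t))
    bandit.past_reward = np.append(bandit.past_reward, r)
```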
#### File: jjzhuwu/Warfarin-dosing/utils.py
```python
import numpy as np
import scipy.stats
def dose_to_category(y):
"""
Convert dose from continuous to categorical variables. Label 0 for low dosage, label 1
for medium dosage, and label 2 for high dosage.
"""
if len(y.shape) > 1:
y = y.reshape(-1)
low_bound = 21
high_bound = 49
y_cat = np.zeros((y.shape[0], 3))
y_cat[:,0] = y < low_bound
y_cat[:,1] = np.logical_and(y >= low_bound, y < high_bound)
y_cat[:,2] = y >= high_bound
return np.argmax(y_cat, axis=1)
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
w = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, w
``` |
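A small self-contained example of the two helpers (the dose values and accuracy scores below are made up for illustration):
```python
import numpy as np

doses = np.array([15.0, 30.0, 55.0, 21.0, 49.0])
print(dose_to_category(doses))                 # -> [0 1 2 1 2]
mean, halfwidth = mean_confidence_interval([0.61, 0.64, 0.59, 0.66, 0.62])
print("accuracy = %.3f +/- %.3f" % (mean, halfwidth))
```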
{
"source": "j-jzk/donors_registry",
"score": 2
} |
#### File: migrations/versions/9c632e7c77df_minimum_donation_count_for_each_medal.py
```python
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "9c632e7c77df"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("medals", sa.Column("minimum_donations", sa.Integer(), nullable=True))
op.execute("UPDATE medals SET minimum_donations = 10 WHERE slug = 'br';")
op.execute("UPDATE medals SET minimum_donations = 20 WHERE slug = 'st';")
op.execute("UPDATE medals SET minimum_donations = 40 WHERE slug = 'zl';")
op.execute("UPDATE medals SET minimum_donations = 80 WHERE slug = 'kr3';")
op.execute("UPDATE medals SET minimum_donations = 120 WHERE slug = 'kr2';")
op.execute("UPDATE medals SET minimum_donations = 160 WHERE slug = 'kr1';")
op.execute("UPDATE medals SET minimum_donations = 250 WHERE slug = 'plk';")
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("medals", "minimum_donations")
# ### end Alembic commands ###
```
#### File: donors_registry/registry/app.py
```python
import logging
import sys
from flask import Flask, render_template
from registry import batch, commands, donor, public, user
from registry.extensions import (
bcrypt,
csrf_protect,
db,
debug_toolbar,
login_manager,
migrate,
)
from registry.utils import template_globals
def create_app(config_object="registry.settings"):
"""Create application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split(".")[0])
app.config.from_object(config_object)
app.context_processor(template_globals)
register_extensions(app)
register_blueprints(app)
register_commands(app)
configure_logger(app)
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html"), 404
@app.template_filter("format_time")
def format_time(date, format="%d.%m.%Y %H:%M:%S"):
return date.strftime(format)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
app.register_blueprint(donor.views.blueprint)
app.register_blueprint(batch.views.blueprint)
return None
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.create_user)
app.cli.add_command(commands.install_test_data)
def configure_logger(app):
"""Configure loggers."""
handler = logging.StreamHandler(sys.stdout)
if not app.logger.handlers:
app.logger.addHandler(handler)
```
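A minimal sketch of exercising the factory with Flask's test client (illustrative; it assumes `registry.settings` is importable in the environment):
```python
app = create_app("registry.settings")
with app.test_client() as client:
    response = client.get("/this-page-does-not-exist")
    assert response.status_code == 404   # served by the page_not_found handler above
```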
#### File: registry/donor/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import BooleanField, HiddenField, TextAreaField
from wtforms.validators import DataRequired
from registry.donor.models import AwardedMedals
class RemoveMedalForm(FlaskForm):
rodne_cislo = HiddenField(validators=[DataRequired()])
medal_id = HiddenField(validators=[DataRequired()])
def validate(self):
self.awarded_medal = AwardedMedals.query.get(
(self.rodne_cislo.data, self.medal_id.data)
)
return self.awarded_medal is not None
class AwardMedalForm(FlaskForm):
medal_id = HiddenField(validators=[DataRequired()])
def add_checkboxes(self, rodna_cisla):
for rodne_cislo in rodna_cisla:
name = "rodne_cislo_" + rodne_cislo
checkbox = BooleanField(_form=self, _name="rodne_cislo", default="checked")
checkbox.data = rodne_cislo
setattr(self, name, checkbox)
def add_one_rodne_cislo(self, rodne_cislo):
rodne_cislo_input = HiddenField(
_form=self, _name="rodne_cislo", validators=[DataRequired()]
)
rodne_cislo_input.data = rodne_cislo
setattr(self, "rodne_cislo", rodne_cislo_input)
class NoteForm(FlaskForm):
rodne_cislo = HiddenField(validators=[DataRequired()])
note = TextAreaField("Poznámka k dárci")
```
#### File: donors_registry/registry/utils.py
```python
from flask import flash
from registry.donor.models import Medals
def flash_errors(form, category="warning"):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash(f"{getattr(form, field).label.text} - {error}", category)
def template_globals():
"""
Injected into all templates
- all medals are needed for the nav bar
"""
all_medals = Medals.query.all()
return dict(all_medals=all_medals)
``` |
{
"source": "j-jzk/opi-gcode-interpreter",
"score": 3
} |
#### File: opi-gcode-interpreter/opi-gcode-interpreter/motor_controller.py
```python
import sys
import OPi.GPIO as GPIO
from time import sleep
class MotorController:
def __init__(self):
# Distances in mm that each step of the stepper motor propels each axis
self.mm_per_step = {'x': 0.000523, 'y': 0.000523, 'z': 0.000523}
# This value will be used as speed for the 'rapid movement' of the machine
# Measured in steps per second
self.rapid_speed_maximum = 150.0
# This value will be used as the fastest to move the machine when mill is lowered
# Measured in steps per second
self.work_speed_maximum = 150.0
# These are the pins to control the L293D motor drivers for the CNC
# in the order they are activated
self.control_pins = {'x': (35, 31, 37, 33),
'y': (38, 32, 40, 36)}
# 'z': (23, 27, 17, 22)}
# Tracks how many steps have been taken on each axis at any point
self.current_steps = {'x': 0, 'y': 0, 'z': 0}
# Configure pinout options
GPIO.setboard(GPIO.PCPCPLUS)
GPIO.setmode(GPIO.BOARD)
# Setup motor control pins
for pins in self.control_pins.values():
for pin in pins:
GPIO.setup(pin, GPIO.OUT)
def axis_step(self, axis, direction):
# Interacts with the pins to move the motor to the next step
# Releases holding torque power from previously powered pin
GPIO.output(self.control_pins[axis][self.current_steps[axis] % 4], 0)
# Increments counter that keeps track of which part of the (4-phase) rotation cycle we are in
self.current_steps[axis] += direction
# print(str(axis) + ": " + str(self.current_steps[axis]))
# Power next pin in phase to drive motor
GPIO.output(self.control_pins[axis][self.current_steps[axis] % 4], 1)
# Allows standalone running of the motor_controller script to move carts along axis by amount specified in args
# Syntax is <axis> <direction> <steps> eg python motor_controller.py x -1 2400
if __name__ == "__main__":
motor_controller = MotorController()
for i in range(0, int(sys.argv[3])):
motor_controller.axis_step(sys.argv[1], int(sys.argv[2]))
sleep(1/motor_controller.work_speed_maximum)
GPIO.cleanup()
``` |
{
"source": "j-jzk/opi-web-motor",
"score": 3
} |
#### File: j-jzk/opi-web-motor/app.py
```python
from time import sleep
from flask import Flask, render_template
import atexit
import motor
motor.setup()
app = Flask(__name__)
@app.route('/')
def hello():
return '<a href="/home">here</a>'
@app.route('/home/')
def home():
return render_template('index.html')
@app.route('/set/<value>')
def set(value):
value = int(value)
try:
if value > 0:
for i in range(value):
motor.fw(0.008)
elif value < 0:
for i in range(-value):
motor.bw(0.008)
return "Done moving - position: %d" % (value)
except:
return "Error"
atexit.register(motor.cleanup)
``` |
{
"source": "jk0/pyhole",
"score": 2
} |
#### File: core/irc/message.py
```python
import irc.client as irclib
class Message(object):
def __init__(self, session, message):
self.session = session
self.message = message
@property
def message(self):
return self._message
@message.setter
def message(self, _message):
self._message = _message
@property
def session(self):
return self._session
@session.setter
def session(self, _session):
self._session = _session
def _mangle_msg(self, msg):
"""Prepare the message for sending."""
if not hasattr(msg, "encode"):
try:
msg = str(msg)
except Exception:
self.session.log.error("msg cannot be converted to string")
return
msg = msg.split("\n")
# NOTE(jk0): 10 is completely arbitrary for now.
if len(msg) > 10:
msg = msg[0:8]
msg.append("...")
return msg
def dispatch(self, reply):
raise NotImplementedError("Message Dispatcher is not implemented")
@staticmethod
def getMessage(**kwargs):
return kwargs.pop("full_message")
class Notice(Message):
def __init__(self, session, message, target):
super(Notice, self).__init__(session, message)
self.target = target
def dispatch(self, reply):
"""Dispatch message as notice."""
_reply = self._mangle_msg(reply)
for line in _reply:
self.session.notice(self.target, line)
if irclib.is_channel(self.target):
self.session.log.info("-%s- <%s> %s" % (self.target,
self.session.nick,
line))
else:
self.session.log.info("<%s> %s" % (self.session.nick, line))
class Reply(Message):
def __init__(self, session, message, source, target):
super(Reply, self).__init__(session, message)
self.source = source
self.target = target
def dispatch(self, reply):
"""dispatch message as a reply."""
_reply = self._mangle_msg(reply)
for line in _reply:
if self.session.addressed:
source = self.source.split("!")[0]
self.session.reply(self.target, "%s: %s" % (source, line))
self.session.log.info("-%s- <%s> %s: %s" % (self.target,
self.session.nick,
source, line))
else:
self.session.reply(self.target, line)
if irclib.is_channel(self.target):
self.session.log.info("-%s- <%s> %s" % (self.target,
self.session.nick,
line))
else:
self.session.log.info("<%s> %s" % (self.session.nick,
line))
```
#### File: pyhole/core/utils.py
```python
import argparse
import datetime
import os
import re
import shutil
import threading
from BeautifulSoup import BeautifulStoneSoup
import config
import version
def admin(func):
"""Require admin rights."""
def wrap(self, message, *args, **kwargs):
if message.source in self.session.admins:
return func(self, message, *args, **kwargs)
return message.dispatch("Sorry, you are not authorized to do that.")
wrap.__doc__ = func.__doc__
wrap.__name__ = func.__name__
wrap.__module__ = func.__module__
return wrap
def require_params(func):
"""Require parameters."""
def wrap(self, message, params, *args, **kwargs):
if not params:
message.dispatch(wrap.__doc__)
return
return func(self, message, params, *args, **kwargs)
wrap.__doc__ = func.__doc__
wrap.__name__ = func.__name__
wrap.__module__ = func.__module__
return wrap
def spawn(func):
"""Thread-spawning decorator."""
def wrap(*args, **kwargs):
t = threading.Thread(target=func, args=args, kwargs=kwargs)
t.setDaemon(True)
t.start()
wrap.__doc__ = func.__doc__
wrap.__name__ = func.__name__
wrap.__module__ = func.__module__
return wrap
def decode_entities(html):
"""Strip HTML entities from a string and make it printable."""
html = " ".join(str(x).strip() for x in BeautifulStoneSoup(html,
convertEntities=BeautifulStoneSoup.HTML_ENTITIES).findAll(
text=True))
return filter(lambda x: ord(x) > 9 and ord(x) < 127, html)
def ensure_int(param):
"""Ensure the given param is an int."""
try:
param = re.sub("\W", "", param)
return int(param)
except ValueError:
return None
def build_options():
"""Generate command line options."""
parser = argparse.ArgumentParser(version=version.version_string())
parser.add_argument("-c", "--config", default=get_conf_file_path(),
help="specify the path to a configuration file")
parser.add_argument("-d", "--debug", action="store_true",
help="show debugging output")
return parser.parse_known_args()[0]
def get_option(option):
"""Retrive an option from the command line."""
parsed_args = build_options()
return vars(parsed_args).get(option)
def debug_enabled():
"""Return whether or not debug mode is enabled."""
debug_option = get_option("debug")
debug_config = get_config().get("debug", type="bool")
return debug_option or debug_config
def get_home_directory():
"""Return the home directory."""
home_dir = os.getenv("HOME") + "/.pyhole/"
make_directory(home_dir)
return home_dir
def get_directory(new_dir):
"""Return a directory."""
home_dir = get_home_directory()
new_dir = os.path.join(home_dir, new_dir, "")
make_directory(new_dir)
return new_dir
def make_directory(directory):
"""Make a direectory if it doesn't exist."""
if not os.path.exists(directory):
os.makedirs(directory)
def get_conf_file_path():
"""Return the path to the conf file."""
paths = (
"/etc/pyhole.conf",
"/etc/pyhole/pyhole.conf"
)
for path in paths:
if os.path.exists(path):
return path
return get_home_directory() + "pyhole.conf"
def get_conf_file():
"""Return the path to the conf file."""
return get_option("config") or get_conf_file_path()
def get_config(section="Pyhole"):
"""Return the default config object."""
return config.Config(get_conf_file(), section)
def write_file(directory, file_name, data):
"""Write data to file."""
directory = get_directory(directory)
with open(directory + file_name, "w") as open_file:
open_file.write(str(data).strip())
def read_file(directory, file_name):
"""Read and return the data in file."""
directory = get_directory(directory)
try:
with open(directory + file_name, "r") as open_file:
data = open_file.read()
return data
except IOError:
return None
def list_files(directory):
"""List files in a directory."""
directory = get_directory(directory)
return os.listdir(directory)
def prepare_config():
"""Prepare a sample configuration file."""
conf_file = get_conf_file()
if os.path.exists(conf_file):
return
try:
print "Copying sample configuration file to: %s" % conf_file
shutil.copyfile("pyhole.conf.sample", conf_file)
print "Done. Please edit before running again."
except IOError:
# NOTE(jk0): Could not locate sample configuration file. This should
# only happen when Read the Docs generates the documentation.
pass
def datetime_now_string():
"""ISO 8601 formatted string of the current datetime."""
return datetime.datetime.utcnow().isoformat()
```
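The `admin`, `require_params` and `spawn` decorators above are meant to wrap plugin commands; the plugin class and command below are hypothetical (invented for illustration, mirroring the real plugins later in this repository):
```python
from pyhole.core import plugin
from pyhole.core import utils

class Example(plugin.Plugin):
    """A hypothetical plugin showing the decorators defined in utils."""
    @plugin.hook_add_command("restart")
    @utils.admin           # only message sources listed in session.admins may run this
    @utils.require_params  # with no params, the docstring is dispatched as usage help
    @utils.spawn           # run in a daemon thread so the bot stays responsive
    def restart(self, message, params=None, **kwargs):
        """Restart a service (ex: .restart <name>)"""
        message.dispatch("Restarting %s..." % params)
```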
#### File: pyhole/pyhole/main.py
```python
import time
from pyhole.core import api
from pyhole.core import logger
from pyhole.core import process
from pyhole.core import utils
from pyhole.core import version
def Main():
"""Main loop."""
config = utils.get_config()
log = logger.get_logger()
networks = config.get("networks", type="list")
log.info("Starting %s..." % version.version_string())
log.info("Connecting to networks: %s" % ", ".join(networks))
procs = []
for network in networks:
proc = process.Process(network)
proc.start()
procs.append(proc)
try:
if config.get("api_enabled", type="bool"):
api.run()
while True:
time.sleep(1)
for proc in procs:
if not proc.is_alive():
procs.remove(proc)
if not procs:
log.info("No longer connected to any networks; shutting down.")
raise KeyboardInterrupt
except KeyboardInterrupt:
for proc in procs:
proc.terminate()
```
#### File: pyhole/plugins/jira.py
```python
from pyhole.core import plugin
from pyhole.core import request
from pyhole.core import utils
class JiraClient(object):
def __init__(self):
jira = utils.get_config("Jira")
self.auth_server = jira.get("auth_server")
self.domain = jira.get("domain")
self.username = jira.get("username")
self.password = jira.get("password")
def get(self, issue_id):
url = "%s/rest/api/latest/issue/%s" % (self.auth_server, issue_id)
return request.get(url, verify=False,
auth=(self.username, self.password))
class Jira(plugin.Plugin):
"""Provide access to the Jira API."""
def __init__(self, session):
self.session = session
self.name = self.__class__.__name__
self.client = JiraClient()
@plugin.hook_add_msg_regex("([A-Z]{2}-[0-9]{3,5})")
def regex_match_issue(self, message, match, **kwargs):
"""Retrieve Jira ticket information (ex: AB-1234)."""
try:
issue_id = match.group(0)
self._find_issue(message, issue_id)
except Exception:
return
@utils.spawn
def _find_issue(self, message, issue_id):
"""Find and display a Jira issue."""
try:
issue = self.client.get(issue_id).json()["fields"]
except KeyError:
return
assignee = issue.get("assignee")
if assignee:
assignee = assignee.get("displayName")
msg = "%s: %s [Status: %s, Priority: %s, Assignee: %s] %s"
message.dispatch(msg % (
issue_id,
issue["summary"],
issue["status"]["name"],
issue["priority"]["name"],
assignee,
"%s/jira/browse/%s" % (self.client.domain, issue_id)
))
```
#### File: pyhole/plugins/kernel.py
```python
import re
from pyhole.core import plugin
from pyhole.core import request
from pyhole.core import utils
class Kernel(plugin.Plugin):
"""Provide access to kernel.org data."""
@plugin.hook_add_command("kernel")
@utils.spawn
def kernel(self, message, params=None, **kwargs):
"""Retrieve the current kernel version (ex: .kernel)"""
url = "https://www.kernel.org/kdist/finger_banner"
response = request.get(url)
if response.status_code != 200:
return
r = re.compile("(.* mainline .*)")
m = r.search(response.content)
kernel = m.group(1).replace(" ", "")
message.dispatch(kernel)
``` |
{
"source": "JK117/Pi-Displayer",
"score": 3
} |
#### File: Pi-Displayer/em/EventManager.py
```python
from queue import Queue, Empty
from threading import *
class EventManager:
def __init__(self):
"""初始化事件管理器"""
# 事件对象列表
self.__eventQueue = Queue()
# 事件管理器开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target=self.run)
self.__count = 0
# 这里的__handlers是一个字典,用来保存对应的事件的响应函数
# 其中每个键对应的值是一个列表,列表中保存了对该事件监听的响应函数,一对多
self.__handlers = {}
def run(self):
"""引擎运行"""
print('{}_run'.format(self.__count))
while self.__active:
try:
# 获取事件的阻塞时间设为1秒
event = self.__eventQueue.get(block=True, timeout=1)
self.event_process(event)
except Empty:
pass
self.__count += 1
def event_process(self, event):
"""处理事件"""
print('{}_EventProcess'.format(self.__count))
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
for handler in self.__handlers[event.type_]:
handler(event)
self.__count += 1
def start(self):
"""启动"""
print('{}_Start'.format(self.__count))
# 将事件管理器设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
self.__count += 1
def stop(self):
"""停止"""
print('{}_Stop'.format(self.__count))
# 将事件管理器设为停止
self.__active = False
# 等待事件处理线程退出
self.__thread.join()
self.__count += 1
def add_event_listener(self, type_, handler):
"""绑定事件和监听器处理函数"""
print('{}_AddEventListener'.format(self.__count))
# 尝试获取该事件类型对应的处理函数列表,若无则创建
try:
handler_list = self.__handlers[type_]
except KeyError:
handler_list = []
self.__handlers[type_] = handler_list
if handler not in handler_list:
handler_list.append(handler)
print(self.__handlers)
self.__count += 1
def remove_event_listener(self, type_, handler):
"""移除监听器的处理函数"""
print('{}_RemoveEventListener'.format(self.__count))
try:
handler_list = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handler_list:
handler_list.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handler_list:
del self.__handlers[type_]
except KeyError:
pass
self.__count += 1
def send_event(self, event):
"""发送事件,向事件队列中存入事件"""
print('{}_SendEvent'.format(self.__count))
self.__eventQueue.put(event)
self.__count += 1
class Event:
def __init__(self, type_=None):
        self.type_ = type_  # event type
        self.dict = {}  # dict holding the concrete event data
```
#### File: Pi-Displayer/em/PublicAccount.py
```python
from em.EventManager import *
EVENT_ARTICLE = "Event_Article"
# Event source: the public account (article publisher)
class PublicAccounts:
def __init__(self, event_manager):
self.__eventManager = event_manager
def write_new_article(self):
        # Event object: a new article has been written
event = Event(type_=EVENT_ARTICLE)
event.dict["article"] = u'如何写出更优雅的代码\n'
        # Send the event
self.__eventManager.send_event(event)
print(u'公众号发送新文章\n')
def start_event():
public_acc = PublicAccounts(event_manager)
timer = Timer(2, public_acc.write_new_article)
timer.start()
``` |
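The module-level `event_manager` used in `start_event()` is not defined in this file; a hedged sketch of the missing wiring (the `Listener` class and the names below are assumptions, not code from the repository):
```python
import time
from em.EventManager import EventManager
from em.PublicAccount import PublicAccounts, EVENT_ARTICLE

class Listener:
    def __init__(self, username):
        self.__username = username
    def read_article(self, event):
        # handler called by EventManager.event_process for EVENT_ARTICLE events
        print('%s received a new article: %s' % (self.__username, event.dict["article"]))

event_manager = EventManager()
event_manager.add_event_listener(EVENT_ARTICLE, Listener("reader1").read_article)
event_manager.start()
PublicAccounts(event_manager).write_new_article()
time.sleep(2)            # give the worker thread time to process the queued event
event_manager.stop()
```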
{
"source": "JK19/Python-Neural-Networks",
"score": 3
} |
#### File: JK19/Python-Neural-Networks/RNA.py
```python
import random
import math
class RNA(object):
def __init__(self,
entradas=3,
arquitectura=[2, 1],
alpha=0.5,
aleatoria=None,
permitir_logs=False):
self.red = []
self.arquitectura = arquitectura
self.n_capas = len(arquitectura)
self.n_entradas = entradas
self.alpha = alpha
self.aleatoria = aleatoria
self.permitir_logs = permitir_logs
def f_aleatoria(self):
try:
if self.aleatoria is None:
return random.uniform(-1, 1)
else:
return self.aleatoria()
except Exception as exc:
print('RN: Ha ocurrido un error en la funcion aleatoria')
raise exc
def generar(self):
for c in range(self.n_capas):
self.red.append([])
for i in range(self.arquitectura[c]):
if c == 0:
self.red[c].append({'pesos': [self.f_aleatoria() for j in range(self.n_entradas + 1)]})
else:
self.red[c].append({'pesos': [self.f_aleatoria() for j in range(self.arquitectura[c - 1] + 1)]})
def mult(self, entrada, pesos):
ret = 0
for i in range(len(entrada)):
ret += entrada[i] * pesos[i]
return ret
    def activacion(self, raw):  # raw = the neuron's raw (pre-activation) output
        return 1 / (1 + math.exp(-raw))
    def activacion_der(self, out):  # out = the neuron's activation output
        return out * (1 - out)
    def fw_prop(self, ejemplo):  # forward propagation
salida_capa = [1] + ejemplo # bias
for capa in self.red:
buffer = [1] # bias
for n in capa:
salida = self.mult(salida_capa, n['pesos'])
n['salida'] = self.activacion(salida)
buffer.append(n['salida'])
salida_capa = buffer
        return salida_capa[1:]  # result of the output layer
    def bw_prop(self, deseada):  # back-propagate the output error
for c in reversed(range(len(self.red))):
            if c == len(self.red) - 1:  # output layer
for n in range(len(self.red[c])):
error = (deseada[n] - self.red[c][n]['salida'])
self.red[c][n]['delta'] = error * self.activacion_der(self.red[c][n]['salida'])
            else:  # hidden layers
for n in range(len(self.red[c])):
error_buffer = 0.0
                    for neurona in self.red[c + 1]:  # sum of the next layer's weights*deltas
error_buffer += (neurona['pesos'][n + 1] * neurona['delta']) # bias
self.red[c][n]['delta'] = error_buffer * self.activacion_der(self.red[c][n]['salida'])
    def corregir_pesos(self, ejemplo_entrada):  # weight-increment (update) calculation
for c in range(len(self.red)):
if c == 0:
salida_anterior = [1] + ejemplo_entrada
else:
salida_anterior = [1] + [n['salida'] for n in self.red[c - 1]]
for neurona in self.red[c]:
for i in range(len(salida_anterior)):
neurona['pesos'][i] += self.alpha * neurona['delta'] * salida_anterior[i]
def RMSE(self, buffer, n_ejemplos):
return math.sqrt(buffer / n_ejemplos)
def GRMSE(self, buffers, n_salidas):
return sum(buffers) / n_salidas
def entrenar(self, ejemplos_entrada, ejemplos_salida, epochs):
for epoch in range(epochs):
informe = self.single_epoch(ejemplos_entrada, ejemplos_salida)
informe['numero'] = epoch
if self.permitir_logs:
print('--> Epoch numero {}:'.format(informe['numero']))
for i in range(len(informe['rmse'])):
print('\tRMSE salida {}: {}'.format(i, informe['rmse'][i]))
print('\tGRMSE de la red: {}'.format(informe['grmse']))
def single_epoch(self, ejemplos_entrada, ejemplos_salida):
informe_epoch = {'deseado': [], 'obtenido': [], 'rmse': [], 'grmse': None}
error_buffer = [0 for i in range(len(ejemplos_salida[0]))] # one buffer per net output
for i in range(len(ejemplos_entrada)):
salida = self.fw_prop(ejemplos_entrada[i])
self.bw_prop(ejemplos_salida[i])
self.corregir_pesos(ejemplos_entrada[i])
            # accumulate squared error for each output
for j in range(len(salida)):
error_buffer[j] += sum([(ejemplos_salida[i][j] - salida[j]) ** 2])
informe_epoch['deseado'].append(ejemplos_salida[i])
informe_epoch['obtenido'].append(salida)
        # end of epoch
        for k in range(len(error_buffer)):  # error calculation
informe_epoch['rmse'].append(self.RMSE(error_buffer[k], len(ejemplos_entrada)))
informe_epoch['grmse'] = self.GRMSE(informe_epoch['rmse'], len(informe_epoch['rmse']))
return informe_epoch
if __name__ == '__main__':
red = RNA(entradas=2,
alpha=0.5,
arquitectura=[2, 1],
permitir_logs=True)
red.generar()
XOR_in_samples = [
[0, 0],
[0, 1],
[1, 0],
[1, 1]
]
XOR_out_samples = [
[0],
[1],
[1],
[0]
]
red.entrenar(XOR_in_samples, XOR_out_samples, 100000)
for e in XOR_in_samples:
print('Input: {}'.format(e))
        res = red.fw_prop(e)  # forward-propagate the input
print('Output: {}'.format(res))
print('\n')
input('Press Enter to continue')
# Ejemplo usando clases de salida
red = RNA(entradas=2,
alpha=0.7,
              arquitectura=[2, 2],  # 2 neurons in the output layer
permitir_logs=True)
red.generar()
XOR_in_samples = [
[0, 0],
[0, 1],
[1, 0],
[1, 1]
]
    # The first neuron represents class (1) and the second represents class (0)
XOR_out_class_samples = [
# [(1), (0)]
[0, 1],
[1, 0],
[1, 0],
[0, 1]
]
red.entrenar(XOR_in_samples, XOR_out_class_samples, 100000)
for e in XOR_in_samples:
print('Entrada: {}'.format(e))
true_val, false_val = red.fw_prop(e)
if true_val < 0.01 and false_val > 0.99:
res = 0
else:
res = 1
print('Salida: {}'.format(res))
print('\n')
``` |
{
"source": "jk1ng/python-evtx",
"score": 2
} |
#### File: python-evtx/scripts/find_bugs.py
```python
import sys
import mmap
import contextlib
from Evtx.Evtx import FileHeader
from Evtx.Views import evtx_record_xml_view
def main():
with open(sys.argv[1], 'r') as f:
with contextlib.closing(mmap.mmap(f.fileno(), 0,
access=mmap.ACCESS_READ)) as buf:
fh = FileHeader(buf, 0x0)
for chunk in fh.chunks():
for record in chunk.records():
try:
evtx_record_xml_view(record).encode("utf-8")
except Exception as e:
print str(e)
print repr(e)
print evtx_record_xml_view(record).encode("utf-8")
return
if __name__ == "__main__":
main()
``` |
{
"source": "JK2109/web-scraping-challenge",
"score": 3
} |
#### File: web-scraping-challenge/Missions_to_Mars/scrape_mars.py
```python
from bs4 import BeautifulSoup as bs
import pandas as pd
import time
from webdriver_manager.chrome import ChromeDriverManager
from splinter import Browser
# to scrape news data from mars news site
def scrape_mars_data():
# Set up Splinter
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
mars_data_dict = {}
# Visit https://redplanetscience.com
url1 = 'https://redplanetscience.com/'
browser.visit(url1)
time.sleep(1)
# Scrape page into Soup
html = browser.html
soup = bs(html, "html.parser")
# Get the latest news title and the paragraph and save it to dict
mars_data_dict["news_title"] = soup.find_all('div', class_='content_title')[0].text
mars_data_dict["news_paragraph"] = soup.find_all('div', class_='article_teaser_body')[0].text
# Visit https://spaceimages-mars.com/
url2 = 'https://spaceimages-mars.com/'
browser.visit(url2)
time.sleep(1)
# Scrape page into Soup
html = browser.html
soup = bs(html, "html.parser")
featured_image_path = soup.find_all('img')[1]["src"]
mars_data_dict["featured_image_url"] = url2 + featured_image_path
# visit url and save the table from the link into dataframe
url3 = "https://galaxyfacts-mars.com/"
tables= pd.read_html(url3)
    # take the first table from the results
df = tables[0]
# renaming columns and setting description as index
df = df.rename(columns={0:"Description", 1:"Mars",2:"Earth"})
df = df.set_index("Description")
df_html = (df.to_html()).replace('\n', '')
mars_data_dict["mars_fact"] = df_html
    # visit the Mars hemispheres page to collect the full-resolution image links
url4 = "https://marshemispheres.com/"
browser.visit(url4)
time.sleep(1)
hemisphere_image_urls = []
for i in range (0,4):
hemisphere_dict={}
html = browser.html
soup = bs(html, 'html.parser')
        # find the hemisphere title and store it in the dict
hemisphere_title = soup.find_all("h3")[i].text
hemisphere_dict["title"] = hemisphere_title.replace(" Enhanced","")
# click the link to find the full resolution image
browser.links.find_by_partial_text(hemisphere_title).click()
html = browser.html
soup = bs(html, 'html.parser')
        # finding the path for the full-resolution image
full_image_path = soup.find_all('a')[3]["href"]
hemisphere_dict["img_url"] = url4 + full_image_path
# appending dictionary to a list
hemisphere_image_urls.append(hemisphere_dict)
# click back button to return to the main page
browser.links.find_by_partial_text('Back').click()
mars_data_dict["hemisphere_image_urls"] = hemisphere_image_urls
# Close the browser after scraping
browser.quit()
# Return results
return mars_data_dict
``` |
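A hedged example of consuming the scraper, as it would typically be called from a companion Flask/Mongo app (the database and collection names, and the presence of a local MongoDB instance, are assumptions):
```python
import pymongo

mars_data = scrape_mars_data()
print(mars_data["news_title"])
client = pymongo.MongoClient("mongodb://localhost:27017/")
client.mars_app.mars.update_one({}, {"$set": mars_data}, upsert=True)
```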
{
"source": "jk4837/ShowDefinition",
"score": 2
} |
#### File: jk4837/ShowDefinition/ShowDefinitionEx.py
```python
import sublime
import sublime_plugin
import re
import time
import html
lastSymbol = None
lastStartTime = time.time()
settings = {}
hover_view = None
hide_view_ex = None
def load_all_settings():
global hover_view, global_settings, settings, DEBUG, SHOW_PATH, MAX_LEN_TO_WRAP, MAX_LIST_ITEM
if hover_view is not None:
global_settings = hover_view.settings()
settings = sublime.load_settings('show_definition_ex.sublime-settings')
DEBUG = settings.get('DEBUG', False)
SHOW_PATH = settings.get('show_path', True)
MAX_LEN_TO_WRAP = settings.get('max_len_to_wrap', 60)
MAX_LIST_ITEM = settings.get('max_list_item', 20)
def plugin_loaded():
load_all_settings()
def symplify_path(path):
s = path.split('/');
count = 2
if count*2 < len(s):
return '/'.join(s[0:count] + ['..'] + s[-count:])
return path
def get_file_ex(path):
path_l = path.lower()
if 'makefile' in path_l:
return 'makefile'
return path_l[path_l.rfind('.')+1:]
def file_related(ex1, ex2):
global settings
if ex1 == ex2:
return True
for r_list in settings.get('related_list',[]):
rt1 = True if ex1 in r_list else False
rt2 = True if ex2 in r_list else False
if rt1 and rt2:
return True
return False
hide_view = None
hide_view_filename = None
def show_hide_view():
global hide_view
if hide_view is not None:
sublime.active_window().run_command('show_panel', {'panel': 'output._show_definition_ex'})
sublime.active_window().focus_view(hide_view)
def get_lint_file(filename, own_file = None):
global DEBUG, hide_view, hide_view_filename, hide_view_ex, settings, global_settings
if hide_view_filename == filename:
return hide_view, hide_view_ex
ex = get_file_ex(filename)
    if ex in settings.get('exclude_files',[]):  # don't index this file extension
if DEBUG:
print(' skip exclude_files file')
return None, None
hide_view_ex = ex
if own_file:
own_ex = get_file_ex(own_file)
if not file_related(hide_view_ex, own_ex):
if DEBUG:
print(' skip none related file', (hide_view_ex, own_ex))
return None, None
if hide_view is None:
hide_view = sublime.active_window().create_output_panel("_show_definition_ex", True)
if DEBUG:
show_hide_view()
content = None
try:
with open(filename, 'r', encoding = 'utf8') as file:
content = file.read()
except Exception as e:
        print(e, "Can't open file:", filename)
return None, None
hide_view_filename = filename
hide_view.run_command("select_all")
hide_view.run_command("left_delete")
hide_view.run_command("append", {"characters": content})
syntax_lists = settings.get('syntax_lists')
syntax = None
if syntax_lists:
for syntax_list in syntax_lists:
if hide_view_ex in syntax_list[1]: # ['h', 'hpp', 'c', 'cpp'],
syntax = syntax_list[0] # "C++.sublime-syntax"
if syntax is None:
syntax = global_settings.get('syntax', {'extension': hide_view_ex})
if DEBUG:
print(' filename:', filename, ', hide_view_ex:', hide_view_ex, ', syntax:', syntax)
hide_view.assign_syntax(syntax)
return hide_view, hide_view_ex
def get_indent(view, point):
indent_len = 1
line_start = view.find_by_class(point, False, sublime.CLASS_LINE_START)
line_end = view.find_by_class(point, True, sublime.CLASS_LINE_END)
loc_indent = view.find('^\t+', line_start)
if -1 == loc_indent.a or -1 == loc_indent.b:
loc_indent = view.find('^( )+', line_start)
indent_len = 4
if loc_indent.a > line_end:
return 0
return (loc_indent.b - loc_indent.a) / indent_len
def ensure_func_in_class_by_indent(view, class_point, function_point):
class_indent = get_indent(view, class_point)
function_indent = get_indent(view, function_point)
if class_indent != function_indent - 1:
return False
next_class_indent = view.find('^( ){0' + ',' + str(class_indent) + '}[^\t \r\n]+', class_point)
if -1 == next_class_indent.a or -1 == next_class_indent.b:
next_class_indent = view.find('^\t{0' + ',' + str(class_indent) + '}[^\t \r\n]+', class_point)
if -1 == next_class_indent.a or -1 == next_class_indent.b:
return True
return next_class_indent.b > function_point
def ensure_func_in_class_by_parans(view, class_point, function_point):
first_semicolon = view.find(';', class_point).a
first_parentheses = view.find('{', class_point).a
if first_semicolon < first_parentheses:
return False
parentheses_l = 1
loc = first_parentheses + 1
while True:
loc = view.find('{', loc)
if -1 == loc.a or -1 == loc.b:
break
if loc.b > function_point:
break
loc = loc.b
parentheses_l += 1
parentheses_r = 0
loc = first_parentheses + 1
while True:
loc = view.find('}', loc)
if -1 == loc.a or -1 == loc.b:
break
if loc.b > function_point:
break
loc = loc.b
parentheses_r += 1
return parentheses_r < parentheses_l
def ensure_func_in_class(view, class_point, function_point):
if 'python' in view.settings().get('syntax').lower():
return ensure_func_in_class_by_indent(view, class_point, function_point)
return ensure_func_in_class_by_parans(view, class_point, function_point)
def parse_scope_full_name(view, region_row = None, region_col = None):
global DEBUG, hide_view_ex
if region_row is None or region_col is None:
pt = view.sel()[0].begin()
region_row, region_col = view.rowcol(pt)
else:
pt = view.text_point(region_row, region_col)
# skip calling
prec = view.substr(pt-1)
if 'js' == hide_view_ex:
prec_list = {'>', '!', '\(', '\{'}
else:
prec_list = {'.', '>', '!', '\(', '\{'}
if prec in prec_list:
if DEBUG:
print(' skip prefix char:', prec)
return
if hide_view_ex in {'c', 'cpp', 'h', 'hpp'}:
split_char = '::'
else:
split_char = '.'
is_class = view.match_selector(pt, 'entity.name.class | entity.name.struct')
if DEBUG:
view.sel().clear()
view.sel().add(sublime.Region(pt, pt))
view.show(pt)
s = ''
found = False
# Look for any classes
class_point = None
class_regions = view.find_by_selector('entity.name.class | entity.name.struct')
class_name = ''
for r in reversed(class_regions):
row, col = view.rowcol(r.a)
if row <= region_row:
class_point = r.a
r.b = view.find("[ \n\r\{\[\(;,'\"]", r.a).a
class_name = view.substr(r).strip(':')
found = True
break;
function_point = None
function_name = ''
param_name = ''
if not is_class:
# Look for any functions
function_regions = view.find_by_selector('entity.name.function')
if function_regions:
for r in reversed(function_regions):
if r.contains(pt):
function_point = r.begin()
s = view.substr(view.split_by_newlines(r)[-1])
function_name = s
found = True
if '::' is split_char and split_char in s:
sp = s.rsplit('::')
class_point = None
class_name = sp[0].strip()
function_name = '::'.join(sp[1:]).strip()
elif '.' is split_char:
line = view.substr(view.expand_by_class(pt, sublime.CLASS_LINE_START | sublime.CLASS_LINE_END, " "))
pattern = re.compile(r'([A-Z][._0-9A-Za-z]*)\.' + s)
match = pattern.match(line)
if (match):
if DEBUG:
print(' line:', line)
print(' match:', match.string)
class_point = None
class_name = match.group(1)
break
# for parens wrap
if function_point:
function_params = view.find_by_selector('meta.function.parameters | meta.method.parameters | punctuation.section.group')
if function_params:
for r in function_params:
if function_point < r.begin():
param_name = view.substr(r)
break;
if DEBUG:
print(' class_point:', class_point, ', class_name:', class_name, ', function_point:', function_point, ', function_name:', function_name, ', param_name', param_name, ', s:', s)
if class_point is not None and function_point is not None:
if not ensure_func_in_class(view, class_point, function_point):
if DEBUG:
print(' ',function_name, 'not in', class_name)
class_name = ''
if '' != class_name and '' != function_name:
s = class_name + split_char + function_name
else:
s = class_name + function_name
if '' != param_name:
param_name = param_name if 0 < len(param_name) and param_name[0] != '(' else param_name[1:]
param_name = param_name if 0 < len(param_name) and param_name[-1] != ')' else param_name[:-1]
s = s + '(' + param_name + ')'
if found:
s = ('O ' if class_point == pt or function_point == pt else 'X ') + s
else:
# Not found, just capture the line and do something
length = view.line(pt).end() - pt + 1
s = view.substr(view.line(pt)).strip(' ;\t{}(')
s_next = s
while len(s_next) > length and '' != s_next:
s, _, s_next = s_next.partition(' ')
if s == s_next:
s_next = ''
s = '? ' + s + ' ' + s_next
s = s.strip();
s = re.sub(r'[\n\r\t]+', '', s)
s = re.sub(r'[ ]+', ' ', s)
s = re.sub(r',(\w)', ', \\1', s)
s = s.replace('( ', '(')
s = s.replace(' )', ')')
if DEBUG:
print(' result:', s)
return s
class ShowDefinitionExTestCommand(sublime_plugin.WindowCommand):
def run(self):
global hover_view
hover_view = self.window.active_view()
load_all_settings()
base_dir = sublime.packages_path() + '\\ShowDefinitionEx\\'
file = open(base_dir + "tests\\list.txt", "r")
has_fail = False
line_num = 0
if file:
line = file.readline().strip()
line_num += 1
while line:
if line.startswith('#'):
line = file.readline().strip()
line_num += 1
continue
ans = file.readline().strip()
line_num += 1
loc = line.split(':')
view, _ = get_lint_file(base_dir + loc[0])
scope_name = parse_scope_full_name(view, int(loc[1])-1, int(loc[2])-1)
scope_name = scope_name.partition(' ')[2]
if scope_name != ans:
print('Error!!!!')
print('#', line_num - 1, ' : ', line)
print(' ans : ', ans)
print('parse : ', scope_name)
has_fail = True
view.sel().clear()
view.sel().add(view.text_point(int(loc[1])-1, int(loc[2])-1))
view.show(view.sel())
show_hide_view()
break;
line = file.readline().strip()
line_num += 1
if False == has_fail:
sublime.message_dialog('All test pass!')
else:
sublime.message_dialog('Test failed at line ' + str(line_num - 1) + '!')
file.close()
class ShowDefinitionExSelCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
max_popup_width, max_popup_height = view.viewport_extent()
self.scope_name = parse_scope_full_name(view)
view.show_popup(html.escape(self.scope_name, False) + ' <a href=1>Copy</a>', sublime.HIDE_ON_MOUSE_MOVE_AWAY, max_width= max_popup_width, max_height= max_popup_height, on_navigate= self.on_navigate)
def on_navigate(self, idx):
if '1' == idx:
sublime.set_clipboard(self.scope_name[2:])
class ShowDefinitionExCommand(sublime_plugin.WindowCommand):
def run(self, startTime, symbol, point):
global lastStartTime, hover_view
# skip update
if startTime != lastStartTime:
return
self.startTime = startTime
self.symbol = symbol
self.point = point
self.symbol_list = hover_view.window().lookup_symbol_in_index(self.symbol)
if 0 == len(self.symbol_list):
print('no symbol_list of', self.symbol)
sublime.status_message("")
return
load_all_settings()
self.em = hover_view.em_width()
self.own_file = hover_view.file_name()
self.own_row, _ = hover_view.rowcol(point)
self.max_popup_width, self.max_popup_height = hover_view.viewport_extent()
self.start = 0
self.had_wrap = False
self.first_show = True
self.show()
def show(self):
global hover_view, lastStartTime, DEBUG, SHOW_PATH, MAX_LEN_TO_WRAP, MAX_LIST_ITEM
max_len = 0
has_more = False
content_list = []
self.display_list = []
idx_range = range(self.start, len(self.symbol_list))
for idx in idx_range:
# skip update
if self.startTime != lastStartTime:
return
loc = self.symbol_list[idx]
if self.own_file == loc[0] and self.own_row == loc[2][0]-1:
if DEBUG:
print('skip own_file')
continue
if DEBUG:
print('parse #%d:' % (idx))
view, ex = get_lint_file(loc[0], self.own_file)
scope_name = None
if view:
scope_name = parse_scope_full_name(view, loc[2][0]-1, loc[2][1]-1)
if scope_name:
max_len = max(max_len, len(scope_name))
self.display_list.append({'name': scope_name[1:], 'ex': ex, 'loc': loc})
if MAX_LIST_ITEM <= len(self.display_list):
has_more = idx != len(self.symbol_list) - 1
self.start = idx + 1
break
self.display_list.sort(key = lambda x: x['name'])
if 0 != len(self.display_list):
if self.startTime != lastStartTime:
print('skip update')
if SHOW_PATH:
if max_len >= MAX_LEN_TO_WRAP or self.had_wrap:
self.had_wrap = True
str_tpl = '<a href=%d><code><i>%s</i>%s</code><br /><small style="padding-left:%dpx">%s:%d</small></a>'
content = '<br />'.join([str_tpl % (idx, self.display_list[idx]['ex'][0].upper(), html.escape(self.display_list[idx]['name'], False).replace(self.symbol, '<m>' + self.symbol + '</m>', 1), 2 * self.em, html.escape(symplify_path(self.display_list[idx]['loc'][1]), False), self.display_list[idx]['loc'][2][0]) for idx in range(len(self.display_list))])
else:
str_tpl = '<a href=%d><code><i>%s</i>%s</code><small style="padding-left:%dpx">%s:%d</small></a>'
content = '<br />'.join([str_tpl % (idx, self.display_list[idx]['ex'][0].upper(), html.escape(self.display_list[idx]['name'], False).replace(self.symbol, '<m>' + self.symbol + '</m>', 1), (max_len-len(self.display_list[idx]['name']))*self.em + 5, html.escape(symplify_path(self.display_list[idx]['loc'][1]), False), self.display_list[idx]['loc'][2][0]) for idx in range(len(self.display_list))])
else:
str_tpl = '<a href=%d><code><i>%s</i>%s</code></a>'
content = '<br />'.join([str_tpl % (idx, self.display_list[idx]['ex'][0].upper(), html.escape(self.display_list[idx]['name'], False).replace(self.symbol, '<m>' + self.symbol + '</m>', 1)) for idx in range(len(self.display_list))])
if has_more:
content += '<br /><br /><a href=more><n>click to see more ...</n></a>'
body = """
<body id=show-definitions>
<style>
body {
font-family: system;
}
h1 {
font-size: 1.1rem;
font-weight: bold;
margin: 0 0 0.25em 0;
}
m {
color: #FFFB9D;
text-decoration: none;
}
code {
font-family: monospace;
color: #FFFFFF;
text-decoration: none;
}
i {
color: #73AE86;
font-weight: bold;
font-style: normal;
text-decoration: none;
width: 30px;
}
n {
font-weight: bold;
padding: 0px 0px 0px %dpx;
}
</style>
<h1>Definition of <m>%s</m> %s:</h1>
<p>%s</p>
</body>
""" % (self.em*2, self.symbol, '' if not has_more else '%d/%d' % (self.start, len(self.symbol_list)), content)
if self.first_show:
hover_view.show_popup(body, sublime.HIDE_ON_MOUSE_MOVE_AWAY, location= self.point, max_width= self.max_popup_width, max_height= self.max_popup_height, on_navigate= self.on_navigate)
else:
hover_view.update_popup(body)
sublime.status_message("")
def on_navigate(self, idx):
global MAX_LIST_ITEM
if 'more' == idx:
self.first_show = False
self.show()
else:
idx = int(idx)
self.window.open_file('%s:%d:%d' % (self.display_list[idx]['loc'][0], self.display_list[idx]['loc'][2][0], self.display_list[idx]['loc'][2][1]), sublime.ENCODED_POSITION)
def toggle_setting(settings, name):
if True == settings.get(name):
print('Disable system', name)
settings.set(name, False)
else:
print('Enable system', name)
settings.set(name, True)
# toggle "show_definitions"
class ShowDefinitionExToggleCommand(sublime_plugin.ApplicationCommand):
def run(self):
sublime.active_window().run_command('hide_popup')
s = sublime.load_settings("Preferences.sublime-settings")
toggle_setting(s, 'show_definitions')
sublime.save_settings("Preferences.sublime-settings")
def lookup_symbol(window, symbol):
if len(symbol.strip()) == 0:
return []
index_locations = window.lookup_symbol_in_index(symbol)
open_file_locations = window.lookup_symbol_in_open_files(symbol)
def file_in_location_list(fname, locations):
for l in locations:
if l[0] == fname:
return True
return False
# Combine the two lists, overriding results in the index with results
# from open files, while trying to preserve the order of the files in
# the index.
locations = []
ofl_ignore = []
for l in index_locations:
if file_in_location_list(l[0], open_file_locations):
if not file_in_location_list(l[0], ofl_ignore):
for ofl in open_file_locations:
if l[0] == ofl[0]:
locations.append(ofl)
ofl_ignore.append(ofl)
else:
locations.append(l)
for ofl in open_file_locations:
if not file_in_location_list(ofl[0], ofl_ignore):
locations.append(ofl)
return locations
def symbol_at_point(view, pt):
symbol = view.substr(view.expand_by_class(pt, sublime.CLASS_WORD_START | sublime.CLASS_WORD_END, "[]{}()<>:."))
locations = lookup_symbol(view.window(), symbol)
if len(locations) == 0:
symbol = view.substr(view.word(pt))
locations = lookup_symbol(view.window(), symbol)
return symbol, locations
def filter_current_symbol(view, point, symbol, locations):
"""
Filter the point specified from the list of symbol locations. This
results in a nicer user experience so the current symbol doesn't pop up
when hovering over a class definition. We don't just skip all class and
function definitions for the sake of languages that split the definition
and implementation.
"""
def match_view(path, view):
fname = view.file_name()
if fname is None:
if path.startswith('<untitled '):
path_view = view.window().find_open_file(path)
return path_view and path_view.id() == view.id()
return False
return path == fname
new_locations = []
for l in locations:
if match_view(l[0], view):
symbol_begin_pt = view.text_point(l[2][0] - 1, l[2][1])
symbol_end_pt = symbol_begin_pt + len(symbol)
if point >= symbol_begin_pt and point <= symbol_end_pt:
continue
new_locations.append(l)
return new_locations
class ShowDefinitionExHoverCommand(sublime_plugin.EventListener):
def on_hover(self, view, point, hover_zone):
global hover_view, lastStartTime, lastSymbol, DEBUG
load_all_settings()
if sublime.HOVER_TEXT is not hover_zone or not self.is_enabled():
return
# decide to show or not to show by built-in logic
def score(scopes):
return view.score_selector(point, scopes)
# Limit where we show the hover popup
if score('text.html') and not score('text.html source'):
is_class = score('meta.attribute-with-value.class')
is_id = score('meta.attribute-with-value.id')
if not is_class and not is_id:
return
else:
if not score('source'):
return
if score('comment'):
return
# Only show definitions in a string if there is interpolated source
if score('string') and not score('string source'):
return
# decide to show or not to show by this package
symbol, locations = symbol_at_point(view, point)
locations = filter_current_symbol(view, point, symbol, locations)
if not locations:
if DEBUG:
print('skip by symbol check')
return
track = True
for select in ['constant.language', 'meta.statement']: # may not track
if view.match_selector(point, select):
track = False
break
if not track:
for select in ['meta.function-call']: # may track
if view.match_selector(point, select):
track = True
break
if track:
for select in ['meta.string', 'comment', 'storage.modifier', 'storage.type', 'keyword']: # must not track
if view.match_selector(point, select):
track = False
break
if not track:
if DEBUG:
print('Finally decided to skip, select:', select)
return
timeout = 5
if symbol is None or symbol == lastSymbol and lastStartTime + timeout > time.time():
if DEBUG:
print('symbol not change skip update')
return
sublime.status_message("Parse definitions of " + symbol + "... 0/" + str(len(view.window().lookup_symbol_in_index(symbol))))
lastSymbol = symbol
lastStartTime = time.time()
hover_view = view
sublime.set_timeout_async(lambda: view.window().run_command('show_definition_ex', {'symbol': symbol, 'point': point, 'startTime': lastStartTime}), 0)
def is_enabled(self):
return not sublime.load_settings("Preferences.sublime-settings").get('show_definitions')
```
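Below is a hedged, standalone sketch (not part of the plugin) of the merge that `lookup_symbol` performs between index results and open-file results. The location tuples are assumed to follow Sublime's `(path, display_path, (row, col))` shape and the sample data is invented.
```python
# Standalone illustration of the index/open-file merge in lookup_symbol.
def merge_locations(index_locations, open_file_locations):
    """Prefer results from open files, keeping the order of the files in the index."""
    def file_in(fname, locations):
        return any(l[0] == fname for l in locations)

    merged, used_open = [], []
    for loc in index_locations:
        if file_in(loc[0], open_file_locations):
            if not file_in(loc[0], used_open):
                for ofl in open_file_locations:
                    if ofl[0] == loc[0]:
                        merged.append(ofl)
                        used_open.append(ofl)
        else:
            merged.append(loc)
    for ofl in open_file_locations:
        if not file_in(ofl[0], used_open):
            merged.append(ofl)
    return merged

if __name__ == "__main__":
    idx = [("a.py", "a.py", (10, 4)), ("b.py", "b.py", (3, 0))]
    open_files = [("b.py", "b.py", (5, 0)), ("c.py", "c.py", (1, 0))]
    # b.py comes from the open-file list, a.py from the index, c.py is appended last.
    print(merge_locations(idx, open_files))
```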
#### File: ShowDefinition/tests/test.py
```python
def funcN1(self):
aabbcc
class ClassA(sublime_plugin.WindowCommand):
def funcA1(self):
aabbcc
def funcA2(self):
aabbcc
def funcN2(self):
aabbcc
class ClassB(sublime_plugin.WindowCommand):
class ClassC(sublime_plugin.WindowCommand):
def funcC1(self):
aabbcc
def funcC2(self):
aabbcc
def funcB1(self):
aabbcc
def funcN3(self):
aabbcc
``` |
{
"source": "jk4as/Microdonation",
"score": 2
} |
#### File: Microdonation/microDonation/forms.py
```python
from django import forms
from address.forms import AddressField
from .models import Order, CharityOrg, Cause, User
class CartForm(forms.Form):
value = forms.DecimalField(initial='1.00')
cause_id = forms.IntegerField(widget=forms.HiddenInput)
def __init__(self, request, *args, **kwargs):
self.request = request
super(CartForm, self).__init__(*args, **kwargs)
class CheckoutForm(forms.ModelForm):
class Meta:
model = Order
exclude = ('paid','order_id','charities')
class CharityForm(forms.ModelForm):
authorized_users = forms.CharField(required=False)
charity_image = forms.FileField(required=True)
class Meta:
model = CharityOrg
fields = [
'name',
'description',
'contact_email',
'paypal_email',
'tags',
'authorized_users',
'charity_image',
]
class CauseForm(forms.ModelForm):
cause_image = forms.FileField(required=True)
class Meta:
model = Cause
fields = [
'name',
'description',
'tags',
'cause_image',
]
class DeleteForm(forms.Form):
delete = forms.BooleanField(initial=False, required=False)
class BasicSearchForm(forms.Form):
search_val = forms.CharField(widget=forms.TextInput(attrs={'placeholder':'Search and Explore'}))
class CharitySearchForm(forms.Form):
search_types = [
('name', 'charity name'),
('min_causes', 'at least _ causes'),
('tags', 'containing one or more tags'),
]
search_type = forms.CharField(label="What should be used to search for the charity?", widget=forms.Select(choices=search_types))
search_value = forms.CharField(max_length=255)
```
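A hedged usage sketch of the plain `CartForm` outside a Django view follows. To stay runnable without the project's models or database, it redefines the same two fields locally and configures minimal Django settings first; both of these are assumptions for illustration, not project code.
```python
# Hedged sketch: validating cart data with a plain Django form.
import django
from django.conf import settings

if not settings.configured:
    settings.configure()   # minimal default settings, enough for form validation
    django.setup()

from django import forms

class CartFormSketch(forms.Form):
    # mirrors the fields of microDonation.forms.CartForm
    value = forms.DecimalField(initial='1.00')
    cause_id = forms.IntegerField(widget=forms.HiddenInput)

form = CartFormSketch(data={'value': '2.50', 'cause_id': '7'})
print(form.is_valid())     # True
print(form.cleaned_data)   # {'value': Decimal('2.50'), 'cause_id': 7}
```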
#### File: Microdonation/microDonation/models.py
```python
from django.db import models
from taggit.managers import TaggableManager
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.conf import settings
from django.utils import timezone
from address.models import AddressField
# Create your models here.
class UserManager(BaseUserManager):
def _create_user(self, email, password, is_staff, is_superuser, **kwargs):
if not email:
raise ValueError('Email is required for registration')
now = timezone.now()
email = self.normalize_email(email)
user = self.model(
email=email,
is_active=True,
is_staff=is_staff,
is_superuser=is_superuser,
date_joined=now,
)
user.set_password(password)
user.save()
return user
def create_user(self, email, password, **kwargs):
return self._create_user(email, password, False, False, **kwargs)
def create_superuser(self, email, password, **kwargs):
return self._create_user(email, password, True, True, **kwargs)
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=127, unique=True)
first_name = models.CharField(max_length=127)
last_name = models.CharField(max_length=127)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(auto_now_add=True)
# charities = models.ManyToManyField(CharityOrg)
USERNAME_FIELD = 'email'
EMAIL_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def get_absolute_url(self):
return "/users/%i" % (self.pk)
class CharityOrg(models.Model):
name = models.CharField(max_length=50)
description = models.TextField()
# photo = models.ImageField()
contact_email = models.EmailField()
paypal_email = models.EmailField()
slug = models.SlugField(unique=False, max_length=50)
tags = TaggableManager()
is_deleted = models.BooleanField(default=False)
authenticated_users = models.ManyToManyField(User)
charity_image = models.FileField(upload_to='images/', null=True, verbose_name="", default="http://12172.16.17.32:8000/media/https:/i.pinimg.com/originals/fa/98/67/fa9867a39c2ec093bad63e91fed2bacb.jpg")
def __str__(self):
return self.name
class Cause(models.Model):
name = models.CharField(max_length=50) # 191 is max SQL length
description = models.TextField()
charity = models.ForeignKey(CharityOrg, on_delete=models.CASCADE)
slug = models.SlugField(unique=False, max_length=50)
tags = TaggableManager()
cause_image = models.ImageField(upload_to='images/', default="https://i.pinimg.com/originals/fa/98/67/fa9867a39c2ec093bad63e91fed2bacb.jpg")
is_deleted = models.BooleanField(default=False)
def __str__(self):
return "{}:{}".format(self.charity.name, self.name)
class CartItem(models.Model):
cart_id = models.CharField(max_length=50)
value = models.DecimalField(max_digits=7, decimal_places=2)
cause = models.ForeignKey(Cause, on_delete=models.CASCADE)
def __str__(self):
return "{}:{}".format(self.cause.name, self.id)
def update_value(self, newValue):
self.value = newValue
self.save()
class Order(models.Model):
address = AddressField()
contact_email = models.EmailField()
date = models.DateTimeField(auto_now_add=True)
paid = models.BooleanField(default=False)
order_id = models.CharField(max_length=50)
charities = models.ManyToManyField(CharityOrg, related_name='charity_orders')
def __str__(self):
return "{}:{}".format(self.id, self.contact_email)
def total_cost(self):
return sum([ l.value for l in self.lineitem_set.all() ])
def charity_cost(self, charity):
relevant_lineitems = []
for l in self.lineitem_set.all():
if l.cause.charity == charity:
relevant_lineitems.append(l)
return sum([ l.value for l in relevant_lineitems])
class LineItem(models.Model):
order = models.ForeignKey(Order, on_delete=models.CASCADE)
value = models.DecimalField(max_digits=7, decimal_places=2)
cause = models.ForeignKey(Cause, on_delete=models.CASCADE)
def __str__(self):
return "{}:{}:{}".format(self.charity.name, self.cause.name, self.id)
``` |
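The per-charity split computed by `Order.total_cost` and `Order.charity_cost` can be illustrated without the ORM. The sketch below uses plain namedtuples and invented figures rather than the Django models above.
```python
# Hedged sketch of the order-cost aggregation, using plain Python objects.
from decimal import Decimal
from collections import namedtuple

Charity = namedtuple("Charity", "name")
Cause = namedtuple("Cause", "name charity")
Line = namedtuple("Line", "value cause")

red_cross = Charity("Red Cross")
shelter = Charity("Shelter")
lines = [
    Line(Decimal("1.00"), Cause("Blood drive", red_cross)),
    Line(Decimal("2.50"), Cause("Disaster relief", red_cross)),
    Line(Decimal("0.75"), Cause("Winter beds", shelter)),
]

total = sum(l.value for l in lines)                                        # Decimal('4.25')
per_charity = sum(l.value for l in lines if l.cause.charity is red_cross)  # Decimal('3.50')
print(total, per_charity)
```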
{
"source": "jk-5/python-wasteapi",
"score": 3
} |
#### File: python-wasteapi/wasteapi/wasteapi.py
```python
import asyncio
import json
import socket
from datetime import datetime, timedelta
from typing import Dict, Optional
import aiohttp
import async_timeout
from yarl import URL
from .__version__ import __version__
from .const import API_BASE_URI, API_HOST, API_TO_WASTE_TYPE
from .exceptions import (
WasteApiAddressError,
WasteApiConnectionError,
WasteApiError,
)
class WasteApi:
"""Main class for handling connections with WasteAPI."""
def __init__(
self,
company_code: str,
post_code: str,
house_number: str,
house_letter: str = None,
loop=None,
request_timeout: int = 10,
session=None,
user_agent: str = None,
):
"""Initialize connection with WasteAPI."""
self._loop = loop
self._session = session
self._close_session = False
self.company_code = company_code
self.post_code = post_code
self.house_number = house_number
self.house_letter = house_letter
self.request_timeout = request_timeout
self.user_agent = user_agent
self._unique_id = None
self._pickup = {} # type: Dict[str, datetime]
if self._loop is None:
self._loop = asyncio.get_event_loop()
if self._session is None:
self._session = aiohttp.ClientSession(loop=self._loop)
self._close_session = True
if self.user_agent is None:
self.user_agent = "PythonWasteAPI/{}".format(__version__)
async def _request(self, uri: str, method: str = "POST", data=None):
"""Handle a request to WasteAPI."""
url = URL.build(
scheme="https", host=API_HOST, port=443, path=API_BASE_URI
).join(URL(uri))
headers = {
"User-Agent": self.user_agent,
"Accept": "application/json, text/plain, */*",
}
try:
with async_timeout.timeout(self.request_timeout):
response = await self._session.request(
method, url, data=data, headers=headers, ssl=True
)
except asyncio.TimeoutError as exception:
raise WasteApiConnectionError(
"Timeout occurred while connecting to WasteAPI."
) from exception
except (aiohttp.ClientError, socket.gaierror) as exception:
raise WasteApiConnectionError(
"Error occurred while communicating with WasteAPI."
) from exception
content_type = response.headers.get("Content-Type", "")
if (response.status // 100) in [4, 5]:
contents = await response.read()
response.close()
if content_type == "application/json":
raise WasteApiError(
response.status, json.loads(contents.decode("utf8"))
)
raise WasteApiError(
response.status, {"message": contents.decode("utf8")}
)
if "application/json" in response.headers["Content-Type"]:
return await response.json()
return await response.text()
async def unique_id(self) -> Optional[int]:
"""Return unique address ID."""
if self._unique_id is None:
response = await self._request(
"FetchAdress",
data={
"companyCode": self.company_code,
"postCode": self.post_code,
"houseNumber": self.house_number,
"houseLetter": self.house_letter or "",
},
)
if not response.get("dataList"):
raise WasteApiAddressError(
"Address not found in WasteAPI service area"
)
self._unique_id = response["dataList"][0]["UniqueId"]
return self._unique_id
async def update(self) -> None:
"""Fetch data from WasteAPI."""
await self.unique_id()
today = datetime.now().date()
response = await self._request(
"GetCalendar",
data={
"companyCode": self.company_code,
"uniqueAddressID": self._unique_id,
"startDate": today.isoformat(),
"endDate": (today + timedelta(days=100)).isoformat(),
},
)
for pickup in response["dataList"]:
waste_type = API_TO_WASTE_TYPE.get(pickup["_pickupTypeText"])
pickup_date = None
if pickup["pickupDates"]:
pickup_date = datetime.strptime(
min(pickup["pickupDates"]), "%Y-%m-%dT%H:%M:%S"
)
self._pickup.update({waste_type: pickup_date}) # type: ignore
async def next_pickup(self, waste_type: str) -> Optional[datetime]:
"""Return date of next pickup of the requested waste type."""
return self._pickup.get(waste_type)
async def close(self) -> None:
"""Close open client session."""
if self._close_session:
await self._session.close()
async def __aenter__(self) -> "WasteApi":
"""Async enter."""
return self
async def __aexit__(self, *exc_info) -> None:
"""Async exit."""
await self.close()
``` |
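A hedged usage sketch of the client follows. The company code, address, and waste-type key are placeholders (the real keys live in `const.API_TO_WASTE_TYPE`), and the snippet assumes the package is importable as `wasteapi`.
```python
# Hedged usage sketch for the WasteApi client.
import asyncio

from wasteapi import WasteApi   # assumes the package is installed

async def main():
    async with WasteApi(
        company_code="<company-guid>",   # placeholder
        post_code="1234AB",              # placeholder
        house_number="1",
    ) as api:
        await api.update()
        # "GFT" is an assumed waste-type key; the real keys come from const.API_TO_WASTE_TYPE
        pickup = await api.next_pickup("GFT")
        print("Next pickup:", pickup)

asyncio.run(main())
```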
{
"source": "jkabalar/3DSSG",
"score": 2
} |
#### File: 3DSSG/data_processing/gen_data_scannet.py
```python
if __name__ == '__main__' and __package__ is None:
from os import sys
sys.path.append('../')
import argparse, trimesh
import open3d as o3d
import numpy as np
from tqdm import tqdm
from pathlib import Path
from utils import util, util_label, define
from utils.util_search import SAMPLE_METHODS,find_neighbors
from utils import dataLoaderScanNet
debug = True
debug = False
name_same_segment = define.NAME_SAME_PART
def Parser(add_help=True):
parser = argparse.ArgumentParser(description='Process some integers.', formatter_class = argparse.ArgumentDefaultsHelpFormatter,
add_help=add_help,conflict_handler='resolve')
    parser.add_argument('--type', type=str, default='train', choices=['train', 'test', 'validation'], help="which data split to process",required=False)
parser.add_argument('--label_type', type=str,default='ScanNet20',
choices=['3RScan', '3RScan160', 'NYU40', 'Eigen13', 'RIO27', 'RIO7','ScanNet20'], help='label',required=False)
parser.add_argument('--pth_out', type=str,default='../data/tmp', help='pth to output directory',required=False)
parser.add_argument('--target_scan', type=str, default='', help='')
parser.add_argument('--scan_name', type=str, default='inseg.ply', help='what is the name of the output filename of the ply generated by your segmentation method.')
## options
parser.add_argument('--verbose', type=bool, default=False, help='verbal',required=False)
parser.add_argument('--debug', type=int, default=0, help='debug',required=False)
## neighbor search parameters
parser.add_argument('--search_method', type=str, choices=['BBOX','KNN'],default='BBOX',help='How to split the scene.')
parser.add_argument('--radius_receptive', type=float,default=0.5,help='The receptive field of each seed.')
# # Correspondence Parameters
    parser.add_argument('--max_dist', type=float,default=0.1,help='maximum distance to find correspondence.')
parser.add_argument('--min_seg_size', type=int,default=512,help='Minimum number of points of a segment.')
    parser.add_argument('--corr_thres', type=float,default=0.5,help='The percentage of points mapped to the same target segment must exceed this value.')
    parser.add_argument('--occ_thres', type=float,default=0.75,help='The ratio of the 2nd-best to the best correspondence must be smaller than this.')
return parser
def load_inseg(pth_ply):
# if pth_ply.find('inseg.ply') >=0:
cloud_pd = trimesh.load(pth_ply, process=False)
points_pd = cloud_pd.vertices
segments_pd = cloud_pd.metadata['ply_raw']['vertex']['data']['label'].flatten()
# elif pth_ply.find('cvvseg.ply') >=0:
return cloud_pd, points_pd, segments_pd
def process(pth_scan, scan_id, label_type, verbose=False) -> list:
# some params
max_distance = args.max_dist
    filter_segment_size = args.min_seg_size # if the number of points within a segment is below this threshold, discard it
    filter_corr_thres = args.corr_thres # the percentage of points mapped to the corresponding label must exceed this value to accept the correspondence
filter_occ_ratio = args.occ_thres
pth_pd = os.path.join(define.SCANNET_DATA_PATH,scan_id,args.scan_name)
pth_ply = os.path.join(define.SCANNET_DATA_PATH,scan_id,scan_id+define.SCANNET_PLY_SUBFIX)
pth_agg = os.path.join(define.SCANNET_DATA_PATH,scan_id,scan_id+define.SCANNET_AGGRE_SUBFIX)
pth_seg = os.path.join(define.SCANNET_DATA_PATH,scan_id,scan_id+define.SCANNET_SEG_SUBFIX)
cloud_gt, points_gt, labels_gt, segments_gt = dataLoaderScanNet.load_scannet(pth_ply, pth_agg, pth_seg)
cloud_pd, points_pd, segments_pd = load_inseg(pth_pd)
# get num of segments
segment_ids = np.unique(segments_pd)
segment_ids = segment_ids[segment_ids!=0]
    if args.verbose: print('filtering input segments.. (original num of segments:',len(segment_ids),')')
segments_pd_filtered=list()
for seg_id in segment_ids:
pts = points_pd[np.where(segments_pd==seg_id)]
if len(pts) > filter_segment_size:
segments_pd_filtered.append(seg_id)
segment_ids = segments_pd_filtered
    if args.verbose: print('there are',len(segment_ids), 'segments after filtering:\n', segment_ids)
segs_neighbors = find_neighbors(points_pd, segments_pd, search_method,receptive_field=args.radius_receptive,selected_keys=segment_ids)
''' Check GT segments and labels '''
_, label_names, _ = util_label.getLabelMapping(args.label_type)
# print('label_names:',label_names)
instance2labelName = dict()
size_segments_gt = dict()
uni_seg_gt_ids = np.unique(segments_gt).tolist()
for seg_id in uni_seg_gt_ids:
indices = np.where(segments_gt == seg_id)
seg = segments_gt[indices]
labels = labels_gt[indices]
uq_label = np.unique(labels).tolist()
if len(uq_label) > 1:
if verbose or debug:
print('segment',seg_id,'has multiple labels (',uq_label,') in GT. Try to remove other labels.')
max_id=0
max_value=0
for id in uq_label:
if verbose or debug:
print(id, len(labels[labels==id]), '{:1.3f}'.format(len(labels[labels==id])/len(labels)))
if len(labels[labels==id])>max_value:
max_value = len(labels[labels==id])
max_id = id
for label in uq_label:
if label == max_id: continue
                if len(labels[labels==label]) > filter_segment_size: # try to generate a new segment
new_seg_idx = max(uni_seg_gt_ids)+1
uni_seg_gt_ids.append(new_seg_idx)
for idx in indices[0]:
if labels_gt[idx] == label:
segments_gt[idx] = new_seg_idx
else:
for idx in indices[0]:
if labels_gt[idx] == label:
segments_gt[idx] = 0
labels_gt[idx] = 0 # set other label to 0
seg = segments_gt[indices]
labels = labels_gt[indices]
uq_label = [max_id]
if uq_label[0] == 0 or uq_label[0] > 40:
name = 'none'
else:
name = util_label.NYU40_Label_Names[uq_label[0]-1]
# print(name)
if name not in label_names.values():
name = 'none'
# if label_type == 'ScanNet20':
# if name not in util_label.SCANNET20_Label_Names:
# name = 'none'
size_segments_gt[seg_id] = len(seg)
instance2labelName[seg_id] = name
if verbose:
print('instance2labelNames:')
print(instance2labelName)
''' Save as ply '''
if debug:
colors = util_label.get_NYU40_color_palette()
cloud_gt.visual.vertex_colors = [0,0,0,255]
for seg, label_name in instance2labelName.items():
segment_indices = np.where(segments_gt == seg)[0]
if label_name == 'none':continue
label = util_label.NYU40_Label_Names.index(label_name)+1
for index in segment_indices:
cloud_gt.visual.vertex_colors[index][:3] = colors[label]
cloud_gt.export('tmp_gtcloud.ply')
size_segments_pd = dict()
''' Find and count all corresponding segments'''
tree = o3d.geometry.KDTreeFlann(points_gt.transpose())
    count_seg_pd_2_corresponding_seg_gts = dict() # counts each segment_pd to its corresponding segment_gt
for segment_id in segment_ids:
segment_indices = np.where(segments_pd == segment_id)[0]
segment_points = points_pd[segment_indices]
size_segments_pd[segment_id] = len(segment_points)
if filter_segment_size > 0:
if size_segments_pd[segment_id] < filter_segment_size:
continue
for i in range(len(segment_points)):
point = segment_points[i]
k, idx, distance = tree.search_knn_vector_3d(point,1)
if distance[0] > max_distance: continue
segment_gt = segments_gt[idx][0]
if segment_gt not in instance2labelName: continue
if instance2labelName[segment_gt] == 'none': continue
if segment_id not in count_seg_pd_2_corresponding_seg_gts:
count_seg_pd_2_corresponding_seg_gts[segment_id] = dict()
if segment_gt not in count_seg_pd_2_corresponding_seg_gts[segment_id]:
count_seg_pd_2_corresponding_seg_gts[segment_id][segment_gt] = 0
count_seg_pd_2_corresponding_seg_gts[segment_id][segment_gt] += 1
if verbose or debug:
        print('{} out of {} segments found their corresponding GT segments.'.\
            format(len(count_seg_pd_2_corresponding_seg_gts),len(segment_ids)))
for k,i in count_seg_pd_2_corresponding_seg_gts.items():
print('\t{}: {}'.format(k,len(i)))
''' Find best corresponding segment '''
map_segment_pd_2_gt = dict() # map segment_pd to segment_gt
    gt_segments_2_pd_segments = dict() # how many pd segments correspond to each gt segment
for segment_id, cor_counter in count_seg_pd_2_corresponding_seg_gts.items():
size_pd = size_segments_pd[segment_id]
if verbose or debug: print('segment_id', segment_id, size_pd)
max_corr_ratio = -1
max_corr_seg = -1
list_corr_ratio = list()
for segment_gt, count in cor_counter.items():
size_gt = size_segments_gt[segment_gt]
corr_ratio = count/size_pd
list_corr_ratio.append(corr_ratio)
if corr_ratio > max_corr_ratio:
max_corr_ratio = corr_ratio
max_corr_seg = segment_gt
if verbose or debug: print('\t{0:s} {1:3d} {2:8d} {3:2.3f} {4:2.3f}'.\
format(instance2labelName[segment_gt],segment_gt,count, count/size_gt, corr_ratio))
if len(list_corr_ratio ) > 2:
list_corr_ratio = sorted(list_corr_ratio,reverse=True)
occ_ratio = list_corr_ratio[1]/list_corr_ratio[0]
else:
occ_ratio = 0
if max_corr_ratio > filter_corr_thres and occ_ratio < filter_occ_ratio:
'''
            This prevents accepting a segment that is almost equally occupied by two or more GT segments.
'''
            if verbose or debug: print('add correspondence of segment {:s} {:4d} to label {:4d} with the ratio {:2.3f} {:1.3f}'.\
                format(instance2labelName[max_corr_seg],segment_id,max_corr_seg,max_corr_ratio,occ_ratio))
map_segment_pd_2_gt[segment_id] = max_corr_seg
if max_corr_seg not in gt_segments_2_pd_segments:
gt_segments_2_pd_segments[max_corr_seg] = list()
gt_segments_2_pd_segments[max_corr_seg].append(segment_id)
else:
            if verbose or debug: print('filter correspondence segment {:s} {:4d} to label {:4d} with the ratio {:2.3f} {:1.3f}'.\
                format(instance2labelName[max_corr_seg],segment_id,max_corr_seg,max_corr_ratio,occ_ratio))
if verbose:
print('final correspondence:')
print(' pd gt')
for segment, label in sorted(map_segment_pd_2_gt.items()):
print("{:4d} {:4d}".format(segment,label))
print('final pd segments within the same gt segment')
for gt_segment, pd_segments in sorted(gt_segments_2_pd_segments.items()):
print('{} :'.format(gt_segment),end='')
for pd_segment in pd_segments:
print('{} '.format(pd_segment),end='')
print('')
''' Save as ply '''
if debug:
colors = util_label.get_NYU40_color_palette()
cloud_pd.visual.vertex_colors = [0,0,0,255]
for segment_pd, segment_gt in map_segment_pd_2_gt.items():
segment_indices = np.where(segments_pd == segment_pd)[0]
label = util_label.NYU40_Label_Names.index(instance2labelName[segment_gt])+1
color = colors[label]
for index in segment_indices:
cloud_pd.visual.vertex_colors[index][:3] = color
cloud_pd.export('tmp_corrcloud.ply')
'''' Save as relationship_*.json '''
list_relationships = list()
relationships = gen_relationship(0, map_segment_pd_2_gt, instance2labelName,
gt_segments_2_pd_segments)
if len(relationships["objects"]) != 0 and len(relationships['relationships']) != 0:
list_relationships.append(relationships)
return list_relationships, segs_neighbors
def gen_relationship(split:int, map_segment_pd_2_gt:dict,instance2labelName:dict,gt_segments_2_pd_segments:dict,
target_segments:list=None) -> dict:
'''' Save as relationship_*.json '''
relationships = dict()
relationships["scan"] = scan_id
relationships["split"] = split
objects = dict()
for seg, segment_gt in map_segment_pd_2_gt.items():
if target_segments is not None:
if seg not in target_segments: continue
name = instance2labelName[segment_gt]
assert(name != '-' and name != 'none')
objects[int(seg)] = name
relationships["objects"] = objects
split_relationships = list()
''' Build "same part" relationship '''
idx_in_txt_new = 0
for _, groups in gt_segments_2_pd_segments.items():
if target_segments is not None:
filtered_groups = list()
for g in groups:
if g in target_segments:
filtered_groups.append(g)
groups = filtered_groups
if len(groups) <= 1: continue
for i in range(len(groups)):
for j in range(i+1,len(groups)):
split_relationships.append([int(groups[i]),int(groups[j]), idx_in_txt_new, name_same_segment])
split_relationships.append([int(groups[j]),int(groups[i]), idx_in_txt_new, name_same_segment])
relationships["relationships"] = split_relationships
return relationships
if __name__ == '__main__':
args = Parser().parse_args()
debug |= args.debug
args.verbose |= args.debug
if debug:
args.verbose=True
if args.search_method == 'BBOX':
search_method = SAMPLE_METHODS.BBOX
elif args.search_method == 'KNN':
search_method = SAMPLE_METHODS.RADIUS
util.set_random_seed(2020)
import os,json
label_names, _, _ = util_label.getLabelMapping(args.label_type)
classes_json = list()
for key,value in label_names.items():
if value == '-':continue
classes_json.append(value)
target_scan=[]
if args.target_scan != '':
target_scan = util.read_txt_to_list(args.target_scan)
scan_ids = target_scan
else:
if args.type == 'train':
scan_ids = util.read_txt_to_list(define.SCANNET_SPLIT_TRAIN)
elif args.type == 'validation':
scan_ids = util.read_txt_to_list(define.SCANNET_SPLIT_VAL)
valid_scans=list()
relationships_new = dict()
relationships_new["scans"] = list()
relationships_new['neighbors'] = dict()
counter= 0
for scan_id in tqdm(sorted(scan_ids)):
# if len(target_scan) != 0: if scan_id not in target_scan: continue
if debug or args.verbose: print(scan_id)
relationships, segs_neighbors = process(define.SCANNET_DATA_PATH,scan_id,label_type = args.label_type, verbose = args.verbose)
valid_scans.append(scan_id)
relationships_new["scans"] += relationships
relationships_new['neighbors'][scan_id] = segs_neighbors
if debug: break
Path(args.pth_out).mkdir(parents=True, exist_ok=True)
pth_args = os.path.join(args.pth_out,'args.json')
with open(pth_args, 'w') as f:
tmp = vars(args)
json.dump(tmp, f, indent=2)
pth_relationships_json = os.path.join(args.pth_out, "relationships_" + args.type + ".json")
with open(pth_relationships_json, 'w') as f:
json.dump(relationships_new, f)
pth_classes = os.path.join(args.pth_out, 'classes.txt')
with open(pth_classes,'w') as f:
for name in classes_json:
if name == '-': continue
f.write('{}\n'.format(name))
pth_relation = os.path.join(args.pth_out, 'relationships.txt')
with open(pth_relation,'w') as f:
f.write('{}\n'.format(name_same_segment))
pth_split = os.path.join(args.pth_out, args.type+'_scans.txt')
with open(pth_split,'w') as f:
for name in valid_scans:
f.write('{}\n'.format(name))
``` |
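The acceptance rule used when mapping a predicted segment to a GT segment (the `--corr_thres` / `--occ_thres` filtering in `process`) can be isolated as a small standalone function. The overlap counts below are invented for illustration.
```python
# Hedged sketch of the correspondence acceptance rule used above.
def accept_correspondence(counts, size_pd, corr_thres=0.5, occ_thres=0.75):
    """counts: {gt_segment_id: number of points of this predicted segment that fall on it}."""
    best_gt = max(counts, key=counts.get)
    ratios = sorted((c / size_pd for c in counts.values()), reverse=True)
    # mirrors process(): the occupancy ratio is only checked when there are more than two candidates
    occ_ratio = ratios[1] / ratios[0] if len(ratios) > 2 else 0
    if ratios[0] > corr_thres and occ_ratio < occ_thres:
        return best_gt
    return None

print(accept_correspondence({7: 800, 9: 100, 11: 40}, size_pd=1000))   # 7 (dominant overlap)
print(accept_correspondence({7: 450, 9: 430, 11: 50}, size_pd=1000))   # None (ambiguous, below corr_thres)
```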
{
"source": "jkabalar/gradslam",
"score": 3
} |
#### File: gradslam/datasets/rio10.py
```python
import os
import warnings
from collections import OrderedDict
from typing import Optional, Union
import imageio
import cv2
import numpy as np
import torch
from torch.utils import data
import datautils
__all__ = ["RIO10"]
class RIO10(data.Dataset):
r"""Expecting the RIO10 Dataset to be in this format:
.. code-block::
| ├── RIO10
| │ ├── seq02
| │ │ ├── seq02_01/
| │ │ ├──────seq
| │ │ ├──────── camera.yaml
| │ │ ├──────── frame-000000.color.jpg
| │ │ ├──────── frame-000000.pose.txt
| │ │ ├──────── frame-000000.rendered.depth.png
| │ │ ├──────── frame-000001.color.jpg
| │ │ ├──────── frame-000001.pose.txt
| │ │ ├──────── frame-000001.rendered.depth.png
| │ │ ├──────── .....
| │ │ ├──────instances
| │ │ ├──────── frame-000000.color.jpg
| │ │ ├──────── frame-000001.color.jpg
| │ │ ├── seq02_02/
| │ │ ├── .....
| │ │ └── intrinsics.txt
| │ ├── seq03
| │ ├── ...
|
|
"""
def __init__(self,
basedir: str,
scenes: Union[tuple, str, None],
start: Optional[int] = 0,
end: Optional[int] = -1,
height: int = 540,
width: int = 960,
seg_classes: str = "nyu40",
channels_first: bool = False,
normalize_color: bool = False,
*,
return_depth: bool = True,
return_intrinsics: bool = True,
return_pose: bool = False,
return_transform: bool = False,
return_names: bool = False,
return_labels: bool = False,
):
super(RIO10, self).__init__()
#height: 960
#width: 540
basedir = (os.path.join(basedir))
self.height = height
self.width = width
self.height_downsample_ratio = float(height) / 480
self.width_downsample_ratio = float(width) / 640
self.seg_classes = seg_classes
self.channels_first = channels_first
self.normalize_color = normalize_color
self.return_depth = return_depth
self.return_intrinsics = return_intrinsics
self.return_pose = return_pose
self.return_transform = return_transform
self.return_names = return_names
self.return_labels = return_labels
self.color_encoding = get_color_encoding(self.seg_classes)
self.intrinsics = dict()
# Start and end frames. Used to determine sequence length.
self.start = start
self.end = end
full_sequence = self.end == -1
if start < 0:
raise ValueError("Start frame cannot be less than 0.")
if not (end == -1 or end > start):
raise ValueError(
"End frame ({}) should be equal to -1 or greater than start ({})".format(
end, start
)
)
self.seqlen = self.end - self.start
# scenes should be a tuple
if isinstance(scenes, str):
if os.path.isfile(scenes):
with open(scenes, "r") as f:
scenes = tuple(f.read().split("\n"))
else:
raise ValueError("incorrect filename: {} doesn't exist".format(scenes))
elif not (scenes is None or isinstance(scenes, tuple)):
msg = "scenes should either be path to split.txt or tuple of scenes or None, but was of type %r instead"
raise TypeError(msg % type(scenes))
# Get a list of all color, depth, pose, label and intrinsics files.
colorfiles, depthfiles, posefiles = [], [], []
labelfiles, intrinsicsfiles, scene_names = [], [], []
scene_names = os.listdir(basedir)
print(scene_names)
for index, tmp in enumerate(scene_names):
if tmp =="intrinsics.txt":
print("Found intrinsics")
self._get_intrinsics(basedir+"/"+tmp)
scene_names.pop(index)
if len(self.intrinsics) == 74:
print("Loaded Intrinsics correctly")
print(self.intrinsics)
for seqpath in scene_names:
if scenes is not None:
if seqpath not in scenes:
continue
try:
types = os.listdir(basedir+"/"+seqpath+"/")
except OSError:
print("Invalid directory")
break
type_files = "seq"
type_instance = "instance"
if type_files not in types:
print("no files found in this scene")
continue
seq_colorfiles, seq_depthfiles, seq_posefiles = [], [], []
seq_labelfiles, seq_intrinsicsfiles = [], []
files = []
try:
files = os.listdir(os.path.join(basedir+"/"+seqpath+"/"+type_files+"/"))
files.sort()
except OSError:
print("Invalid directory")
break
for i in range(len(files)):
prefix, extension = os.path.splitext(files[i])
extension = extension[1:]
if extension=="jpg":
#colorfiles
seq_colorfiles.append(os.path.join(basedir+"/"+seqpath+"/"+type_files+"/"+files[i]))
if extension=="png":
#depthfiles
seq_depthfiles.append(os.path.join(basedir+"/"+seqpath+"/"+type_files+"/"+files[i]))
if extension=="txt":
#posefiles
seq_posefiles.append(os.path.join(basedir+"/"+seqpath+"/"+type_files+"/"+files[i]))
#if extension=="yaml":
#intrinsicsfiles
# seq_intrinsicsfiles.append(files[i])
colorfiles.append(seq_colorfiles)
depthfiles.append(seq_depthfiles)
posefiles.append(seq_posefiles)
#intrinsicsfiles.append(seq_intrinsicsfiles[0])
if type_instance not in types:
print("no Instance files registered in this scene")
continue
try:
files = os.listdir(os.path.join(basedir+"/"+seqpath+"/"+type_instance+"/"))
files.sort()
except OSError:
print("Invalid directory")
break
for i in range(len(files)):
prefix, extension = os.path.splitext(files[i])
extension = extension[1:]
if extension=="jpg" or extension=="png":
#labelfiles
seq_labelfiles.append(os.path.join(basedir+"/"+seqpath+"/"+type_instance+"/"+files[i]))
else:
print("Folder contains files of wrong type")
break
labelfiles.append(seq_labelfiles)
self.num_sequences = len(colorfiles)
# Class members to store the list of valid filepaths.
self.colorfiles = colorfiles
self.depthfiles = depthfiles
self.posefiles = posefiles
self.labelfiles = labelfiles
self.intrinsicsfiles = intrinsicsfiles
self.seqnames = scene_names
# Scaling factor for depth images
self.scaling_factor = 1000.0
def __len__(self):
r"""Returns the length of the dataset. """
return self.num_sequences
def __getitem__(self, idx: int):
r"""Returns the data from the sequence at index idx.
Returns:
color_seq (torch.Tensor): Sequence of rgb images of each frame
depth_seq (torch.Tensor): Sequence of depths of each frame
pose_seq (torch.Tensor): Sequence of poses of each frame
transform_seq (torch.Tensor): Sequence of transformations between each frame in the sequence and the
previous frame. Transformations are w.r.t. the first frame in the sequence having identity pose
(relative transformations with first frame's pose as the reference transformation). First
transformation in the sequence will always be `torch.eye(4)`.
label_seq (torch.Tensor): Sequence of semantic segmentation labels
intrinsics (torch.Tensor): Intrinsics for the current sequence
seqname (str): Name of the sequence
Shape:
- color_seq: :math:`(L, H, W, 3)` if `channels_first` is False, else :math:`(L, 3, H, W)`. `L` denotes
sequence length.
- depth_seq: :math:`(L, H, W, 1)` if `channels_first` is False, else :math:`(L, 1, H, W)`. `L` denotes
sequence length.
- pose_seq: :math:`(L, 4, 4)` where `L` denotes sequence length.
- transform_seq: :math:`(L, 4, 4)` where `L` denotes sequence length.
- label_seq: :math:`(L, H, W)` where `L` denotes sequence length.
- intrinsics: :math:`(1, 4, 4)`
"""
# Read in the color, depth, pose, label and intrinstics info.
color_seq_path = self.colorfiles[idx]
depth_seq_path = self.depthfiles[idx]
pose_seq_path = self.posefiles[idx]
if self.return_labels:
label_seq_path = self.labelfiles[idx]
intrinsics_path = self.intrinsicsfiles[idx]
seqname = self.seqnames[idx]
print("Getting scene"+seqname)
color_seq, depth_seq, pose_seq, label_seq = [], [], [], []
poses = []
self.seqlen = 8
output = []
if self.return_intrinsics:
intrinsics = self.intrinsics[seqname]
intrinsics = torch.from_numpy(intrinsics).float()
output.append(intrinsics)
for i in range(self.seqlen):
color = np.asarray(imageio.imread(color_seq_path[i]), dtype=float)
color = self._preprocess_color(color)
color = torch.from_numpy(color)
color_seq.append(color)
if self.return_depth:
depth = np.asarray(imageio.imread(depth_seq_path[i]), dtype=np.int64)
depth = self._preprocess_depth(depth)
depth = torch.from_numpy(depth)
depth_seq.append(depth)
# if self.return_pose or self.return_transform:
# pose = np.loadtxt(pose_seq_path[i]).astype(float)
# poses.append(pose)
# pose = torch.from_numpy(pose)
# pose_seq.append(pose)
# if self.return_labels:
# label = np.asarray(imageio.imread(label_seq_path[i]), dtype=np.uint8)
# label = self._preprocess_label(label)
# label = torch.from_numpy(label)
# label_seq.append(label)
color_seq = torch.stack(color_seq, 0).float()
output.append(color_seq)
if self.return_depth:
depth_seq = torch.stack(depth_seq, 0).float()
output.append(depth_seq)
# if self.return_pose:
# pose_seq = torch.stack(pose_seq, 0).float()
# pose_seq = self._preprocess_poses(pose_seq)
# output.append(pose_seq)
# if self.return_transform:
# transform_seq = datautils.poses_to_transforms(poses)
# transform_seq = [torch.from_numpy(x).float() for x in transform_seq]
# transform_seq = torch.stack(transform_seq, 0).float()
# output.append(transform_seq)
if self.return_names:
output.append(seqname)
# if self.return_labels:
# label_seq = torch.stack(label_seq, 0).float()
# output.append(label_seq)
return tuple(output)
def _get_intrinsics(self, datapath: str):
with open(datapath, "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip().split()
seq_name = line[0]
fx, fy, cx, cy = line[1], line[2], line[3], line[4]
intrinsics = np.zeros((3, 3))
intrinsics[0,0] = fx
intrinsics[1,1] = fy
intrinsics[0,2] = cx
                intrinsics[1,2] = cy
intrinsics[2,2] = 1
self.intrinsics[seq_name] = self._preprocess_intrinsics(intrinsics)
def _preprocess_intrinsics(self, intrinsics: Union[torch.Tensor, np.ndarray]):
r"""Preprocesses the intrinsics by scaling `fx`, `fy`, `cx`, `cy` based on new frame size and expanding the
0-th dimension.
Args:
intrinsics (torch.Tensor or np.ndarray): Intrinsics matrix to be preprocessed
Returns:
Output (torch.Tensor or np.ndarray): Preprocessed intrinsics
Shape:
- intrinsics: :math:`(4, 4)`
- Output: :math:`(1, 4, 4)`
"""
scaled_intrinsics = datautils.scale_intrinsics(
intrinsics, self.height_downsample_ratio, self.width_downsample_ratio
)
if torch.is_tensor(scaled_intrinsics):
return scaled_intrinsics.unsqueeze(0)
elif isinstance(scaled_intrinsics, np.ndarray):
return np.expand_dims(scaled_intrinsics, 0)
def _preprocess_color(self, color: np.ndarray):
r"""Preprocesses the color image by resizing to :math:`(H, W, C)`, (optionally) normalizing values to
:math:`[0, 1]`, and (optionally) using channels first :math:`(C, H, W)` representation.
Args:
color (np.ndarray): Raw input rgb image
        Returns:
np.ndarray: Preprocessed rgb image
Shape:
- Input: :math:`(H_\text{old}, W_\text{old}, C)`
- Output: :math:`(H, W, C)` if `self.channels_first == False`, else :math:`(C, H, W)`.
"""
color = cv2.resize(
color, (self.width, self.height), interpolation=cv2.INTER_LINEAR
)
if self.normalize_color:
color = datautils.normalize_image(color)
if self.channels_first:
color = datautils.channels_first(color)
return color
def _preprocess_depth(self, depth: np.ndarray):
r"""Preprocesses the depth image by resizing, adding channel dimension, and scaling values to meters. Optionally
converts depth from channels last :math:`(H, W, 1)` to channels first :math:`(1, H, W)` representation.
Args:
depth (np.ndarray): Raw depth image
Returns:
np.ndarray: Preprocessed depth
Shape:
- depth: :math:`(H_\text{old}, W_\text{old})`
- Output: :math:`(H, W, 1)` if `self.channels_first == False`, else :math:`(1, H, W)`.
"""
depth = cv2.resize(
depth.astype(float),
(self.width, self.height),
interpolation=cv2.INTER_NEAREST,
)
depth = np.expand_dims(depth, -1)
if self.channels_first:
depth = datautils.channels_first(depth)
return depth / self.scaling_factor
def get_color_encoding(seg_classes):
r"""Gets the color palette for different sets of labels (`"nyu40"` or `"scannet20"`)
Args:
seg_classes (str): Determines whether to use `"nyu40"` labels or `"scannet20"`
Returns:
Output (OrderedDict): Label names as keys and color palettes as values.
"""
if seg_classes.lower() == "nyu40":
# Color palette for nyu40 labels
return OrderedDict(
[
("unlabeled", (0, 0, 0)),
("wall", (174, 199, 232)),
("floor", (152, 223, 138)),
("cabinet", (31, 119, 180)),
("bed", (255, 187, 120)),
("chair", (188, 189, 34)),
("sofa", (140, 86, 75)),
("table", (255, 152, 150)),
("door", (214, 39, 40)),
("window", (197, 176, 213)),
("bookshelf", (148, 103, 189)),
("picture", (196, 156, 148)),
("counter", (23, 190, 207)),
("blinds", (178, 76, 76)),
("desk", (247, 182, 210)),
("shelves", (66, 188, 102)),
("curtain", (219, 219, 141)),
("dresser", (140, 57, 197)),
("pillow", (202, 185, 52)),
("mirror", (51, 176, 203)),
("floormat", (200, 54, 131)),
("clothes", (92, 193, 61)),
("ceiling", (78, 71, 183)),
("books", (172, 114, 82)),
("refrigerator", (255, 127, 14)),
("television", (91, 163, 138)),
("paper", (153, 98, 156)),
("towel", (140, 153, 101)),
("showercurtain", (158, 218, 229)),
("box", (100, 125, 154)),
("whiteboard", (178, 127, 135)),
("person", (120, 185, 128)),
("nightstand", (146, 111, 194)),
("toilet", (44, 160, 44)),
("sink", (112, 128, 144)),
("lamp", (96, 207, 209)),
("bathtub", (227, 119, 194)),
("bag", (213, 92, 176)),
("otherstructure", (94, 106, 211)),
("otherfurniture", (82, 84, 163)),
("otherprop", (100, 85, 144)),
]
)
elif seg_classes.lower() == "scannet20":
# Color palette for scannet20 labels
return OrderedDict(
[
("unlabeled", (0, 0, 0)),
("wall", (174, 199, 232)),
("floor", (152, 223, 138)),
("cabinet", (31, 119, 180)),
("bed", (255, 187, 120)),
("chair", (188, 189, 34)),
("sofa", (140, 86, 75)),
("table", (255, 152, 150)),
("door", (214, 39, 40)),
("window", (197, 176, 213)),
("bookshelf", (148, 103, 189)),
("picture", (196, 156, 148)),
("counter", (23, 190, 207)),
("desk", (247, 182, 210)),
("curtain", (219, 219, 141)),
("refrigerator", (255, 127, 14)),
("showercurtain", (158, 218, 229)),
("toilet", (44, 160, 44)),
("sink", (112, 128, 144)),
("bathtub", (227, 119, 194)),
("otherfurniture", (82, 84, 163)),
]
)
def nyu40_to_scannet20(label):
r"""Remaps a label image from the `"nyu40"` class palette to the `"scannet20"` class palette"""
    # Ignore indices 13, 15, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 29, 30, 31, 32, 35, 37, 38, 40
# Because, these classes from 'nyu40' are absent from 'scannet20'. Our label files are in
# 'nyu40' format, hence this 'hack'. To see detailed class lists visit:
# http://kaldir.vc.in.tum.de/scannet_benchmark/labelids_all.txt ('nyu40' labels)
# http://kaldir.vc.in.tum.de/scannet_benchmark/labelids.txt ('scannet20' labels)
# The remaining labels are then to be mapped onto a contiguous ordering in the range [0,20]
# The remapping array comprises tuples (src, tar), where 'src' is the 'nyu40' label, and 'tar' is the
# corresponding target 'scannet20' label
remapping = [
(0, 0),
(13, 0),
(15, 0),
(17, 0),
(18, 0),
(19, 0),
(20, 0),
(21, 0),
(22, 0),
(23, 0),
(25, 0),
(26, 0),
(27, 0),
(29, 0),
(30, 0),
(31, 0),
(32, 0),
(35, 0),
(37, 0),
(38, 0),
(40, 0),
(14, 13),
(16, 14),
(24, 15),
(28, 16),
(33, 17),
(34, 18),
(36, 19),
(39, 20),
]
for src, tar in remapping:
label[np.where(label == src)] = tar
return label
``` |
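A hedged usage sketch for the loader follows. The dataset root and scene name are placeholders, the pose/label outputs stay disabled since those branches are commented out above, and the scene name is assumed to appear as a key in `intrinsics.txt`.
```python
# Hedged sketch: iterating the RIO10 loader with a PyTorch DataLoader.
from torch.utils.data import DataLoader

dataset = RIO10(
    basedir="/path/to/RIO10/seq02",   # placeholder dataset root
    scenes=("seq02_01",),             # placeholder scene
    height=240,
    width=320,
    channels_first=True,
    normalize_color=True,
    return_names=True,
)
loader = DataLoader(dataset, batch_size=1, shuffle=False)

# output order with the defaults above: intrinsics, color_seq, depth_seq, seqname
for intrinsics, colors, depths, names in loader:
    print(names[0], colors.shape, depths.shape)   # e.g. (1, 8, 3, 240, 320) and (1, 8, 1, 240, 320)
    break
```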
{
"source": "jkabalar/kapture-localization",
"score": 2
} |
#### File: kapture_localization/colmap/colmap_command.py
```python
import logging
import os
import os.path as path
import subprocess
from typing import List, Optional
import kapture
from kapture.utils.paths import safe_remove_file
from kapture.converter.colmap.database_extra import save_match_list
logger = logging.getLogger('colmap')
# point_triangulator / image_registrator options
CONFIGS = [
# config 0
[],
# config 1
['--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_principal_point', '0',
'--Mapper.ba_refine_extra_params', '0'],
# config 2
['--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_principal_point', '0',
'--Mapper.ba_refine_extra_params', '0',
'--Mapper.min_num_matches', '4',
'--Mapper.init_min_num_inliers', '4',
'--Mapper.abs_pose_min_num_inliers', '4',
'--Mapper.abs_pose_min_inlier_ratio', '0.05',
'--Mapper.ba_local_max_num_iterations', '50',
'--Mapper.abs_pose_max_error', '20',
'--Mapper.filter_max_reproj_error', '12'],
# config 3
['--Mapper.ba_refine_focal_length', '1',
'--Mapper.ba_refine_principal_point', '0',
'--Mapper.ba_refine_extra_params', '0',
'--Mapper.min_num_matches', '4',
'--Mapper.init_min_num_inliers', '4',
'--Mapper.abs_pose_min_num_inliers', '4',
'--Mapper.abs_pose_min_inlier_ratio', '0.05',
'--Mapper.ba_local_max_num_iterations', '50',
'--Mapper.abs_pose_max_error', '20',
'--Mapper.filter_max_reproj_error', '12']
]
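# Hedged note (added for clarity, not in the original file): each entry of CONFIGS is a list of
# extra command-line options passed to point_triangulator / image_registrator / mapper.
# Config 1 freezes the camera intrinsics during bundle adjustment; configs 2 and 3 additionally
# relax the minimum match / inlier thresholds for sparsely matched scenes (config 3 also
# re-enables focal-length refinement). For example, a caller may pass CONFIGS[1] as
# point_triangulator_options to the wrappers defined below.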
def run_colmap_command(colmap_binary_path: str, args: List[str]) -> None:
"""
run any colmap command
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param args: list of arguments that will be passed to the colmap command
:type args: List[str]
:raises ValueError: colmap subprocess did not return 0
"""
args.insert(0, colmap_binary_path)
logger.info(args)
colmap_process = subprocess.Popen(args)
colmap_process.wait()
if colmap_process.returncode != 0:
raise ValueError(
'\nSubprocess Error (Return code:'
f' {colmap_process.returncode} )')
def run_feature_extractor(colmap_binary_path: str,
colmap_use_cpu: bool,
colmap_gpu_index: str,
colmap_db_path: str,
images_path: str,
image_list_path: str,
colmap_options: List[str] = None) -> None:
"""
run colmap feature_extractor:
Perform feature extraction or import features for a set of images
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param colmap_use_cpu: add --SiftExtraction.use_gpu 0
:type colmap_use_cpu: bool
:param colmap_gpu_index: add --SiftExtraction.gpu_index {colmap_gpu_index}
:type colmap_gpu_index: str
:param colmap_db_path: value for --database_path
:type colmap_db_path: str
:param images_path: value for --image_path
:type images_path: str
:param image_list_path: value for --image_list_path
:type image_list_path: str
:param colmap_options: list of additional parameters to add to the command, defaults to None
:type colmap_options: List[str], optional
"""
feature_args = ["feature_extractor",
"--database_path",
colmap_db_path,
"--image_path",
images_path,
"--image_list_path",
image_list_path]
if colmap_options is not None and len(colmap_options) > 0:
feature_args += colmap_options
if colmap_use_cpu:
feature_args += [
"--SiftExtraction.use_gpu",
"0"
]
elif colmap_gpu_index:
feature_args += [
"--SiftExtraction.gpu_index",
colmap_gpu_index
]
run_colmap_command(colmap_binary_path, feature_args)
def run_vocab_tree_matcher(colmap_binary_path: str,
colmap_use_cpu: bool,
colmap_gpu_index: str,
colmap_db_path: str,
vocab_tree_path: str,
images_path: str = "") -> None:
"""
run colmap vocab_tree_matcher:
Perform feature matching after performing feature extraction
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param colmap_use_cpu: add --SiftExtraction.use_gpu 0
:type colmap_use_cpu: bool
:param colmap_gpu_index: add --SiftExtraction.gpu_index {colmap_gpu_index}
:type colmap_gpu_index: str
:param colmap_db_path: value for --database_path
:type colmap_db_path: str
:param vocab_tree_path: value for --VocabTreeMatching.vocab_tree_path
:type vocab_tree_path: str
:param images_path: value for --VocabTreeMatching.match_list_path, defaults to ""
:type images_path: str, optional
"""
vocab_tree_matcher_args = ["vocab_tree_matcher",
"--database_path",
colmap_db_path,
"--VocabTreeMatching.vocab_tree_path",
vocab_tree_path]
if images_path != "":
vocab_tree_matcher_args += ["--VocabTreeMatching.match_list_path", images_path]
if colmap_use_cpu:
vocab_tree_matcher_args += [
"--SiftMatching.use_gpu",
"0"
]
elif colmap_gpu_index:
vocab_tree_matcher_args += [
"--SiftMatching.gpu_index",
colmap_gpu_index
]
run_colmap_command(colmap_binary_path, vocab_tree_matcher_args)
def run_matches_importer(colmap_binary_path: str,
colmap_use_cpu: bool,
colmap_gpu_index: Optional[str],
colmap_db_path: str,
match_list_path: str) -> None:
"""
run colmap matches_importer:
Perform geometric verification on matches
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param colmap_use_cpu: add --SiftExtraction.use_gpu 0
:type colmap_use_cpu: bool
:param colmap_gpu_index: add --SiftExtraction.gpu_index {colmap_gpu_index}
:type colmap_gpu_index: str
:param colmap_db_path: value for --database_path
:type colmap_db_path: str
:param match_list_path: value for --match_list_path
:type match_list_path: str
"""
matches_importer_args = ["matches_importer",
"--database_path",
colmap_db_path,
"--match_list_path",
match_list_path,
"--match_type",
'pairs']
if colmap_use_cpu:
matches_importer_args += [
"--SiftMatching.use_gpu",
"0"
]
elif colmap_gpu_index:
matches_importer_args += [
"--SiftMatching.gpu_index",
colmap_gpu_index
]
run_colmap_command(colmap_binary_path, matches_importer_args)
def run_matches_importer_from_kapture_matches(colmap_binary_path: str,
colmap_use_cpu: bool,
colmap_gpu_index: Optional[str],
colmap_db_path: str,
kapture_matches: kapture.Matches,
force: bool = True,
clean: bool = True) -> None:
"""
export list of matches from kapture data then run colmap matches_importer
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param colmap_use_cpu: add --SiftExtraction.use_gpu 0
:type colmap_use_cpu: bool
:param colmap_gpu_index: add --SiftExtraction.gpu_index {colmap_gpu_index}
:type colmap_gpu_index: str
:param colmap_db_path: value for --database_path
:type colmap_db_path: str
:param kapture_matches: kapture data that contains the matches (that are already in the colmap database) to verify
:type kapture_matches: kapture.Matches
:param force: do not ask before overwriting match_list.txt, defaults to True
:type force: bool, optional
:param clean: remove match_list.txt before exiting, defaults to True
:type clean: bool, optional
"""
db_dir = path.dirname(colmap_db_path)
match_list_path = path.join(db_dir, 'match_list.txt')
safe_remove_file(match_list_path, force)
save_match_list(kapture_matches, match_list_path)
run_matches_importer(colmap_binary_path, colmap_use_cpu, colmap_gpu_index, colmap_db_path, match_list_path)
if clean:
os.remove(match_list_path)
def run_point_triangulator(colmap_binary_path: str,
colmap_db_path: str,
images_path: str,
input_path: str,
output_path: str,
point_triangulator_options: List[str]) -> None:
"""
run colmap point_triangulator:
Triangulate all observations of registered images in an existing model using the feature matches in a database
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param colmap_db_path: value for --database_path
:type colmap_db_path: str
:param images_path: value for --image_path
:type images_path: str
:param input_path: value for --input_path
:type input_path: str
:param output_path: value for --output_path
:type output_path: str
:param point_triangulator_options: list of additional parameters to add to the command
:type point_triangulator_options: List[str]
"""
point_triangulator_args = ["point_triangulator",
"--database_path",
colmap_db_path,
"--image_path",
images_path,
"--input_path",
input_path,
"--output_path",
output_path]
if point_triangulator_options is not None and len(point_triangulator_options) > 0:
point_triangulator_args += point_triangulator_options
run_colmap_command(colmap_binary_path, point_triangulator_args)
def run_mapper(colmap_binary_path: str,
colmap_db_path: str,
images_path: str,
input_path: Optional[str],
output_path: str,
mapper_options: List[str]) -> None:
"""
run colmap mapper:
Sparse 3D reconstruction / mapping of the dataset using SfM after performing feature extraction and matching
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param colmap_db_path: value for --database_path
:type colmap_db_path: str
:param images_path: value for --image_path
:type images_path: str
:param input_path: value for --input_path
:type input_path: Optional[str]
:param output_path: value for --output_path
:type output_path: str
:param mapper_options: list of additional parameters to add to the command
:type mapper_options: List[str]
"""
mapper_args = ["mapper",
"--database_path",
colmap_db_path,
"--image_path",
images_path,
"--output_path",
output_path]
if input_path is not None:
mapper_args += [
"--input_path",
input_path]
if mapper_options is not None and len(mapper_options) > 0:
mapper_args += mapper_options
run_colmap_command(colmap_binary_path, mapper_args)
def run_bundle_adjustment(colmap_binary_path: str,
input_path: str,
output_path: str,
output_rig_path: str = "") -> None:
"""
run colmap bundle_adjuster or colmap rig_bundle_adjuster (if output_rig_path is provided)
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param input_path: value for --input_path
:type input_path: str
:param output_path: value for --output_path
:type output_path: str
:param output_rig_path: value for --rig_config_path, if set, run rig_bundle_adjuster instead of bundle_adjuster
:type output_rig_path: str, optional
"""
if output_rig_path:
logging.info("Run bundle adjuster with rig")
args = ["rig_bundle_adjuster", "--rig_config_path", output_rig_path]
else:
args = ["bundle_adjuster"]
args.extend(["--input_path", input_path,
"--output_path", output_path])
run_colmap_command(colmap_binary_path, args)
def run_image_registrator(colmap_binary_path: str,
colmap_db_path: str,
input_path: str,
output_path: str,
image_registrator_options: List[str]) -> None:
"""
run colmap image_registrator:
Register new images in the database against an existing model
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param colmap_db_path: value for --database_path
:type colmap_db_path: str
:param input_path: value for --input_path
:type input_path: str
:param output_path: value for --output_path
:type output_path: str
:param image_registrator_options: list of additional parameters to add to the command
:type image_registrator_options: List[str]
"""
image_registrator_args = ["image_registrator",
"--database_path",
colmap_db_path,
"--input_path",
input_path,
"--output_path",
output_path]
if image_registrator_options is not None and len(image_registrator_options) > 0:
image_registrator_args += image_registrator_options
run_colmap_command(colmap_binary_path, image_registrator_args)
def run_model_converter(colmap_binary_path: str,
input_path: str,
output_path: str,
output_type: str = 'TXT') -> None:
"""
run colmap model_converter with --output_type TXT:
convert reconstruction from binary files to TXT files
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param input_path: value for --input_path
:type input_path: str
:param output_path: value for --output_path
:type output_path: str
"""
model_converter_args = ["model_converter",
"--input_path",
input_path,
"--output_path",
output_path,
"--output_type",
output_type]
run_colmap_command(colmap_binary_path, model_converter_args)
def run_image_undistorter(colmap_binary_path: str,
image_path: str,
input_path: str,
output_path: str) -> None:
"""
run colmap image_undistorter:
Undistort images and/or export them for MVS or to external dense reconstruction software, such as CMVS/PMVS
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param image_path: value for -image_path
:type image_path: str
:param input_path: value for --input_path
:type input_path: str
:param output_path: value for --output_path
:type output_path: str
"""
image_undistorter_args = ["image_undistorter",
"--image_path",
image_path,
"--input_path",
input_path,
"--output_path",
output_path]
run_colmap_command(colmap_binary_path, image_undistorter_args)
def run_patch_match_stereo(colmap_binary_path: str,
workspace_path: str) -> None:
"""
run colmap patch_match_stereo:
Dense 3D reconstruction / mapping using MVS after running the image_undistorter to initialize the workspace
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param workspace_path: value for --workspace_path
:type workspace_path: str
"""
patch_match_stereo_args = ["patch_match_stereo",
"--workspace_path",
workspace_path]
run_colmap_command(colmap_binary_path, patch_match_stereo_args)
def run_stereo_fusion(colmap_binary_path: str,
workspace_path: str,
output_path: str) -> None:
"""
run colmap stereo_fusion
:param colmap_binary_path: path to colmap executable
:type colmap_binary_path: str
:param workspace_path: value for --workspace_path
:type workspace_path: str
:param output_path: value for --output_path
:type output_path: str
"""
stereo_fusion_args = ["stereo_fusion",
"--workspace_path",
workspace_path,
"--output_path",
output_path]
run_colmap_command(colmap_binary_path, stereo_fusion_args)
```
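A hedged sketch of chaining the wrappers above into a minimal mapping run follows. Every path is a placeholder, the sparse output directory is created up front, and COLMAP itself must be installed at `colmap_binary_path`.
```python
# Hedged sketch: feature extraction, matching and mapping with the wrappers above.
import os

colmap_binary_path = "colmap"                      # assumes colmap is on PATH
db_path = "/tmp/workspace/colmap.db"               # placeholder paths
images_path = "/data/images"
image_list_path = "/tmp/workspace/image_list.txt"
sfm_output = "/tmp/workspace/sparse"
os.makedirs(sfm_output, exist_ok=True)

run_feature_extractor(colmap_binary_path, colmap_use_cpu=True, colmap_gpu_index=None,
                      colmap_db_path=db_path, images_path=images_path,
                      image_list_path=image_list_path)
run_vocab_tree_matcher(colmap_binary_path, colmap_use_cpu=True, colmap_gpu_index=None,
                       colmap_db_path=db_path, vocab_tree_path="/data/vocab_tree.bin")
# CONFIGS[1] keeps the camera intrinsics fixed during bundle adjustment (see the list above).
run_mapper(colmap_binary_path, db_path, images_path, input_path=None,
           output_path=sfm_output, mapper_options=CONFIGS[1])
```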
#### File: kapture_localization/localization/reprojection_error.py
```python
import numpy as np
import quaternion
from typing import List
import cv2
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
def compute_reprojection_error(pose: kapture.PoseTransform, num_inliers: int, inliers: List,
points2D, points3D, K, distortion):
"""
compute reprojection error from a pose, a list of inlier indexes, the full list of 2D points and 3D points
and camera parameters
"""
    obs_2D = np.empty((num_inliers, 2), dtype=float)
    obs_3D = np.empty((1, num_inliers, 3), dtype=float)
for i, index in enumerate(inliers):
obs_2D[i, :] = points2D[index]
obs_3D[0, i, :] = points3D[index]
rvec, _ = cv2.Rodrigues(quaternion.as_rotation_matrix(pose.r))
tvec = pose.t
estimated_points, _ = cv2.projectPoints(objectPoints=obs_3D,
rvec=rvec, tvec=tvec, cameraMatrix=K, distCoeffs=distortion)
estimated_points = estimated_points.reshape(obs_2D.shape)
diff = estimated_points - obs_2D
error = np.linalg.norm(diff, axis=1)
residuals = np.sum(error)
reprojection_error = residuals / num_inliers
return reprojection_error
```
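A hedged sketch exercising `compute_reprojection_error` on synthetic data follows. It assumes an identity pose, a simple pinhole camera with zero distortion, and that kapture's `PoseTransform` accepts a list quaternion and translation; the 2D observations are constructed by hand with a known 1-pixel offset.
```python
# Hedged sketch: reprojection error on synthetic points with an identity pose.
import numpy as np

import kapture

# three synthetic 3D points in front of the camera and a pinhole intrinsic matrix
points3D = np.array([[0.0, 0.0, 5.0], [0.5, -0.2, 4.0], [-0.3, 0.1, 6.0]])
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])
distortion = np.zeros(5)

# project with the same pinhole model, then offset every observation by 1 px in x
points2D = (points3D[:, :2] / points3D[:, 2:]) * 600.0 + np.array([320.0, 240.0])
points2D[:, 0] += 1.0

pose = kapture.PoseTransform(r=[1.0, 0.0, 0.0, 0.0], t=[0.0, 0.0, 0.0])  # identity pose
error = compute_reprojection_error(pose, len(points3D), list(range(len(points3D))),
                                   points2D, points3D, K, distortion)
print(error)   # ~1.0 pixel by construction
```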
#### File: kapture_localization/matching/matching.py
```python
from abc import ABC, abstractmethod
import torch
import numpy as np
from kapture_localization.utils.logging import getLogger
class MatchPairGenerator(ABC):
@abstractmethod
def match_descriptors(self, descriptors_1, descriptors_2):
raise NotImplementedError()
class MatchPairNnTorch(MatchPairGenerator):
def __init__(self, use_cuda=True):
super().__init__()
self._device = torch.device("cuda:0"
if use_cuda and torch.cuda.is_available()
else "cpu")
def match_descriptors(self, descriptors_1, descriptors_2):
if descriptors_1.shape[0] == 0 or descriptors_2.shape[0] == 0:
return np.zeros((0, 3))
# send data to GPU
descriptors1_torch = torch.from_numpy(descriptors_1).to(self._device)
descriptors2_torch = torch.from_numpy(descriptors_2).to(self._device)
# make sure its double (because CUDA tensors only supports floating-point)
descriptors1_torch = descriptors1_torch.float()
descriptors2_torch = descriptors2_torch.float()
# sanity check
if not descriptors1_torch.device == self._device:
getLogger().debug('descriptor on device {} (requested {})'.format(descriptors1_torch.device, self._device))
if not descriptors2_torch.device == self._device:
getLogger().debug('descriptor on device {} (requested {})'.format(descriptors2_torch.device, self._device))
simmilarity_matrix = descriptors1_torch @ descriptors2_torch.t()
scores = torch.max(simmilarity_matrix, dim=1)[0]
nearest_neighbor_idx_1vs2 = torch.max(simmilarity_matrix, dim=1)[1]
nearest_neighbor_idx_2vs1 = torch.max(simmilarity_matrix, dim=0)[1]
ids1 = torch.arange(0, simmilarity_matrix.shape[0], device=descriptors1_torch.device)
# cross check
mask = ids1 == nearest_neighbor_idx_2vs1[nearest_neighbor_idx_1vs2]
matches_torch = torch.stack(
[ids1[mask].type(torch.float), nearest_neighbor_idx_1vs2[mask].type(torch.float), scores[mask]]).t()
# retrieve data back from GPU
matches = matches_torch.data.cpu().numpy()
        matches = matches.astype(float)
return matches
```
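A hedged usage sketch with synthetic, L2-normalised descriptors follows; the descriptor dimensionality and counts are arbitrary.
```python
# Hedged sketch: mutual-nearest-neighbour matching of random descriptors.
import numpy as np

rng = np.random.default_rng(0)
desc1 = rng.standard_normal((128, 64)).astype(np.float32)
desc2 = rng.standard_normal((100, 64)).astype(np.float32)
desc1 /= np.linalg.norm(desc1, axis=1, keepdims=True)
desc2 /= np.linalg.norm(desc2, axis=1, keepdims=True)

matcher = MatchPairNnTorch(use_cuda=False)
matches = matcher.match_descriptors(desc1, desc2)
# columns: index in desc1, index in desc2, similarity score of the mutual nearest neighbour
print(matches.shape, matches[:3])
```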
#### File: kapture_localization/pairing/observations.py
```python
import logging
from itertools import combinations
import multiprocessing
from typing import Dict, List, Optional, Tuple, Set
import gc
from tqdm import tqdm
from kapture_localization.utils.logging import getLogger
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
def _child_process_get_pairs(kdata_observations: List[Tuple[str, int]],
imgs_map: Set[str],
imgs_query: Optional[Set[str]]):
result_pairs = {}
pairs = list(combinations(kdata_observations, r=2)) # get all pairs from 3D point observations
if len(pairs) > 1:
for p in pairs:
img1 = p[0][0]
img2 = p[1][0]
if img1 == img2:
# skip pair if both images are the same
continue
if imgs_query is not None:
# if query images are different from mapping images, i.e. if query kapture was provided
if img1 in imgs_map and img1 not in imgs_query and img2 in imgs_map and img2 not in imgs_query:
# skip if both images are from the mapping kapture
# (because we only want to keep query-mapping pairs)
continue
if img1 in imgs_query and img2 in imgs_query:
# skip if both images are from the query kapture (because we do not want to match query-query)
continue
# ensure query-mapping order in the pair file
if img1 in imgs_query:
pair = (img1, img2)
else:
pair = (img2, img1)
if not pair[0] in result_pairs:
result_pairs[pair[0]] = {}
if not pair[1] in result_pairs[pair[0]]:
result_pairs[pair[0]][pair[1]] = 0
result_pairs[pair[0]][pair[1]] += 1
else:
if img1 not in imgs_map or img2 not in imgs_map:
continue
# ensure lexicographic order of the pairs
if img1 < img2:
pair = (img1, img2)
else:
pair = (img2, img1)
if not pair[0] in result_pairs:
result_pairs[pair[0]] = {}
if not pair[1] in result_pairs[pair[0]]:
result_pairs[pair[0]][pair[1]] = 0
result_pairs[pair[0]][pair[1]] += 1
return result_pairs
def get_observation_image_pairs(keypoints_type: str,
kdata: kapture.Kapture,
kdata_query: Optional[kapture.Kapture],
max_number_of_threads: Optional[int] = None):
"""
get observations pairs as dictionary
"""
assert kdata.records_camera is not None
imgs_map = kdata.records_camera.data_list()
if kdata_query is not None:
assert kdata_query.records_camera is not None
imgs_query = kdata_query.records_camera.data_list()
else:
imgs_query = None
all_pairs = {}
number_of_threads = multiprocessing.cpu_count() if max_number_of_threads is None else max_number_of_threads
def update_all_pairs_and_progress_bar(result):
for img1 in result:
if img1 not in all_pairs:
all_pairs[img1] = {}
for img2 in result[img1]:
if img2 not in all_pairs[img1]:
all_pairs[img1][img2] = 0
all_pairs[img1][img2] += result[img1][img2]
progress_bar.update(1)
def error_callback(e):
getLogger().critical(e)
getLogger().debug(f'computing all possible pairs from observations, max-threads={number_of_threads}')
assert kdata.observations is not None
progress_bar = tqdm(total=len(kdata.observations),
disable=getLogger().level >= logging.CRITICAL)
imgs_map_set = set(imgs_map)
imgs_query_set = set(imgs_query) if imgs_query is not None else None
with multiprocessing.Pool(number_of_threads) as pool:
for point3d_id in kdata.observations.keys():
if keypoints_type not in kdata.observations[point3d_id]:
progress_bar.update(1)
continue
pool.apply_async(_child_process_get_pairs, args=(kdata.observations[point3d_id, keypoints_type],
imgs_map_set,
imgs_query_set),
callback=update_all_pairs_and_progress_bar,
error_callback=error_callback)
pool.close()
pool.join()
progress_bar.close()
return all_pairs
def _child_process_get_observation_images(kdata_observations: List[Tuple[str, int]],
imgs_map: Set[str],
imgs_query: Optional[Set[str]]):
result_observations = {}
for image_name, _ in kdata_observations:
if image_name not in imgs_map and (imgs_query is None or image_name not in imgs_query):
continue
if image_name not in result_observations:
result_observations[image_name] = 0
result_observations[image_name] += 1
return result_observations
def get_observation_images(keypoints_type: str,
kdata: kapture.Kapture,
kdata_query: Optional[kapture.Kapture],
max_number_of_threads: Optional[int] = None):
"""
get a dictionary image -> number of observations
"""
assert kdata.records_camera is not None
imgs_map = kdata.records_camera.data_list()
if kdata_query is not None:
assert kdata_query.records_camera is not None
imgs_query = kdata_query.records_camera.data_list()
else:
imgs_query = None
result_observations = {}
number_of_threads = multiprocessing.cpu_count() if max_number_of_threads is None else max_number_of_threads
def update_observations_and_progress_bar(result):
for img1, count in result.items():
if img1 not in result_observations:
result_observations[img1] = 0
result_observations[img1] += count
progress_bar.update(1)
def error_callback(e):
getLogger().critical(e)
getLogger().debug(f'computing all possible pairs from observations, max-threads={number_of_threads}')
assert kdata.observations is not None
progress_bar = tqdm(total=len(kdata.observations),
disable=getLogger().level >= logging.CRITICAL)
imgs_map_set = set(imgs_map)
imgs_query_set = set(imgs_query) if imgs_query is not None else None
with multiprocessing.Pool(number_of_threads) as pool:
for point3d_id in kdata.observations.keys():
if keypoints_type not in kdata.observations[point3d_id]:
progress_bar.update(1)
continue
pool.apply_async(_child_process_get_observation_images,
args=(kdata.observations[point3d_id, keypoints_type],
imgs_map_set,
imgs_query_set),
callback=update_observations_and_progress_bar,
error_callback=error_callback)
pool.close()
pool.join()
progress_bar.close()
return result_observations
def get_topk_observation_pairs(all_pairs: Dict[str, Dict[str, int]],
records_camera: kapture.RecordsCamera,
topk: int):
"""
convert pairs dict to list
"""
image_pairs = []
for img1 in sorted(records_camera.data_list()):
if img1 not in all_pairs:
getLogger().debug(f'{img1} has no images sharing observations')
continue
sorted_pairs = list(sorted(all_pairs[img1].items(), key=lambda item: item[1], reverse=True))
for img2, score in sorted_pairs[0:topk]:
image_pairs.append((img1, img2, score))
return image_pairs
def get_pairs_observations(kdata: kapture.Kapture,
kdata_query: Optional[kapture.Kapture],
keypoints_type: str,
max_number_of_threads: Optional[int],
iou: bool,
topk: int):
"""
get observations pairs as list
"""
if iou:
individual_observations = get_observation_images(keypoints_type,
kdata, kdata_query,
max_number_of_threads)
gc.collect()
else:
individual_observations = None
all_pairs = get_observation_image_pairs(keypoints_type,
kdata, kdata_query,
max_number_of_threads)
if iou:
assert individual_observations is not None
final_pairs = {}
for img1 in all_pairs.keys():
for img2 in all_pairs[img1].keys():
if img1 not in final_pairs:
final_pairs[img1] = {}
union = individual_observations[img1] + individual_observations[img2] - all_pairs[img1][img2]
if union == 0:
final_pairs[img1][img2] = 0
else:
final_pairs[img1][img2] = all_pairs[img1][img2] / union
all_pairs = final_pairs
getLogger().info('ranking co-observation pairs...')
assert kdata.records_camera is not None
image_pairs = get_topk_observation_pairs(all_pairs, kdata.records_camera, topk)
return image_pairs
```
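The IoU branch of `get_pairs_observations` scores a pair by the number of co-observed 3D points divided by the union of the two images' observation counts. A tiny self-contained illustration of that formula, with made-up counts:
```python
# Illustration of the observation IoU score (counts are made up).
def observation_iou(shared: int, obs1: int, obs2: int) -> float:
    """shared = points seen by both images, obs1/obs2 = points seen by each image."""
    union = obs1 + obs2 - shared
    return shared / union if union != 0 else 0.0

print(observation_iou(shared=50, obs1=120, obs2=90))  # 0.3125
```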
#### File: kapture_localization/pose_approximation/weight_estimation.py
```python
import numpy as np
import cvxpy as cp
from .PoseApproximationMethods import PoseApproximationMethods
from kapture_localization.image_retrieval.pairing import StackedGlobalFeatures, get_similarity_matrix
from kapture_localization.utils.logging import getLogger
def get_interpolation_weights(method: PoseApproximationMethods,
query_gfeat: StackedGlobalFeatures,
map_gfeat: StackedGlobalFeatures,
topk: int,
additional_parameters: dict):
"""
compute the pose weights for the given method as a dict { query image name -> list(map image name, weight) }
:param method: pose approximation method to use
:type method: PoseApproximationMethods
:param query_gfeat: global features for the query images
:type query_gfeat: StackedGlobalFeatures
:param map_gfeat: global features for the map images
:type map_gfeat: StackedGlobalFeatures
:param topk: the max number of top retained images
:type topk: int
:param additional_parameters: method specific parameters
:type additional_parameters: dict
"""
similarity_matrix = get_similarity_matrix(query_gfeat, map_gfeat)
local_topk = min(topk, similarity_matrix.shape[1])
if local_topk != topk:
getLogger().warning(f'topk was set to {local_topk} instead of {topk} because there were not enough map data')
similarity_sorted = np.empty((similarity_matrix.shape[0], local_topk), dtype=int)
for i, scores in enumerate(similarity_matrix):
indexes = np.argsort(-scores)
similarity_sorted[i, :] = indexes[:local_topk]
if method == PoseApproximationMethods.equal_weighted_barycenter:
weights = _get_EWB_weights(similarity_matrix.shape[0], local_topk)
elif method == PoseApproximationMethods.barycentric_descriptor_interpolation:
weights = _get_BDI_weights(similarity_sorted, query_gfeat, map_gfeat)
elif method == PoseApproximationMethods.cosine_similarity:
assert 'alpha' in additional_parameters
weights = _get_CSI_weights(similarity_matrix, similarity_sorted, additional_parameters['alpha'])
else:
raise NotImplementedError(f'{method} - unknown PoseApproximationMethods')
weights_dict = {}
for i, indexes in enumerate(similarity_sorted):
query_name = query_gfeat.index[i]
weights_dict[query_name] = list(zip(map_gfeat.index[indexes], weights[i, :]))
return weights_dict
def _get_EWB_weights(number_of_queries: int, topk: int):
"""
get equal weighted barycenter weights
"""
weights = np.zeros((number_of_queries, topk))
weights[:, :] = 1.0 / topk
return weights
def _get_BDI_weights(similarity_sorted: np.ndarray,
query_gfeat: StackedGlobalFeatures,
map_gfeat: StackedGlobalFeatures):
"""
barycentric descriptor interpolation : interpolating baseline of http://openaccess.thecvf.com/content_CVPR_2019/papers/Sattler_Understanding_the_Limitations_of_CNN-Based_Absolute_Camera_Pose_Regression_CVPR_2019_paper.pdf
"""
np.random.seed(0)
weights = np.zeros(similarity_sorted.shape)
topk = similarity_sorted.shape[1]
for i in range(similarity_sorted.shape[0]):
query_descriptor = query_gfeat.stacked_features[i]
interpolating_descriptors = map_gfeat.stacked_features[similarity_sorted[i]]
A = interpolating_descriptors.T
b = query_descriptor
w = cp.Variable(topk)
objective = cp.Minimize(cp.sum_squares(A@w - b))
constraints = [cp.sum(w) == 1]
prob = cp.Problem(objective, constraints)
prob.solve()
weights[i, :] = w.value
return weights
def _get_CSI_weights(similarity_matrix: np.ndarray,
similarity_sorted: np.ndarray,
alpha: float):
"""
cosine similarity
"""
weights = np.zeros(similarity_sorted.shape)
for i in range(similarity_sorted.shape[0]):
weights[i, :] = similarity_matrix[i, similarity_sorted[i, :]]**(alpha)
weights[i, :] = weights[i, :] / np.sum(weights[i, :])
return weights
```
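A worked numeric example of the cosine-similarity (CSI) weighting used in `_get_CSI_weights`, for a single query with made-up retrieval scores and alpha=8 (the default exposed by the pose-approximation tool below):
```python
# Worked example of the CSI weighting on one query (scores are made up).
import numpy as np

alpha = 8.0
similarity_row = np.array([0.92, 0.90, 0.75])  # top-3 retrieved map images for one query
weights = similarity_row ** alpha
weights /= weights.sum()
print(weights)        # approximately [0.49, 0.41, 0.10]
print(weights.sum())  # 1.0
```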
#### File: kapture_localization/utils/subprocess.py
```python
import subprocess
import sys
import os
import os.path as path
from typing import List, Optional
from kapture_localization.utils.logging import getLogger
def find_in_PATH(filename: str):
"""
Look for file in current directory and all PATH directories
:param filename: name of the file to look for
:type filename: str
:raises FileNotFoundError: Could not find file in any of the paths
:return: the path for which path.isfile returned true
:rtype: str
"""
if path.isfile(filename):
return path.normpath(filename)
os_paths = os.environ['PATH'].split(path.pathsep)
for os_path in os_paths:
fullpath_file = path.join(os_path, filename)
if path.isfile(fullpath_file):
return path.normpath(fullpath_file)
raise FileNotFoundError(f'could not find {filename}')
def run_python_command(local_path: str, args: List[str], python_binary: Optional[str] = None):
"""
run a python subprocess
:param local_path: path where you expect the file to be
:type local_path: str
:param args: the arguments of the python process
:type args: List[str]
:param python_binary: path to the python binary, optional, when None, the .py file is called directly
:type python_binary: Optional[str]
:raises ValueError: subprocess crashed
"""
if python_binary is None:
if path.isfile(local_path):
compute_image_pairs_bin = path.normpath(local_path)
else:
# maybe the script was installed through pip
compute_image_pairs_bin = path.basename(local_path)
args.insert(0, compute_image_pairs_bin)
else:
if path.isfile(local_path):
compute_image_pairs_bin = path.normpath(local_path)
else:
# maybe the script was installed through pip
# with the direct binary, we need to get the full path
compute_image_pairs_bin = find_in_PATH(path.basename(local_path))
args.insert(0, compute_image_pairs_bin)
args.insert(0, python_binary)
getLogger().debug(f'run_python_command : {args}')
use_shell = sys.platform.startswith("win")
python_process = subprocess.Popen(args, shell=use_shell)
python_process.wait()
if python_process.returncode != 0:
raise ValueError('\nSubprocess Error (Return code:' f' {python_process.returncode} )')
```
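A hedged sketch of calling `run_python_command`; the script name and its arguments are placeholders and do not refer to real kapture-localization tools.
```python
# Placeholder usage sketch (script path and arguments are hypothetical).
import sys

args = ['--input', 'map_kapture', '--output', 'out_dir']
try:
    run_python_command('./my_tool.py', args, python_binary=sys.executable)
except (FileNotFoundError, ValueError) as error:
    # FileNotFoundError if the script cannot be located, ValueError if it exits with a non-zero code
    print(f'subprocess failed: {error}')
```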
#### File: kapture-localization/pipeline/kapture_hloc_pipeline_from_kapture_dataset.py
```python
import argparse
import logging
import os
import os.path as path
import sys
from typing import List, Optional
from pathlib import Path
import numpy as np # noqa: F401
import quaternion
try:
from pprint import pformat # noqa: F401
import h5py # noqa: F401
import cv2 # noqa: F401
from hloc import extract_features, match_features, pairs_from_covisibility # noqa: F401
from hloc import triangulation, localize_sfm, visualization # noqa: F401
except Exception as e:
    raise ImportError(f' {e} hloc has additional requirements compared to kapture-localization, '
'please see https://github.com/cvg/Hierarchical-Localization/blob/master/requirements.txt '
'and add Hierarchical-Localization to your PYTHONPATH')
import pipeline_import_paths # noqa: F401
import kapture_localization.utils.logging
from kapture_localization.utils.subprocess import run_python_command
from kapture_localization.colmap.colmap_command import run_model_converter
from kapture_localization.utils.BenchmarkFormatStyle import BenchmarkFormatStyle, get_benchmark_format_command
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture.utils.logging
import kapture
from kapture.io.csv import table_from_file, kapture_from_dir, kapture_to_dir, get_csv_fullpath
from kapture.io.records import get_record_fullpath
from kapture.converter.colmap.export_colmap import export_colmap
logger = logging.getLogger('hloc_pipeline_from_kapture_dataset')
def convert_pairs_to_hloc_format(pairsfile_path_kapture: str, pairsfile_path_hloc: str):
"""
convert kapture pairsfile to hloc pairsfile
"""
with open(pairsfile_path_kapture, 'r') as fid:
table = list(table_from_file(fid))
os.makedirs(os.path.dirname(os.path.abspath(pairsfile_path_hloc)), exist_ok=True)
with open(pairsfile_path_hloc, 'w') as fid:
for query_name, map_name, _ in table:
fid.write(f'{query_name} {map_name}\n')
def convert_kapture_to_hloc_image_list(kapture_path: str, output_path: str):
"""
convert kapture records_camera to hloc image list
"""
skip_heavy_useless = [kapture.Trajectories,
kapture.RecordsLidar, kapture.RecordsWifi,
kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
kapture.Matches, kapture.Points3d, kapture.Observations]
kapture_to_convert = kapture_from_dir(kapture_path, skip_list=skip_heavy_useless)
output_content = []
for _, sensor_id, filename in kapture.flatten(kapture_to_convert.records_camera, is_sorted=True):
line = filename
output_content.append(line)
with open(output_path, 'w') as fid:
fid.write('\n'.join(output_content))
def export_image_list(kapture_path: str, output_path: str):
"""
export from kapture to image list with camera params
"""
skip_heavy_useless = [kapture.Trajectories,
kapture.RecordsLidar, kapture.RecordsWifi,
kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
kapture.Matches, kapture.Points3d, kapture.Observations]
kapture_to_export = kapture_from_dir(kapture_path, skip_list=skip_heavy_useless)
output_content = []
for _, sensor_id, filename in kapture.flatten(kapture_to_export.records_camera, is_sorted=True):
line = filename
camera = kapture_to_export.sensors[sensor_id]
line += ' ' + ' '.join(camera.sensor_params)
output_content.append(line)
with open(output_path, 'w') as fid:
fid.write('\n'.join(output_content))
def convert_results_format(image_list_with_intrinsics_path: str, results_file_in: str, results_file_out: str):
"""
convert hloc result file (with basename for images) to the same but with the full relative path
"""
with open(image_list_with_intrinsics_path) as fid:
images_list = fid.readlines()
# remove end line char and empty lines
images_list = [line.rstrip() for line in images_list if line != '\n']
images_list = [line.split()[0] for line in images_list]
with open(results_file_in) as fid:
lines = fid.readlines()
lines = [line.rstrip() for line in lines if line != '\n']
with open(results_file_out, 'w') as fid:
for i, line in enumerate(lines):
line_array = line.split()
line_array[0] = images_list[i]
fid.write(' '.join(line_array) + '\n')
def convert_results_to_kapture(query_path: str, results: str, outpath: str):
"""
convert file with name qw qx qy qz tx ty tz to kapture
"""
skip_heavy_useless = [kapture.Trajectories,
kapture.RecordsLidar, kapture.RecordsWifi,
kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
kapture.Matches, kapture.Points3d, kapture.Observations]
kapture_query = kapture_from_dir(query_path, skip_list=skip_heavy_useless)
inverse_records_camera = {image_name: (timestamp, sensor_id) for timestamp,
sensor_id, image_name in kapture.flatten(kapture_query.records_camera)}
trajectories = kapture.Trajectories()
with open(results) as fid:
lines = fid.readlines()
lines = [line.rstrip().split() for line in lines if line != '\n']
for line in lines:
image_name = line[0]
rotation = quaternion.quaternion(float(line[1]), float(line[2]), float(line[3]), float(line[4]))
translation = [float(line[5]), float(line[6]), float(line[7])]
timestamp, sensor_id = inverse_records_camera[image_name]
trajectories[timestamp, sensor_id] = kapture.PoseTransform(rotation, translation)
kapture_query.trajectories = trajectories
kapture_to_dir(outpath, kapture_query)
def hloc_pipeline_from_kapture_dataset(kapture_path_map: str,
kapture_path_query: str,
pairsfile_path_map: str,
pairsfile_path_query: str,
output_dir: str,
feature_conf_str: str,
matcher_conf_str: str,
covisibility_clustering: bool,
bins_as_str: List[str],
benchmark_format_style: BenchmarkFormatStyle,
colmap_binary: str,
python_binary: Optional[str],
skip_list: List[str]) -> None:
"""
run hloc on kapture data
"""
feature_conf = extract_features.confs[feature_conf_str]
matcher_conf = match_features.confs[matcher_conf_str]
images_map = get_record_fullpath(kapture_path_map)
images_query = get_record_fullpath(kapture_path_query)
os.makedirs(output_dir, exist_ok=True)
if "convert_pairsfile_map" not in skip_list:
map_pairs_hloc = path.join(output_dir, 'pairfiles/db_pairs', path.basename(pairsfile_path_map) + "_hloc.txt")
convert_pairs_to_hloc_format(pairsfile_path_map, map_pairs_hloc)
pairsfile_path_map = map_pairs_hloc
if "convert_pairsfile_query" not in skip_list:
query_pairs_hloc = path.join(output_dir, 'pairfiles/query', path.basename(pairsfile_path_query) + "_hloc.txt")
convert_pairs_to_hloc_format(pairsfile_path_query, query_pairs_hloc)
pairsfile_path_query = query_pairs_hloc
feature_path = Path(output_dir, feature_conf['output']+'.h5')
if "extract_features_map" not in skip_list:
image_list_map_path = path.join(output_dir, 'image_list_map.txt')
convert_kapture_to_hloc_image_list(kapture_path_map, image_list_map_path)
feature_path_map = extract_features.main(feature_conf, Path(
images_map), Path(output_dir), image_list=Path(image_list_map_path))
assert feature_path_map.resolve() == feature_path.resolve()
if "extract_features_query" not in skip_list:
image_list_query_path = path.join(output_dir, 'image_list_query.txt')
convert_kapture_to_hloc_image_list(kapture_path_query, image_list_query_path)
feature_path_query = extract_features.main(feature_conf, Path(
images_query), Path(output_dir), image_list=Path(image_list_query_path))
assert feature_path_query.resolve() == feature_path.resolve()
pairsfile_path_map_pathlib = Path(pairsfile_path_map)
match_name_map = feature_conf['output'] + '_' + matcher_conf["output"] + f'_{pairsfile_path_map_pathlib.stem}'
map_match_path = Path(output_dir, match_name_map+'.h5')
if 'match_map_pairs' not in skip_list:
map_match_path_actual = match_features.main(matcher_conf, pairsfile_path_map_pathlib,
feature_conf['output'], Path(output_dir))
assert map_match_path_actual.resolve() == map_match_path.resolve()
exported_mapping_path = path.join(output_dir, '3D-models/exported_from_kapture')
if 'kapture_export_map_to_colmap' not in skip_list:
export_colmap(kapture_path_map, path.join(exported_mapping_path, 'colmap.db'), exported_mapping_path,
force_overwrite_existing=True)
# convert .txt to .bin
run_model_converter(colmap_binary, exported_mapping_path, exported_mapping_path, 'BIN')
triangulate_path = path.join(output_dir, 'sfm_' + feature_conf_str + '_' + matcher_conf_str)
if 'triangulate' not in skip_list:
triangulation.main(
Path(triangulate_path),
Path(exported_mapping_path),
Path(images_map),
pairsfile_path_map_pathlib,
feature_path,
map_match_path,
colmap_binary)
pairsfile_path_query_pathlib = Path(pairsfile_path_query)
match_name_query = feature_conf['output'] + '_' + matcher_conf["output"] + f'_{pairsfile_path_query_pathlib.stem}'
query_match_path = Path(output_dir, match_name_query+'.h5')
if 'match_query_pairs' not in skip_list:
query_match_path_actual = match_features.main(matcher_conf, pairsfile_path_query_pathlib,
feature_conf['output'], Path(output_dir))
assert query_match_path_actual.resolve() == query_match_path.resolve()
query_as_txt = path.join(output_dir, 'image_list_with_intrinsics.txt')
export_image_list(kapture_path_query, query_as_txt)
results_file = path.join(output_dir, f'results_{feature_conf_str}_{matcher_conf_str}.txt')
if 'localize' not in skip_list:
localize_sfm.main(
Path(triangulate_path),
Path(query_as_txt),
pairsfile_path_query_pathlib,
feature_path,
query_match_path,
Path(results_file),
covisibility_clustering=covisibility_clustering)
results_full = path.join(output_dir, f'results_{feature_conf_str}_{matcher_conf_str}_fullnames.txt')
results_kapture = path.join(output_dir, f'results_{feature_conf_str}_{matcher_conf_str}_kapture')
if 'convert_results' not in skip_list:
convert_results_format(query_as_txt, results_file, results_full)
convert_results_to_kapture(kapture_path_query, results_full, results_kapture)
if 'evaluate' not in skip_list and path.isfile(get_csv_fullpath(kapture.Trajectories, kapture_path_query)):
local_evaluate_path = path.join(pipeline_import_paths.HERE_PATH, '../tools/kapture_evaluate.py')
evaluate_args = ['-v', str(logger.level),
'-i', results_kapture,
'--labels', f'hloc_{feature_conf_str}_{matcher_conf_str}',
'-gt', kapture_path_query,
'-o', path.join(results_kapture, 'eval')]
evaluate_args += ['--bins'] + bins_as_str
evaluate_args.append('-f')
run_python_command(local_evaluate_path, evaluate_args, python_binary)
LTVL2020_output_path = path.join(output_dir, f'results_{feature_conf_str}_{matcher_conf_str}_LTVL2020_style.txt')
if 'export_LTVL2020' not in skip_list:
export_LTVL2020_script_name, export_LTVL2020_args = get_benchmark_format_command(
benchmark_format_style,
results_kapture,
LTVL2020_output_path,
True,
logger
)
local_export_LTVL2020_path = path.join(pipeline_import_paths.HERE_PATH,
f'../../kapture/tools/{export_LTVL2020_script_name}')
run_python_command(local_export_LTVL2020_path, export_LTVL2020_args, python_binary)
def hloc_pipeline_from_kapture_dataset_get_parser():
"""
get the argparse object for the kapture_hloc_pipeline_from_kapture_dataset.py command
"""
    parser = argparse.ArgumentParser(description=('run the full hloc mapping and localization pipeline '
                                                  'on data specified in kapture format.'))
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument('-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument('-q', '--silent', '--quiet', action='store_const',
dest='verbose', const=logging.CRITICAL)
parser.add_argument('-i', '--kapture-map', required=True,
help='path to the kapture map directory')
    parser.add_argument('--query', required=True,
                        help='input path to kapture query data root directory')
parser.add_argument('--pairsfile-map', required=True,
help='input path to mapping pairs')
parser.add_argument('--pairsfile-query', required=True,
help='input path to query pairs')
parser.add_argument('-o', '--output', required=True,
help='output directory.')
parser.add_argument('--feature-conf',
default='superpoint_max', choices=list(extract_features.confs.keys()),
type=str,
help='features to use in hloc')
parser.add_argument('--matcher-conf',
default='superglue', choices=list(match_features.confs.keys()),
type=str,
help='matcher to use in hloc')
parser.add_argument('--covisibility-clustering', action='store_true', default=False, required=False,
help='use covisibility_clustering=True in hloc localize')
parser.add_argument('--bins', nargs='+', default=["0.25 2", "0.5 5", "5 10"],
                        help='the desired positions/rotations thresholds for bins; '
                             'format is string: position_threshold_in_m space rotation_threshold_in_degree')
parser.add_argument('--benchmark-style',
default=BenchmarkFormatStyle.Default,
type=BenchmarkFormatStyle,
choices=list(BenchmarkFormatStyle),
help=('select which output format to use for the export_LTVL2020 part.'
' Default is the https://www.visuallocalization.net default.'
' RobotCar_Seasons, Gangnam_Station, Hyundai_Department_Store,'
' ETH_Microsoft are also part of'
' https://www.visuallocalization.net but require a different format.'
' RIO10 is for http://vmnavab26.in.tum.de/RIO10/'))
parser.add_argument('-colmap', '--colmap_binary', required=False,
default="colmap",
help='full path to colmap binary '
'(default is "colmap", i.e. assume the binary'
' is in the user PATH).')
parser_python_bin = parser.add_mutually_exclusive_group()
parser_python_bin.add_argument('-python', '--python_binary', required=False,
default=None,
help='full path to python binary '
'(default is "None", i.e. assume the os'
' can infer the python binary from the files itself, shebang or extension).')
parser_python_bin.add_argument('--auto-python-binary', action='store_true', default=False,
help='use sys.executable as python binary.')
parser.add_argument('-s', '--skip', choices=['convert_pairsfile_map',
'convert_pairsfile_query',
'extract_features_map',
'extract_features_query',
'match_map_pairs',
'kapture_export_map_to_colmap',
'triangulate',
'match_query_pairs',
'localize',
'convert_results',
'evaluate',
'export_LTVL2020'],
nargs='+', default=[],
help='steps to skip')
return parser
def hloc_pipeline_from_kapture_dataset_command_line():
"""
Parse the command line arguments to build a colmap map and localize using the given kapture data.
"""
parser = hloc_pipeline_from_kapture_dataset_get_parser()
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.INFO:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
kapture_localization.utils.logging.getLogger().setLevel(args.verbose)
args_dict = vars(args)
logger.debug('kapture_hloc_pipeline_from_kapture_dataset.py \\\n' + ' \\\n'.join(
'--{:20} {:100}'.format(k, str(v)) for k, v in args_dict.items()))
python_binary = args.python_binary
if args.auto_python_binary:
python_binary = sys.executable
logger.debug(f'python_binary set to {python_binary}')
hloc_pipeline_from_kapture_dataset(args.kapture_map,
args.query,
args.pairsfile_map,
args.pairsfile_query,
args.output,
args.feature_conf,
args.matcher_conf,
args.covisibility_clustering,
args.bins,
args.benchmark_style,
args.colmap_binary,
python_binary,
args.skip)
if __name__ == '__main__':
hloc_pipeline_from_kapture_dataset_command_line()
```
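To make the pairs-file conversion above concrete, here is the shape of the two formats with invented image names; the real code goes through `kapture.io.csv.table_from_file`, which also handles comment lines.
```python
# Invented example of the kapture -> hloc pairsfile conversion (image names are placeholders).
kapture_pairs = [
    'query/0001.jpg, mapping/0042.jpg, 0.83',
    'query/0001.jpg, mapping/0047.jpg, 0.79',
]
hloc_pairs = []
for line in kapture_pairs:
    query_name, map_name, _score = [field.strip() for field in line.split(',')]
    hloc_pairs.append(f'{query_name} {map_name}')
print('\n'.join(hloc_pairs))
# query/0001.jpg mapping/0042.jpg
# query/0001.jpg mapping/0047.jpg
```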
#### File: kapture-localization/tools/kapture_colmap_localize.py
```python
import argparse
import logging
import os
import os.path as path
import shutil
from typing import List, Optional
import path_to_kapture_localization # noqa: F401
import kapture_localization.utils.logging
import kapture_localization.colmap.colmap_command as colmap_lib
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
import kapture.io.csv
import kapture.utils.logging
from kapture.utils.paths import safe_remove_file, safe_remove_any_path
from kapture.core.Trajectories import rigs_remove_inplace
from kapture.io.tar import TarCollection
from kapture.utils.Collections import try_get_only_key_from_collection
from kapture.converter.colmap.database import COLMAPDatabase
import kapture.converter.colmap.database_extra as database_extra
logger = logging.getLogger('colmap_localize')
def colmap_localize(kapture_path: str,
colmap_path: str,
input_database_path: str,
input_reconstruction_path: str,
colmap_binary: str,
pairs_file_path: Optional[str],
keypoints_type: Optional[str],
use_colmap_matches_importer: bool,
image_registrator_options: List[str],
skip_list: List[str],
force: bool) -> None:
"""
    Localize images on a colmap model using precomputed keypoints and matches from the kapture data.
:param kapture_path: path to the kapture to use
:param colmap_path: path to the colmap build
    :param input_database_path: path to the map colmap.db
:param input_reconstruction_path: path to the map reconstruction folder
:param colmap_binary: path to the colmap binary executable
    :param pairs_file_path: optional text file in csv format (image_name1, image_name2, score) used to filter loaded matches
:param keypoints_type: type of keypoints, name of the keypoints subfolder
    :param use_colmap_matches_importer: if True, run colmap matches_importer instead of manually filling the two_view_geometry table
:param image_registrator_options: options for the image registrator
:param skip_list: list of steps to skip
:param force: Silently overwrite kapture files if already exists.
"""
# Load input files first to make sure it is OK
logger.info('loading kapture files...')
with kapture.io.csv.get_all_tar_handlers(kapture_path) as tar_handlers:
kapture_data = kapture.io.csv.kapture_from_dir(kapture_path, pairs_file_path, tar_handlers=tar_handlers)
colmap_localize_from_loaded_data(kapture_data,
kapture_path,
tar_handlers,
colmap_path,
input_database_path,
input_reconstruction_path,
colmap_binary,
keypoints_type,
use_colmap_matches_importer,
image_registrator_options,
skip_list,
force)
def colmap_localize_from_loaded_data(kapture_data: kapture.Kapture,
kapture_path: str,
tar_handlers: Optional[TarCollection],
colmap_path: str,
input_database_path: str,
input_reconstruction_path: str,
colmap_binary: str,
keypoints_type: Optional[str],
use_colmap_matches_importer: bool,
image_registrator_options: List[str],
skip_list: List[str],
force: bool) -> None:
"""
    Localize images on a colmap model using precomputed keypoints and matches from the kapture data.
:param kapture_data: kapture data to use
:param kapture_path: path to the kapture to use
    :param tar_handlers: collection of preloaded tar archives
:param colmap_path: path to the colmap build
    :param input_database_path: path to the map colmap.db
:param input_reconstruction_path: path to the map reconstruction folder
:param colmap_binary: path to the colmap binary executable
:param keypoints_type: type of keypoints, name of the keypoints subfolder
    :param use_colmap_matches_importer: if True, run colmap matches_importer instead of manually filling the two_view_geometry table
:param image_registrator_options: options for the image registrator
:param skip_list: list of steps to skip
:param force: Silently overwrite kapture files if already exists.
"""
os.makedirs(colmap_path, exist_ok=True)
if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints and kapture_data.matches):
raise ValueError('records_camera, sensors, keypoints, matches are mandatory')
if kapture_data.trajectories:
logger.warning("Input data contains trajectories: they will be ignored")
kapture_data.trajectories.clear()
else:
kapture_data.trajectories = kapture.Trajectories()
# COLMAP does not fully support rigs.
if kapture_data.rigs is not None and kapture_data.trajectories is not None:
# make sure, rigs are not used in trajectories.
logger.info('remove rigs notation.')
rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
kapture_data.rigs.clear()
# Prepare output
# Set fixed name for COLMAP database
colmap_db_path = path.join(colmap_path, 'colmap.db')
image_list_path = path.join(colmap_path, 'images.list')
reconstruction_path = path.join(colmap_path, "reconstruction")
if 'delete_existing' not in skip_list:
safe_remove_file(colmap_db_path, force)
safe_remove_file(image_list_path, force)
safe_remove_any_path(reconstruction_path, force)
os.makedirs(reconstruction_path, exist_ok=True)
# Copy colmap db to output
if not os.path.exists(colmap_db_path):
shutil.copy(input_database_path, colmap_db_path)
# find correspondences between the colmap db and the kapture data
images_all = {image_path: (ts, cam_id)
for ts, shot in kapture_data.records_camera.items()
for cam_id, image_path in shot.items()}
colmap_db = COLMAPDatabase.connect(colmap_db_path)
colmap_image_ids = database_extra.get_colmap_image_ids_from_db(colmap_db)
colmap_images = database_extra.get_images_from_database(colmap_db)
colmap_db.close()
# dict ( kapture_camera -> colmap_camera_id )
colmap_camera_ids = {images_all[image_path][1]: colmap_cam_id
for image_path, colmap_cam_id in colmap_images if image_path in images_all}
images_to_add = {image_path: value
for image_path, value in images_all.items()
if image_path not in colmap_image_ids}
flatten_images_to_add = [(ts, kapture_cam_id, image_path)
for image_path, (ts, kapture_cam_id) in images_to_add.items()]
if 'import_to_db' not in skip_list:
logger.info("Step 1: Add precomputed keypoints and matches to colmap db")
if keypoints_type is None:
keypoints_type = try_get_only_key_from_collection(kapture_data.keypoints)
assert keypoints_type is not None
assert keypoints_type in kapture_data.keypoints
assert keypoints_type in kapture_data.matches
cameras_to_add = kapture.Sensors()
for _, (_, kapture_cam_id) in images_to_add.items():
if kapture_cam_id not in colmap_camera_ids:
kapture_cam = kapture_data.sensors[kapture_cam_id]
cameras_to_add[kapture_cam_id] = kapture_cam
colmap_db = COLMAPDatabase.connect(colmap_db_path)
colmap_added_camera_ids = database_extra.add_cameras_to_database(cameras_to_add, colmap_db)
colmap_camera_ids.update(colmap_added_camera_ids)
colmap_added_image_ids = database_extra.add_images_to_database_from_flatten(
colmap_db, flatten_images_to_add, kapture_data.trajectories, colmap_camera_ids)
colmap_image_ids.update(colmap_added_image_ids)
colmap_image_ids_reversed = {v: k for k, v in colmap_image_ids.items()} # colmap_id : name
# add new features
colmap_keypoints = database_extra.get_keypoints_set_from_database(colmap_db, colmap_image_ids_reversed)
keypoints_all = kapture_data.keypoints[keypoints_type]
keypoints_to_add = {name for name in keypoints_all if name not in colmap_keypoints}
keypoints_to_add = kapture.Keypoints(keypoints_all.type_name, keypoints_all.dtype, keypoints_all.dsize,
keypoints_to_add)
database_extra.add_keypoints_to_database(colmap_db, keypoints_to_add,
keypoints_type, kapture_path,
tar_handlers,
colmap_image_ids)
# add new matches
colmap_matches = kapture.Matches(database_extra.get_matches_set_from_database(colmap_db,
colmap_image_ids_reversed))
colmap_matches.normalize()
matches_all = kapture_data.matches[keypoints_type]
matches_to_add = kapture.Matches({pair for pair in matches_all if pair not in colmap_matches})
# print(list(matches_to_add))
database_extra.add_matches_to_database(colmap_db, matches_to_add,
keypoints_type, kapture_path,
tar_handlers,
colmap_image_ids,
export_two_view_geometry=not use_colmap_matches_importer)
colmap_db.close()
if use_colmap_matches_importer:
logger.info('Step 2: Run geometric verification')
logger.debug('running colmap matches_importer...')
if keypoints_type is None:
keypoints_type = try_get_only_key_from_collection(kapture_data.matches)
assert keypoints_type is not None
assert keypoints_type in kapture_data.matches
# compute two view geometry
colmap_lib.run_matches_importer_from_kapture_matches(
colmap_binary,
colmap_use_cpu=True,
colmap_gpu_index=None,
colmap_db_path=colmap_db_path,
kapture_matches=kapture_data.matches[keypoints_type],
force=force)
else:
logger.info('Step 2: Run geometric verification - skipped')
if 'image_registrator' not in skip_list:
logger.info("Step 3: Run image_registrator")
# run image_registrator
colmap_lib.run_image_registrator(
colmap_binary,
colmap_db_path,
input_reconstruction_path,
reconstruction_path,
image_registrator_options
)
# run model_converter
if 'model_converter' not in skip_list:
logger.info("Step 4: Export reconstruction results to txt")
colmap_lib.run_model_converter(
colmap_binary,
reconstruction_path,
reconstruction_path
)
def colmap_localize_command_line():
"""
Parse the command line arguments to localize images on an existing colmap map using the given kapture data.
"""
    parser = argparse.ArgumentParser(description=('localize images on a colmap model (map) '
                                                  'from data specified in kapture format, '
                                                  'using the precomputed keypoints and matches.'))
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument('-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument('-q', '--silent', '--quiet', action='store_const',
dest='verbose', const=logging.CRITICAL)
parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
help='silently delete database if already exists.')
parser.add_argument('-i', '--input', required=True,
help='input path to kapture data root directory')
parser.add_argument('-db', '--database',
help='path to COLMAP database file.')
parser.add_argument('-txt', '--reconstruction',
help='path to COLMAP reconstruction triplet text file.')
parser.add_argument('-o', '--output', required=True,
help='output directory (colmap directory).')
parser.add_argument('-colmap', '--colmap_binary', required=False,
default="colmap",
help='full path to colmap binary '
'(default is "colmap", i.e. assume the binary'
' is in the user PATH).')
parser.add_argument('--use-colmap-matches-importer', action='store_true', default=False,
help='Use colmap matches_importer instead of manually filling the two_view_geometry table')
parser.add_argument('--pairs-file-path',
default=None,
type=str,
help=('text file in the csv format; where each line is image_name1, image_name2, score '
'which contains the image pairs to match, can be used to filter loaded matches'))
parser.add_argument('-kpt', '--keypoints-type', default=None, help='kapture keypoints type.')
parser.add_argument('-s', '--skip', choices=['delete_existing',
'import_to_db',
'image_registrator',
'model_converter'],
nargs='+', default=[],
help='steps to skip')
args, image_registrator_options = parser.parse_known_args()
logger.setLevel(args.verbose)
logging.getLogger('colmap').setLevel(args.verbose)
if args.verbose <= logging.INFO:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
kapture_localization.utils.logging.getLogger().setLevel(args.verbose)
args_dict = vars(args)
args_dict['image_registrator_options'] = image_registrator_options
logger.debug('colmap_localize.py \\\n' + ' \\\n'.join(
'--{:20} {:100}'.format(k, str(v)) for k, v in args_dict.items()))
colmap_localize(args.input, args.output,
args.database, args.reconstruction,
args.colmap_binary,
args.pairs_file_path,
args.keypoints_type,
args.use_colmap_matches_importer,
image_registrator_options,
args.skip, args.force)
if __name__ == '__main__':
colmap_localize_command_line()
```
#### File: kapture-localization/tools/kapture_compute_distance_pairs.py
```python
import argparse
import logging
import os
import pathlib
from typing import Optional
import math
from tqdm import tqdm
import path_to_kapture_localization # noqa: F401
import kapture_localization.utils.logging
from kapture_localization.pairing.distance import get_pairs_distance
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
import kapture.utils.logging
from kapture.io.csv import kapture_from_dir, table_to_file
logger = logging.getLogger('compute_distance_pairs')
def compute_distance_pairs(mapping_path: str,
query_path: Optional[str],
output_path: str,
topk: int,
block_size: int,
min_distance: float,
max_distance: float,
max_angle: float,
keep_rejected: bool,
max_number_of_threads: Optional[int] = None):
"""
compute image pairs from distance, and write the result in a text file
"""
skip_heavy = [kapture.RecordsLidar, kapture.RecordsWifi,
kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
kapture.Matches, kapture.Points3d, kapture.Observations]
logger.info(f'compute_distance_pairs. loading mapping: {mapping_path}')
kdata = kapture_from_dir(mapping_path, skip_list=skip_heavy)
assert kdata.sensors is not None
assert kdata.records_camera is not None
assert kdata.trajectories is not None
if query_path is None or mapping_path == query_path:
logger.info('computing mapping pairs from distance...')
kdata_query = None
else:
logger.info('computing query pairs from distance...')
kdata_query = kapture_from_dir(query_path, skip_list=skip_heavy)
assert kdata_query.sensors is not None
assert kdata_query.records_camera is not None
assert kdata_query.trajectories is not None
os.umask(0o002)
p = pathlib.Path(output_path)
os.makedirs(str(p.parent.resolve()), exist_ok=True)
with open(output_path, 'w') as fid:
if block_size == float('inf'):
image_pairs = get_pairs_distance(kdata, kdata_query, topk,
min_distance, max_distance, max_angle,
keep_rejected, max_number_of_threads)
table_to_file(fid, image_pairs, header='# query_image, map_image, score')
else:
if kdata_query is None:
kdata_query = kdata
if kdata_query.rigs is not None:
assert kdata_query.trajectories is not None # for ide
kapture.rigs_remove_inplace(kdata_query.trajectories, kdata_query.rigs)
records_camera_list = [k
for k in sorted(kapture.flatten(kdata_query.records_camera),
key=lambda x: x[2])]
number_of_iteration = math.ceil(len(records_camera_list) / block_size)
table_to_file(fid, [], header='# query_image, map_image, score')
for i in tqdm(range(number_of_iteration), disable=logging.getLogger().level >= logging.CRITICAL):
sliced_records = kapture.RecordsCamera()
for ts, sensor_id, img_name in records_camera_list[i * block_size:(i+1)*block_size]:
if (ts, sensor_id) not in kdata_query.trajectories:
continue
sliced_records[(ts, sensor_id)] = img_name
kdata_slice_query = kapture.Kapture(
sensors=kdata_query.sensors,
records_camera=sliced_records,
trajectories=kdata_query.trajectories
)
image_pairs = get_pairs_distance(kdata, kdata_slice_query, topk,
min_distance, max_distance, max_angle,
keep_rejected, max_number_of_threads)
table_to_file(fid, image_pairs)
logger.info('all done')
def compute_distance_pairs_command_line():
parser = argparse.ArgumentParser(
description=('Create image pairs files from distance. '
'Pairs are computed between query <-> mapping or mapping <-> mapping'))
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument('-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument('-q', '--silent', '--quiet',
action='store_const', dest='verbose', const=logging.CRITICAL)
parser.add_argument('--mapping', required=True, help='input path to kapture input root directory')
parser.add_argument('--query', default=None,
                        help=('input path to a kapture root directory containing query images; '
                              'leave to the default (None) when computing mapping pairs'))
parser.add_argument('--max-distance', type=float, default=25.0,
help='max distance to form a pair')
parser.add_argument('--max-angle', type=float, default=45.0,
help='max angle to form a pair')
parser_dist = parser.add_mutually_exclusive_group()
parser_dist.add_argument('--min-distance', type=float, default=0.0,
help='min distance to form a pair')
parser_dist.add_argument('--keep-rejected', action='store_true', default=False,
help='keep pairs that are not within the thresholds bounds')
parser.add_argument('--max-number-of-threads', default=None, type=int,
help='By default, use as many as cpus. But you can set a limit.')
parser.add_argument('--block-size', default=float('inf'), type=int,
help=('number of (query) images to process at once'))
parser.add_argument('-o', '--output', required=True,
help='output path to pairsfile')
parser.add_argument('--topk', default=None, type=int,
help='the max number of top retained images')
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.DEBUG:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
kapture_localization.utils.logging.getLogger().setLevel(args.verbose)
logger.debug(''.join(['\n\t{:13} = {}'.format(k, v)
for k, v in vars(args).items()]))
compute_distance_pairs(args.mapping, args.query, args.output, args.topk,
args.block_size,
args.min_distance, args.max_distance, args.max_angle,
args.keep_rejected, args.max_number_of_threads)
if __name__ == '__main__':
compute_distance_pairs_command_line()
```
#### File: kapture-localization/tools/kapture_create_kapture_proxy.py
```python
import argparse
import logging
from typing import List, Optional
import path_to_kapture_localization # noqa: F401
import kapture_localization.utils.logging
from kapture_localization.utils.symlink import can_use_symlinks, create_kapture_proxy
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture.utils.logging
logger = logging.getLogger('create_kapture_proxy')
def _convert_none_string(line: str):
if line.lower() == 'none':
return None
return line
def _convert_none_string_array(lines: Optional[List[str]]):
if lines is None:
return None
return [_convert_none_string(line) for line in lines]
def create_kapture_proxy_command_line():
parser = argparse.ArgumentParser(
description='Create a proxy kapture from a source kapture with only sensors data and orphan features.')
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument('-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument('-q', '--silent', '--quiet',
action='store_const', dest='verbose', const=logging.CRITICAL)
parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
help='silently output folder content if it already exist.')
parser.add_argument('-i', '--input', required=True, help=('input path to kapture input root directory'
' (only sensors will be used)'))
parser.add_argument('-o', '--output', required=True, help='output path to the proxy kapture')
parser.add_argument('-kpt', '--keypoints-path', default=None, nargs='+',
help='input path to the orphan keypoints folder')
parser.add_argument('-desc', '--descriptors-path', default=None, nargs='+',
help='input path to the orphan descriptors folder')
parser.add_argument('-gfeat', '--global-features-path', default=None, nargs='+',
help='input path to the orphan global features folder')
parser.add_argument('-matches', '--matches-path', default=None, nargs='+',
help=('input path to the orphan matches folder, '
'if both keypoints-path and matches-paths are given, '
'the order of the two list must be the same (same as keypoints-type), '
'use the none if necessary, it will be converted to None in code'))
parser.add_argument('--keypoints-type', default=None, nargs='+', help='kapture keypoints types.')
parser.add_argument('--descriptors-type', default=None, nargs='+', help='kapture descriptors types.')
parser.add_argument('--global-features-type', default=None, nargs='+', help='kapture global features types.')
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.DEBUG:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
kapture_localization.utils.logging.getLogger().setLevel(args.verbose)
logger.debug(''.join(['\n\t{:13} = {}'.format(k, v)
for k, v in vars(args).items()]))
if can_use_symlinks():
keypoints_paths = _convert_none_string_array(args.keypoints_path)
descriptors_paths = args.descriptors_path
global_features_path = args.global_features_path
matches_paths = _convert_none_string_array(args.matches_path)
keypoints_types = _convert_none_string_array(args.keypoints_type)
descriptors_types = _convert_none_string_array(args.descriptors_type)
global_features_types = _convert_none_string_array(args.global_features_type)
create_kapture_proxy(args.output, args.input,
keypoints_paths, descriptors_paths,
global_features_path, matches_paths,
keypoints_types, descriptors_types,
global_features_types,
args.force)
else:
        raise EnvironmentError('Please restart this command as admin, it is required for os.symlink. '
                               'See https://docs.python.org/3.6/library/os.html#os.symlink')
# need to find a way to redirect output, else it closes on error...
# logger.critical('Request UAC for symlink rights...')
# ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, " ".join(sys.argv), None, 1)
if __name__ == '__main__':
create_kapture_proxy_command_line()
```
#### File: kapture-localization/tools/kapture_pose_approximation_from_pairsfile.py
```python
import argparse
import os
import logging
from typing import Optional
import numpy as np
import path_to_kapture_localization # noqa: F401
import kapture_localization.utils.logging
from kapture_localization.utils.pairsfile import get_ordered_pairs_from_file
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
from kapture.io.structure import delete_existing_kapture_files
import kapture.utils.logging
from kapture.io.csv import kapture_from_dir, kapture_to_dir
from kapture.algo.pose_operations import average_pose_transform_weighted
logger = logging.getLogger('pose_approximation')
METHOD_DESCRIPTIONS = {
'equal_weighted_barycenter': ("EWB: assigns the same weight to all of the top k retrieved "
"images with w_i = 1/k"),
'cosine_similarity': ("CSI: w_i=(1/z_i)*(transpose(d_q)*d_i)^alpha, "
"z_i=sum(transpose(d_q)*d_j)^alpha")
}
def pose_approximation_from_pairsfile(input_path: str,
pairsfile_path: str,
output_path: str,
query_path: Optional[str],
topk: Optional[int],
method: str,
additional_parameters: dict,
force: bool):
"""
localize from pairsfile
"""
os.makedirs(output_path, exist_ok=True)
delete_existing_kapture_files(output_path, force_erase=force)
logger.info(f'pose_approximation. loading mapping: {input_path}')
kdata = kapture_from_dir(input_path, None, skip_list=[kapture.Keypoints,
kapture.Descriptors,
kapture.GlobalFeatures,
kapture.Matches,
kapture.Points3d,
kapture.Observations])
if query_path is not None:
logger.info(f'pose_approximation. loading query: {query_path}')
kdata_query = kapture_from_dir(query_path, skip_list=[kapture.Keypoints,
kapture.Descriptors,
kapture.GlobalFeatures,
kapture.Matches,
kapture.Points3d,
kapture.Observations])
else:
kdata_query = kdata
logger.info(f'pose_approximation. loading pairs: {pairsfile_path}')
similarity_dict = get_ordered_pairs_from_file(pairsfile_path, kdata_query.records_camera,
kdata.records_camera, topk)
query_images = set(similarity_dict.keys())
kdata_result = kapture.Kapture(sensors=kapture.Sensors(),
records_camera=kapture.RecordsCamera(),
trajectories=kapture.Trajectories())
for timestamp, cam_id, image_name in kapture.flatten(kdata_query.records_camera):
if image_name not in query_images:
continue
if cam_id not in kdata_result.sensors:
kdata_result.sensors[cam_id] = kdata_query.sensors[cam_id]
kdata_result.records_camera[(timestamp, cam_id)] = image_name
if kdata.rigs is None:
map_trajectories = kdata.trajectories
else:
map_trajectories = kapture.rigs_remove(kdata.trajectories, kdata.rigs)
training_trajectories_reversed = {image_name: map_trajectories[(timestamp, cam_id)]
for timestamp, cam_id, image_name in kapture.flatten(kdata.records_camera)
if (timestamp, cam_id) in map_trajectories}
records_camera_reversed = {image_name: (timestamp, cam_id)
for timestamp, cam_id, image_name in kapture.flatten(kdata_result.records_camera)}
for image_name, similar_images in similarity_dict.items():
pose_inv_list = [training_trajectories_reversed[k].inverse() for k, _ in similar_images]
timestamp = records_camera_reversed[image_name][0]
cam_id = records_camera_reversed[image_name][1]
if method == 'equal_weighted_barycenter':
weight_list = [1.0/len(pose_inv_list) for _ in range(len(pose_inv_list))]
else:
assert 'alpha' in additional_parameters
alpha = additional_parameters['alpha']
weights = np.zeros((len(pose_inv_list),))
for i, (_, score) in enumerate(similar_images):
weights[i] = score
weights[:] = weights[:]**(alpha)
weights[:] = weights[:] / np.sum(weights[:])
weight_list = weights.tolist()
final_pose = average_pose_transform_weighted(pose_inv_list, weight_list).inverse()
kdata_result.trajectories[(timestamp, cam_id)] = final_pose
kapture_to_dir(output_path, kdata_result)
logger.info('all done')
def get_pose_approximation_method_argparser(method: str):
parser_method = argparse.ArgumentParser(description=METHOD_DESCRIPTIONS[method],
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_method.set_defaults(method=method)
# per method parameters
if method == 'cosine_similarity':
parser_method.add_argument('--alpha', default=8.0, type=float, help='alpha parameter of CSI')
return parser_method
def pose_approximation_from_pairsfile_command_line():
parser = argparse.ArgumentParser(description='localize from pairfile',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument('-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument('-q', '--silent', '--quiet',
action='store_const', dest='verbose', const=logging.CRITICAL)
parser.add_argument('--mapping', required=True,
help=('input path to kapture input root directory.\n'
'if query is left to None, it must contains all images'))
parser.add_argument('-o', '--output', required=True, help='output path to localized queries')
parser.add_argument('--query', default=None,
                        help='if left to None, timestamps and sensor ids are taken from the mapping input, otherwise from this query kapture')
parser.add_argument('--pairsfile-path', required=True, type=str,
help='text file which contains the image pairs and their score')
parser.add_argument('--topk', default=None, type=int,
                        help='override the pairsfile topk with this value (must be less than or equal to it)')
parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
help='Force delete output directory if already exists')
list_of_pose_approx_methods = ['equal_weighted_barycenter', 'cosine_similarity']
valid_subcommands = ', '.join(list_of_pose_approx_methods)
subparsers = parser.add_subparsers(title='subcommands',
description=f'valid subcommands: {valid_subcommands}',
help='additional help')
for method in list_of_pose_approx_methods:
subparsers.choices[method] = get_pose_approximation_method_argparser(method)
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.DEBUG:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
kapture_localization.utils.logging.getLogger().setLevel(args.verbose)
logger.debug('pose_approximation_from_pairsfile.py \\\n' + ''.join(['\n\t{:13} = {}'.format(k, v)
for k, v in vars(args).items()]))
pose_approximation_from_pairsfile(args.mapping, args.pairsfile_path, args.output,
args.query, args.topk,
args.method, vars(args),
args.force)
if __name__ == '__main__':
pose_approximation_from_pairsfile_command_line()
``` |
{
"source": "jkaberg/iBuff",
"score": 2
} |
#### File: jkaberg/iBuff/main.py
```python
__author__ = '<NAME> (jkaberg), <EMAIL>'
import os
from sqlite3 import dbapi2 as sqlite3
from datetime import datetime
from flask import Flask, request, session, g, redirect, url_for, render_template, flash
app = Flask(__name__)
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'ibuff.db'),
DEBUG=True,
SECRET_KEY='development key',
USERNAME='joel',
PASSWORD='<PASSWORD>'
))
app.config.from_envvar('IBUFF_SETTINGS', silent=True)
def connect_db():
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
init_db()
print('Initialized the database.')
def get_db():
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/', methods=['GET'])
def show_workouts():
if not os.path.isfile(app.config['DATABASE']):
init_db()
db = get_db()
cur = db.execute('SELECT title, comment, id FROM workouts ORDER BY id ASC')
workouts = cur.fetchall()
return render_template('show_workouts.html', workouts=workouts)
@app.route('/addWorkout', methods=['POST'])
def add_workout():
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
db.execute('INSERT INTO workouts (title, comment) VALUES (?, ?)',
[request.form['title'], request.form['comment']])
db.commit()
flash('New workout successfully added.')
return redirect(url_for('show_workouts'))
@app.route('/deleteWorkout/<wid>', methods=['GET'])
def delete_workout(wid):
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
# TODO: Recursive deletion from bottom and up would be a lot better.
db.execute('DELETE FROM workouts WHERE id = (?)', (wid,))
db.execute('DELETE FROM exercises WHERE workout_id = (?)', (wid,))
db.execute('DELETE FROM history WHERE workout_id = (?)', (wid,))
db.commit()
flash('Successfully deleted workout.')
return redirect(url_for('show_workouts'))
@app.route('/showWorkout/<wid>', methods=['GET'])
def show_workout(wid):
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
    # TODO: Fix this mess, no need for 2 queries.
cur = db.execute('SELECT title, comment FROM workouts WHERE id=(?)', (wid,))
workout = cur.fetchone()
cur = db.execute('SELECT id, title, comment, sets, reps FROM exercises WHERE workout_id=(?) ORDER BY id ASC', (wid,))
exercises = cur.fetchall()
if exercises:
eid = exercises[0]['id']
else:
eid = 0
return render_template('show_workout.html', workout=workout, exercises=exercises, wid=wid, eid=eid)
@app.route('/playWorkout/<wid>/<eid>/<count>', methods=['GET', 'POST'])
def play_workout(wid, eid, count):
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
count = int(count)
eid = int(eid)
last_eid = eid if count == 0 else eid - 1
    # TODO: Fix this mess, too many queries.
# Get the current exercise
cur = db.execute('SELECT id, title, comment, sets, reps FROM exercises WHERE workout_id = (?) ORDER BY id ASC', (wid,))
exercise = cur.fetchall()
# Add history
if request.method == 'POST' and count > 0:
db.execute('INSERT INTO history (exercise_id, workout_id, sets, reps, weight, date_time) values (?, ? , ?, ?, ?, ?)',
[last_eid, wid, request.form['sets'], request.form['reps'], request.form['weight'], datetime.now()])
db.commit()
# If count equals number of exercises then we're done!
if count == len(exercise):
flash('Workout finished, well done!')
return redirect(url_for('show_workout', wid=wid))
cur = db.execute('SELECT weight FROM history WHERE workout_id = (?) AND exercise_id = (?) ORDER BY id DESC', (wid, eid,))
weight = cur.fetchone()
return render_template('play_workout.html',
exercise=exercise[count],
wid=wid,
eid=eid + 1,
last_weight=weight[0] if weight else 1,
count=count + 1)
@app.route('/showExercise/<wid>/<eid>', methods=['GET'])
def show_exercise(wid, eid):
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
cur = db.execute('SELECT id, title, comment, sets, reps FROM exercises WHERE workout_id = (?) AND id = (?)', (wid, eid,))
exercise = cur.fetchone()
return render_template('show_exercise.html', exercise=exercise, wid=wid)
@app.route('/addExercise/<wid>', methods=['POST'])
def add_exercise(wid):
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
db.execute('INSERT INTO exercises (title, workout_id, comment, sets, reps) values (?, ?, ?, ?, ?)',
[request.form['title'], wid, request.form['comment'], request.form['sets'], request.form['reps']])
db.commit()
    flash('New exercise successfully added.')
return redirect(url_for('show_workout', wid=wid))
@app.route('/updateExercise/<wid>/<eid>', methods=['POST'])
def update_exercise(wid, eid):
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
db.execute('UPDATE exercises SET title = (?), comment = (?), sets = (?), reps = (?) WHERE workout_id = (?) AND id = (?)',
(request.form['title'], request.form['comment'], request.form['sets'], request.form['reps'], wid, eid,))
db.commit()
flash('Exercise successfully updated.')
return redirect(url_for('show_workout', wid=wid))
@app.route('/deleteExercise/<eid>/<wid>', methods=['GET'])
def delete_exercise(eid, wid):
if not session.get('logged_in'):
flash('You need to login.')
return redirect(url_for('login'))
db = get_db()
db.execute('DELETE FROM exercises WHERE id = (?)', (eid,))
db.execute('DELETE FROM history WHERE exercise_id = (?)', (eid,))
db.commit()
flash('Successfully deleted exercise.')
return redirect(url_for('show_workout', wid=wid))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
# TODO: http://flask.pocoo.org/docs/0.10/quickstart/#cookies
session['logged_in'] = True
flash('You were logged in.')
return redirect(url_for('show_workouts'))
return render_template('login.html', error=error)
@app.route('/logout', methods=['GET'])
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_workouts'))
if __name__ == '__main__':
app.run(host='0.0.0.0')
``` |
{
"source": "jkachika/columbus-worker",
"score": 3
} |
#### File: colorker/service/email.py
```python
import logging
import threading
import traceback
import re
from colorker.comm import messaging
logger = logging.getLogger('worker')
def send_mail(receivers, subject, message, html=None):
"""
    Sends an email to the recipients. Must be called from an EngineThread. Apart from the ``ValueError`` raised
    when ``receivers`` is not a list, this method does not raise; failures to send are logged instead.
:param list(str) receivers: list of recipient email addresses
:param str subject: subject of the email
:param str message: plain text message
:param str html: HTML message
"""
if not isinstance(receivers, list):
raise ValueError('Invalid recipients. Must be a list of email addresses.')
try:
if not subject or not message:
raise ValueError('subject and message body are required to send the email')
sender = threading.current_thread().username
master = threading.current_thread().master
if html is None:
html = re.sub("\r?\n", "<br/>", message)
request = messaging.Request(messaging.RequestType.EMAIL, messaging.WorkerEmailRequest(
sender=sender, receivers=receivers, subject=subject, plain=message, html=html))
messaging.push(master, request)
except Exception as e:
logger.error(e.message)
logger.error(traceback.format_exc())
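# Minimal usage sketch (editor's addition; the address is a placeholder and the call must
# run inside an EngineThread that provides `username` and `master` attributes):
#   send_mail(receivers=['<EMAIL>'], subject='Workflow finished',
#             message='The workflow completed successfully.')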
```
#### File: colorker/service/fusiontables.py
```python
import csv
import os
import threading
import traceback
import logging
import numpy as np
from area import area
from lxml import etree
from googleapiclient.http import MediaIoBaseUpload
from geojson import FeatureCollection
from pykml.factory import KML_ElementMaker as KML
from colorker.security import CredentialManager
from colorker.settings import STORAGE
logger = logging.getLogger('worker')
def create_table(name, description, columns, data=None, share_with=None, admin=None, user_settings=None):
"""
Creates a fusion table for the given data and returns the table id.
:param str name: Name of the fusion table to create
:param str description: Description of the table to be created
:param columns: List of dictionaries having properties name and type
:type columns: list(dict)
:param data: List of dictionaries (optional)
:type data: list(dict)
    :param share_with: Single email address string or a list of user email addresses (gmail only)
to share the created fusion table
:type share_with: str or list(str)
:param str admin: email address of the administrator who should have edit access to the created fusion table
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:rtype: str
:return: the table id of the created fusion table
"""
ft_service = CredentialManager.get_fusion_tables_service(user_settings)
drive_service = CredentialManager.get_drive_service(user_settings)
# converting column type to fusion table supported type
for column in columns:
column["type"] = str(column["type"]).upper()
column["type"] = "NUMBER" if column["type"] in ["INTEGER", "FLOAT", "NUMBER"] \
else "DATETIME" if column["type"] in ["TIMESTAMP", "DATETIME", "DATE"] \
else "LOCATION" if column["type"] == "LOCATION" \
else "STRING"
body = dict(name=name, description=description, attribution="Created by Columbus Workflow Engine",
attributionLink="http://www.columbus.cs.colostate.edu", columns=columns, isExportable=True)
table = ft_service.table()
result = table.insert(body=body).execute(num_retries=3)
table_id = result["tableId"]
logger.info("table created with id - " + table_id)
permissions = drive_service.permissions()
# give write access to the admin for all the created fusion tables
if admin is not None:
permissions.create(fileId=table_id, body={"emailAddress": admin, "type": "user", "role": "writer"},
sendNotificationEmail=False).execute(num_retries=3)
permissions.create(fileId=table_id,
body={"type": "anyone", "role": "reader", "allowFileDiscovery": False}).execute(num_retries=3)
if share_with is not None:
if isinstance(share_with, list):
for user_email in share_with:
if user_email.endswith("gmail.com"):
logger.info("setting drive permissions for user - " + user_email)
permissions.create(fileId=table_id,
body={"emailAddress": user_email, "type": "user", "role": "reader"},
sendNotificationEmail=False).execute(num_retries=3)
if isinstance(share_with, str) and share_with.endswith("gmail.com"):
logger.info("setting drive permissions for user - " + share_with)
permissions.create(fileId=table_id,
body={"emailAddress": share_with, "type": "user", "role": "reader"},
sendNotificationEmail=False).execute(num_retries=3)
if data is not None:
keys = [column["name"] for column in columns]
if user_settings is None:
user_settings = threading.current_thread().settings
temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)
if not os.path.exists(temp_dir_path):
os.makedirs(temp_dir_path)
filename = temp_dir_path + str(table_id) + ".csv"
with open(filename, 'wb') as upload_file:
dict_writer = csv.DictWriter(upload_file, keys)
dict_writer.writeheader()
dict_writer.writerows(data)
logger.info("created temporary file for upload. making call to import rows.")
upload_fd = open(filename, 'rb')
media_body = MediaIoBaseUpload(fd=upload_fd, mimetype="application/octet-stream")
result = table.importRows(tableId=table_id, media_body=media_body, startLine=1, isStrict=True,
encoding="UTF-8", delimiter=",").execute(num_retries=3)
logger.info("imported - " + str(result["numRowsReceived"]) + " rows")
return table_id
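# Usage sketch (editor's addition; the table name, columns, and row values are invented
# purely for illustration):
#   table_id = create_table(
#       name='air_quality', description='Hourly PM2.5 readings',
#       columns=[{'name': 'station', 'type': 'STRING'},
#                {'name': 'pm25', 'type': 'FLOAT'},
#                {'name': 'observed_at', 'type': 'DATETIME'}],
#       data=[{'station': 'CSU-01', 'pm25': 12.4, 'observed_at': '2016-05-01 10:00:00'}],
#       share_with='<EMAIL>')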
def create_ft_from_ftc(name, description, ftc, parties=None, admin=None, user_settings=None):
if isinstance(ftc, FeatureCollection) and ftc.get("columns", None) and isinstance(ftc["columns"], dict):
fields = sorted(ftc["columns"].keys())
columns = [{"name": str(field), "type": str(ftc["columns"][field])} for field in fields]
columns.append(
{"name": "x__geometry__x", "type": "LOCATION"}) # special property to access fusion table from maps API
data = []
for feature in ftc["features"]:
if feature["type"] == "Feature":
ft_prop = feature["properties"]
if feature["geometry"]["type"] == "Point":
point = feature["geometry"]["coordinates"]
location = KML.Point(KML.coordinates(str(point[0]) + "," + str(point[1])))
ft_prop["x__geometry__x"] = etree.tostring(location)
elif feature["geometry"]["type"] == "MultiPoint":
multipoint = feature["geometry"]["coordinates"]
geometries = [KML.Point(KML.coordinates(str(point[0]) + "," + str(point[1]))) for point in
multipoint]
location = KML.MultiGeometry()
for geometry in geometries:
location.append(geometry)
ft_prop["x__geometry__x"] = etree.tostring(location)
elif feature["geometry"]["type"] == "Polygon":
polygon = feature["geometry"]["coordinates"]
location = KML.Polygon()
for index in range(len(polygon)):
if index == 0:
location.append(KML.outerBoundaryIs(KML.LinearRing(KML.coordinates(
" ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
else:
location.append(KML.innerBoundaryIs(KML.LinearRing(KML.coordinates(
" ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
ft_prop["x__geometry__x"] = etree.tostring(location)
elif feature["geometry"]["type"] == "MultiPolygon":
multipolygon = feature["geometry"]["coordinates"]
location = KML.MultiGeometry()
for polygon in multipolygon:
kml = KML.Polygon()
for index in range(len(polygon)):
if index == 0:
kml.append(KML.outerBoundaryIs(KML.LinearRing(KML.coordinates(
" ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
else:
kml.append(KML.innerBoundaryIs(KML.LinearRing(KML.coordinates(
" ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
location.append(kml)
ft_prop["x__geometry__x"] = etree.tostring(location)
elif feature["geometry"]["type"] == "LineString":
linestring = feature["geometry"]["coordinates"]
location = KML.LineString(
KML.coordinates(" ".join([str(point[0]) + "," + str(point[1]) for point in linestring])))
ft_prop["x__geometry__x"] = etree.tostring(location)
elif feature["geometry"]["type"] == "MultiLineString":
multilinestring = feature["geometry"]["coordinates"]
location = KML.MultiGeometry()
for linestring in multilinestring:
location.append(KML.LineString(
KML.coordinates(" ".join([str(point[0]) + "," + str(point[1]) for point in linestring]))))
ft_prop["x__geometry__x"] = etree.tostring(location)
str_prop = {}
for key in ft_prop.keys():
str_prop[str(key) if isinstance(key, unicode) else key] = str(ft_prop[key]) if isinstance(
ft_prop[key], unicode) else ft_prop[key]
data.append(str_prop)
return create_table(name=name, description=description, columns=columns, data=data, share_with=parties,
admin=admin, user_settings=user_settings)
return None
def delete_table(table_id, user_settings=None):
"""
Deletes a fusion table
:param str table_id: identifier of the fusion table
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:raises BaseException: Any exception resulting from this operation
"""
try:
ft_keys = str(table_id).split(',')
for key in ft_keys:
ft_service = CredentialManager.get_fusion_tables_service(user_settings)
table = ft_service.table()
table.delete(tableId=key).execute(num_retries=3)
except BaseException as e:
logger.error(traceback.format_exc())
raise e
def read_table(table_id, user_settings=None):
"""
    Reads a fusion table and returns its contents as a list of dictionaries
:param str table_id: identifier of the fusion table
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:raises BaseException: Any exception resulting from this operation
"""
try:
ft_service = CredentialManager.get_fusion_tables_service(user_settings)
query = ft_service.query()
table = query.sql(sql='SELECT * FROM ' + str(table_id), hdrs=False).execute(num_retries=3)
result_rows = []
columns = [str(column) for column in table['columns']]
rows = table['rows']
for row in rows:
result_row = {}
for index, cell in enumerate(row):
result_row[columns[index]] = str(cell) if isinstance(cell, unicode) else cell
result_rows.append(result_row)
return result_rows
except BaseException as e:
logger.error(traceback.format_exc())
raise e
def get_polygons_from_ft(table_id, name_attr, geometry_attr, user_settings=None):
    # extracts one outer boundary per row: the polygon itself, or the largest-area polygon for multi-geometries
rows = read_table(table_id=table_id, user_settings=user_settings)
polygons = []
for row in rows:
polygon = dict(name=row[name_attr], geometry=[])
max_polygon = []
feature = row[geometry_attr]
if 'type' not in feature:
feature = feature['geometry']
if feature["type"] == "Polygon":
outer_boundary = feature["coordinates"][0]
for vertex in outer_boundary:
polygon['geometry'].append(dict(lon=vertex[0], lat=vertex[1]))
elif feature["type"] == "MultiPolygon":
for boundary in feature["coordinates"]:
max_polygon.append(area({"type": "Polygon", "coordinates": boundary}))
index = np.argmax(np.array(max_polygon))
for vertex in feature["coordinates"][index][0]:
polygon['geometry'].append(dict(lon=vertex[0], lat=vertex[1]))
elif feature["type"] == "GeometryCollection":
geometries = feature['geometries']
for geometry in geometries:
if geometry["type"] in ["Polygon", "MultiPolygon"]:
max_polygon.append(area(geometry))
else:
max_polygon.append(0)
index = np.argmax(np.array(max_polygon))
max_polygon = []
feature = geometries[index]
if feature["type"] == "Polygon":
outer_boundary = feature["coordinates"][0]
for vertex in outer_boundary:
polygon['geometry'].append(dict(lon=vertex[0], lat=vertex[1]))
elif feature["type"] == "MultiPolygon":
for boundary in feature["coordinates"]:
max_polygon.append(area({"type": "Polygon", "coordinates": boundary}))
index = np.argmax(np.array(max_polygon))
for vertex in feature["coordinates"][index][0]:
polygon['geometry'].append(dict(lon=vertex[0], lat=vertex[1]))
if len(polygon['geometry']) > 0:
polygons.append(polygon)
return polygons
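# Usage sketch (editor's addition; the table id and attribute names are placeholders):
#   polygons = get_polygons_from_ft('1AbCdEf', name_attr='NAME', geometry_attr='geometry')
#   # -> [{'name': 'Larimer', 'geometry': [{'lon': -105.1, 'lat': 40.6}, ...]}, ...]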
```
#### File: columbus-worker/colorker/utils.py
```python
import collections
import math
import time
from datetime import datetime as dt
import numpy
def current_time_millis():
"""
Gets the current time in milliseconds
:rtype: int
:return: current time in milliseconds
"""
return int(round(time.time() * 1000))
# noinspection PyBroadException
def caught(try_function, *args):
"""
Tries a function and checks if it throws an exception.
:param Callable try_function: callable object representing the function that must be tried
:param list args: arguments to pass to the callable function
:rtype: bool
:return: True if an exception was caught, False otherwise
"""
try:
try_function(*args)
return False
except BaseException:
return True
def is_number(s):
"""
Checks if the argument is a number
:param str s: Any string
:rtype: bool
:return: True if the string is a number, False otherwise
"""
return False if caught(float, s) else True
# finds the mean of a feature collection for a given property
def mean(prop, ftc):
"""
Finds the mean of a property in the given feature collection. NaN values are treated as zero.
:param str prop: name of the property in the feature collection
:param geojson.FeatureCollection ftc: the feature collection containing that property
:return: mean value of the property
:rtype: float
"""
features = ftc['features']
result = [(float(feature['properties'][prop]) if is_number(feature['properties'][prop]) and not math.isnan(
float(feature['properties'][prop])) else 0.0) for feature in features]
return numpy.mean(numpy.array(result))
# finds the standard deviation of a feature collection for a given property
def std(prop, ftc):
"""
Finds the standard deviation of a property in the given feature collection. NaN values are treated as zero.
:param str prop: name of the property in the feature collection
:param geojson.FeatureCollection ftc: the feature collection containing that property
:return: standard deviation value of the property
:rtype: float
"""
features = ftc['features']
result = [(float(feature['properties'][prop]) if is_number(feature['properties'][prop]) and not math.isnan(
float(feature['properties'][prop])) else 0.0) for feature in features]
return numpy.std(numpy.array(result))
def json_serial(obj):
"""
JSON serializer for objects not serializable by default json code.
TODO - Add implementation for other types that are not serializable
:param object obj: The object that needs to be serialized
:return: json serialization of the given object
:rtype: str
"""
if isinstance(obj, dt):
s = obj.strftime('%Y-%m-%d %H:%M:%S.%f')
tail = s[-7:]
f = round(float(tail), 3)
temp = "%.3f" % f
return "%s%s" % (s[:-7], temp[1:])
if hasattr(obj, '__dict__'):
return obj.__dict__
raise TypeError("Type not serializable")
def deep_update(source, overrides):
"""
Updates a nested dictionary or similar mapping. Modifies ``source`` in place with the key-value pairs in overrides
:param dict source: a dictionary that needs to be updated
:param dict overrides: a dictionary that provides the new keys and values
:rtype: dict
:return: updated source dictionary
"""
for key, value in overrides.iteritems():
if isinstance(value, collections.Mapping) and value:
returned = deep_update(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source
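# Worked example (editor's addition), showing that nested mappings are merged in place
# rather than replaced wholesale:
#   deep_update({'a': {'b': 1}, 'x': 0}, {'a': {'c': 2}})
#   # -> {'a': {'b': 1, 'c': 2}, 'x': 0}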
def lists_to_html_table(a_list):
"""
    Converts a list of lists to an HTML table. The first list becomes the header of the table.
Useful while sending email from the code
:param list(list) a_list: values in the form of list of lists
:return: HTML table representation corresponding to the values in the lists
:rtype: str
"""
header = "<tr><th>%s</th></tr>" % ("</th><th>".join(a_list[0]))
body = ""
if len(a_list) > 1:
for sub_list in a_list[1:]:
body += "<tr><td>%s</td></tr>\n" % ("</td><td>".join(sub_list))
return "<table>%s\n%s</table>" % (header, body)
def dicts_to_html_table(a_list):
"""
    Converts a list of dictionaries to an HTML table. Keys become the header of the table.
Useful while sending email from the code
:param list(dict) a_list: values in the form of list of dictionaries
:return: HTML table representation corresponding to the values in the lists
:rtype: str
"""
keys = sorted(a_list[0].keys())
header = "<tr><th>%s</th></tr>" % ("</th><th>".join(keys))
body = ""
    # every dictionary in the list is a row, including the first one used for the header keys
    for sub_dict in a_list:
        body += "<tr><td>%s</td></tr>\n" % ("</td><td>".join([sub_dict[key] for key in keys]))
return "<table>%s\n%s</table>" % (header, body)
def dict_to_html_table(a_dict):
"""
    Converts a dictionary to an HTML table. Keys become the header of the table.
Useful while sending email from the code
:param dict a_dict: key value pairs in the form of a dictionary
:return: HTML table representation corresponding to the values in the dictionary
:rtype: str
"""
body = ""
keys = sorted(a_dict.keys())
for key in keys:
body += "<tr><th>%s</th><td>%s</td></tr>\n" % (str(key), str(a_dict[key]))
return "<table>%s</table>" % body
``` |
{
"source": "jkacou/AutoSSL",
"score": 3
} |
#### File: autossl/ca_manager/base.py
```python
import logging
logger = logging.getLogger(__name__)
class CaManager(object):
def __init__(self, ca_config, staging=True, storage_api=None, **kwargs):
"""Base instance of interface with CA to deliver signed certificates
:param ca_config: Certificate Authority configuration instance
:type ca_config: ssl.CertificateAuthorityConfig
:param staging: Testing mode. Use staging or test CA instance (when available).
:type staging: bool
:param storage_api: storage API instance
:type storage_api: storage.base.Storage
"""
self.ca_config = ca_config
self.staging = staging
self.storage_api = storage_api
@property
def is_automated_renewal_supported(self):
"""Check is current CA supports automated renewal
:return: True, if this CA implementation supports automated renewal
:rtype: bool
"""
return False
def get_signed_certificate(self, ssl_blueprint=None, csr_path=None, servers_api=None):
"""Get PEM encoded certificate using current Certificate Authority implementation
:param ssl_blueprint:
:type ssl_blueprint: ssl.SslBlueprint
:param csr_path: path to CSR file
:type csr_path: pathlib.Path
:param servers_api: list of api instances to each server
:type servers_api: list(server.base.Server)
:return: PEM encoded signed certificate as bytes
:rtype: bytes
"""
raise NotImplementedError("Method must be overridden in child class")
```
#### File: AutoSSL/autossl/exception.py
```python
import json
class AutoSslException(Exception):
"""Generic exception for autossl
    Allows chaining exceptions while keeping track of the original exception
"""
def __init__(self, msg, original_exception=None):
message = msg
if original_exception:
message += ": %s" % original_exception
super(AutoSslException, self).__init__(message)
self.__cause__ = original_exception
self.__suppress_context__ = True
class HttpCodeException(AutoSslException):
def __init__(self, request_exception):
"""Exception raised when received Http response has an invalid http code
:param request_exception: requests HTTP exception
:type request_exception: requests.exceptions.HTTPError
"""
try:
response_body_json = request_exception.response.json()
response_body_text = json.dumps(response_body_json, indent=4, sort_keys=True)
except ValueError:
response_body_json = None
response_body_text = request_exception.response.text
exception_message = "HTTPError: => %s %s : %s" % (request_exception.request.method,
request_exception.request.url,
response_body_text)
super(HttpCodeException, self).__init__(exception_message, original_exception=request_exception)
self.status_code = request_exception.response.status_code
self.response_body_text = response_body_text
self.response_body_json = response_body_json
class NotFound(AutoSslException):
"""Requested data not found"""
pass
class SslBlueprintInconsistency(AutoSslException):
"""SSL blueprint definition contains inconsistencies"""
pass
class InvalidCertificate(AutoSslException):
"""Certificate is not matching expected criteria"""
pass
class InvalidTrustChain(InvalidCertificate):
"""Certificate is not compatible with CA certificate specified"""
pass
class KeyMismatch(InvalidCertificate):
"""Certificate does not match private key"""
pass
class ExpiredCertificate(InvalidCertificate):
"""Certificate is expiring"""
pass
class DefinitionMismatch(InvalidCertificate):
"""Certificate is not matching blueprint definition"""
pass
class CertificateNotFound(NotFound):
"""Requested certificate not present on server"""
pass
class DeployCertificateError(AutoSslException):
"""Unexpected error when trying to deploy new certificate"""
pass
```
#### File: autossl/storage/base.py
```python
class Storage(object):
def __init__(self, tracking_record_id=None, **kwargs):
r"""Base interface to store data
:param tracking_record_id: identified of tracking record if tracking system is used
:type tracking_record_id:
:param \**kwargs: key/value parameters needed for initialization
:type \**kwargs: dict
"""
self.tracking_record_id = tracking_record_id
def save_data(self, name, data_type, content=None, local_path=None, **kwargs):
r"""Save specified content in storage
:param name: name of the content to be stored on server side
:type name: str
:param data_type: type of data to save
:type data_type: ssl.DataType
:param content: content to be stored on server side
:type content: bytes
:param local_path: local path to a file to store
:type local_path: pathlib.Path or str
:param \**kwargs: optional key/value parameters from blueprint to save data
:type \**kwargs: dict
Either one of `content` or `local_path` must be specified but not both
"""
raise NotImplementedError("Must be overridden in storage specific implementation.")
def retrieve_data(self, name, data_type, **kwargs):
r"""Retrieve data from storage
:param name: identifier of data to retrieve
:type name: str
:param data_type: type of data to retrieve
:type data_type: ssl.DataType
:param \**kwargs: optional key/value parameters from blueprint to retrieve data
:type \**kwargs: dict
:return: requested data
:rtype: bytes
:raise exception.NotFound: when requested data are missing in storage
"""
raise NotImplementedError("Must be overridden in storage specific implementation.")
```
#### File: autossl/storage/local.py
```python
import logging
import os
import shutil
from .. import exception, util
from . import base
logger = logging.getLogger(__name__)
class LocalFileStorage(base.Storage):
def __init__(self, path, tracking_record_id=None, **kwargs):
super(LocalFileStorage, self).__init__(tracking_record_id=tracking_record_id, **kwargs)
self.path = util.Path(os.path.expandvars(path))
if not self.path.is_dir():
raise IOError("Invalid folder path specified: '%s'" % self.path)
def save_data(self, name, content=None, local_path=None, **kwargs):
output_file_path = self.path / name
if content is not None:
if local_path:
logger.warning("local_path path '{}' ignored as content also specified.".format(local_path))
output_file_path.write_bytes(content)
else:
shutil.copy(str(local_path), str(output_file_path))
def retrieve_data(self, name, **kwargs):
file_path = self.path / name
if not file_path.exists():
            raise exception.NotFound("Path %s does not exist." % file_path)
return file_path.read_bytes()
```
#### File: autossl/tracking/base.py
```python
from enum import Enum
import logging
logger = logging.getLogger(__name__)
class TrackingType(Enum):
"""list of tracking types supported."""
# End-to-end flow of certificate renewal and deployment on servers
Renewal = 'renewal'
# Simply deploy existing valid certificate on new or outdated servers
Synchronize = 'synchronize'
class Tracking(object):
def __init__(self, ssl_blueprint_path, **kwargs):
r"""Api to tracking server for specified input ssl blueprint
:param ssl_blueprint_path: local path to ssl blueprint
:type ssl_blueprint_path: pathlib.Path
:param \**kwargs: generic key/value parameters
:type \**kwargs: dict
"""
self.ssl_blueprint_path = ssl_blueprint_path
def create(self, tracking_type, servers=None):
"""Create a tracking record with details of current SSL blueprint
        :param tracking_type: Type of tracking. Can be used to customize the tracking record content.
:type tracking_type: TrackingType
:param servers: List of servers in scope of the action. All servers from config if None specified here.
:type servers: list
:return: Identifier for the created record
:rtype: str
"""
logger.debug("Nothing to do for 'create' tracking default implementation.")
def save_data(self, name, data_type, local_path=None, content=None, **kwargs):
r"""Save input data in tracking system
:param name: name of the file to attach to the tracking record
:type name: str
:param data_type: type of data to save
:type data_type: ssl.DataType
:param local_path: local path to file to attach to the tracking record
:type local_path: pathlib.Path
:param content: content of the file to attach to the tracking record
:type content: bytes
:param \**kwargs: generic key/value parameters
:type kwargs: dict
"""
logger.debug("Nothing to do for 'save_data' tracking default implementation.")
def update(self, message):
"""Update tracking record
:param message: text to add to tracking record
:type message: str
"""
logger.debug("Nothing to do for 'update' tracking default implementation.")
def refresh(self, record_id):
"""Update current tracking instance with last changes from tracking record on server side
:param record_id: identifier of the record to refresh
"""
logger.debug("Nothing to do for 'refresh' tracking default implementation.")
def retrieve_data(self, name=None, data_type=None, **kwargs):
r"""Retrieve specified data from tracking system
:param name: Name of file/data to retrieve
:type name: str
:param data_type: type of data to retrieve
:type data_type: ssl.DataType
:param \**kwargs: generic key/value parameters
:type kwargs: dict
:return: file content
:rtype: bytes
"""
logger.debug("Nothing to do for 'retrieve_data' tracking default implementation.")
def close_for_failure(self, message):
"""Specify action is completed with a failed status
:param message: custom message
:type message: str
"""
logger.debug("Nothing to do for 'close_for_failure' tracking default implementation.")
def close_for_success(self, message):
"""Specify action is completed with a success status
:param message: custom message
:type message: str
"""
logger.debug("Nothing to do for 'close_for_success' tracking default implementation.")
```
#### File: AutoSSL/autossl/util.py
```python
import importlib
import logging
try:
# Python 3
from pathlib import Path # noqa: F401
except ImportError:
# Python 2
from pathlib2 import Path # noqa: F401
import requests
import shutil
from six import PY2 # noqa: F401
import tempfile
from . import exception
logger = logging.getLogger(__name__)
def check_http_response_ok(response):
"""Validate http response code
all codes not in 2xx will raise an exception
:param response: requests Http response
:type response: requests.Response
:return: same http response
:rtype: requests.Response
:raise exception.HttpCodeException: if http status code in not in 2xx
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as ex:
raise exception.HttpCodeException(ex)
return response
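# Typical usage (editor's sketch; the URL is a placeholder):
#   response = check_http_response_ok(requests.get('https://example.com/api/items'))
#   payload = response.json()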
def str_to_class(class_path):
"""Dynamically import and return class type from full module and class path
:param class_path:
:type class_path: str
:return: Type of the class to instantiate
:rtype: type
:raise ImportError: if module does not exist
:raise AttributeError: if class not found in specified module
"""
module_name, class_name = class_path.rsplit('.', 1)
try:
module_ = importlib.import_module(module_name)
try:
return getattr(module_, class_name)
except AttributeError:
logging.exception('Class %s not found in module %s.' % (class_name, module_name))
raise
except ImportError:
logging.exception('Module %s does not exist.' % module_name)
raise
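# Usage sketch (editor's addition), resolving a storage backend from its dotted path:
#   storage_cls = str_to_class('autossl.storage.local.LocalFileStorage')
#   storage = storage_cls(path='/tmp/certs')  # '/tmp/certs' is a placeholder directory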
class TempDir(object):
def __init__(self, path=None):
"""Create Temporary directory that can be used with context manager for automated deletion at __exit__
:param path: local path. If None, temporary folder will be created at `__enter__` thanks to `tempfile.mkdtemp()`
:type path: str or pathlib.Path
"""
self.input_path = path
self.path = None
def __enter__(self):
self.path = Path(str(self.input_path) if self.input_path else tempfile.mkdtemp())
if not self.path.is_dir():
raise IOError("Specified path {} is not a directory.".format(self.path))
return self
def __exit__(self, *args):
if self.path.exists() and self.path.is_dir():
shutil.rmtree(str(self.path), ignore_errors=True)
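# Usage sketch (editor's addition): the directory is created on __enter__ and removed on __exit__.
#   with TempDir() as tmp:
#       (tmp.path / 'scratch.bin').write_bytes(b'...')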
```
#### File: jkacou/AutoSSL/setup.py
```python
import os
from pkg_resources import parse_version
from sys import version_info as py_version
from setuptools import setup, find_packages
from setuptools import __version__ as setuptools_version
HERE = os.path.abspath(os.path.dirname(__file__))
# retrieve package information
about = {}
with open(os.path.join(HERE, 'autossl', '__version__.py'), 'rb') as f:
exec(f.read().decode('utf-8'), about)
with open(os.path.join(HERE, 'README.rst'), 'rb') as readme_file:
readme = readme_file.read().decode('utf-8')
install_requires = [
'six',
'cryptography',
'pyyaml',
'requests',
]
extras_require = {
# acme
'acme': ['acme'],
# servers
# tracking
# storage
'git': ['GitPython'],
}
# ability to install automatically all dependencies
extras_require['all'] = list(set(value for sublist in extras_require.values() for value in sublist))
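# Editor's note: the extras above let users opt into optional dependencies via pip,
# e.g. `pip install autossl[acme]` or `pip install autossl[all]` (assuming the published
# distribution name matches about['__title__']).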
def has_environment_marker_range_operators_support():
"""Code extracted from 'pytest/setup.py'
https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
The first known release to support environment marker with range operators
it is 17.1, see: https://setuptools.readthedocs.io/en/latest/history.html#id113
"""
return parse_version(setuptools_version) >= parse_version('17.1')
# Compatibility with old version of setuptools
if has_environment_marker_range_operators_support():
extras_require[':python_version<"3.4"'] = ['enum34', 'pathlib2']
elif py_version < (3, 4):
install_requires.extend(['enum34', 'pathlib2'])
setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
long_description=readme,
    long_description_content_type='text/x-rst',
url=about['__url__'],
license=about['__license__'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'autossl = autossl.__main__:main'
]
},
platforms='Unix; MacOS X',
install_requires=install_requires,
extras_require=extras_require,
)
```
#### File: tests/ca_manager/test_local.py
```python
import pytest
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import NameOID
from cryptography import x509
from autossl.ca_manager import local as local_ca
from autossl.storage import local as local_storage
from autossl import ssl
from tests import util as tests_util
@pytest.fixture(scope="function")
def storage(tmp_path):
yield local_storage.LocalFileStorage(path=str(tmp_path))
@pytest.fixture(scope="function")
def ca_manager(storage):
ca_key, ca_crt = tests_util.create_ca_certificate(ca_name='Autossl')
storage.save_data(name='ca_key', data_type=ssl.DataType.PrivateKey, content=ca_key)
storage.save_data(name='ca_crt', data_type=ssl.DataType.Certificate, content=ca_crt)
yield local_ca.LocalCa(ca_config=None,
staging=True,
storage_api=storage,
ca_private_key='ca_key',
ca_certificate='ca_crt')
def test_automated_renewal_supported(ca_manager):
assert ca_manager.is_automated_renewal_supported is True
@pytest.mark.parametrize('common_name', ['test.autossl.com'])
def test_get_signed_certificate(ca_manager, common_name, tmp_path):
_, csr_path = ssl.generate_csr(name='autossl_cert',
common_name=common_name,
output_path=tmp_path)
crt = ca_manager.get_signed_certificate(ssl_blueprint=None, csr_path=csr_path, servers_api=None)
# check CRT
x509_object = x509.load_pem_x509_certificate(data=crt, backend=default_backend())
assert len(x509_object.subject.get_attributes_for_oid(NameOID.COMMON_NAME)) == 1
assert x509_object.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == common_name
```
#### File: AutoSSL/tests/test_command_line_parser.py
```python
import logging
import pytest
import autossl
from autossl import util, __main__
from tests import util as tests_util
def test_display_version():
assert __main__.display_version() == "Autossl version {version}".format(version=autossl.__version__)
@pytest.mark.parametrize('cli_credentials, credentials_file, result', [
# full user_password credentials from cli, no credential file
(['credential_1:my_username:my_pa*ss:wor$d'], None,
{'credential_1': {'raw_content': 'my_username:my_pa*ss:wor$d'}}),
# no password from cli, no credential file
(['credential_1:my_username'], None,
{'credential_1': {'raw_content': 'my_username'}}),
# just credential name from cli, no credential file
(['credential_1'], None,
{'credential_1': {'raw_content': None}}),
# full user_password credentials from cli, credential file with cli overriding file for credential_1
(['credential_1:my_username:my_pa*ss:wor$d'], tests_util.DATA_PATH / 'credentials_file',
{'credential_1': {'raw_content': 'my_username:my_pa*ss:wor$d'},
'credential_4_from_file': {'username': 'username_4', 'password': '<PASSWORD>', 'extra_parameter': 'extra_param'},
'credential_5_from_file': {'api_key': '499ebd66-29d6-4992-9ec4-5511a92d248e', 'api_id': '12345'}}),
])
def test_parse_credentials(cli_credentials, credentials_file, result):
assert __main__.parse_credentials(cli_credentials=cli_credentials, credentials_file=credentials_file) == result
def test_main_parser(capsys):
# check action is mandatory
with pytest.raises(SystemExit):
__main__.parse_arguments([])
if util.PY2:
assert 'error: too few arguments' in capsys.readouterr().err
else:
assert 'error: the following arguments are required: action' in capsys.readouterr().err
# default values (as action is mandatory, use 'version' that requires no additional parameter)
parser = __main__.parse_arguments(['version'])
assert parser.credentials == []
assert parser.debug is logging.INFO
assert parser.staging is False
assert parser.config is None
assert parser.blueprint is None
# parser = __main__.parse_arguments(['-u', r'domain\user:password', '--debug', 'version'])
# assert parser.credentials == r'domain\user:password'
# assert parser.debug is True
# TODO
```
#### File: AutoSSL/tests/test_ssl_blueprint.py
```python
from datetime import datetime
import pytest
from autossl import ssl, exception
from tests import util as tests_util
@pytest.mark.parametrize('cert1,cert2,is_same', [
# fully identical
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
True),
# fully identical but different san order
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name3', 'name2'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
True),
# fully common_name
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name4', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
False),
# different san
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name2', 'name4'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
False),
# different expiration
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2021, 2, 3, 14, 38, 21)},
False),
])
def test_ssl_blueprint___eq__(cert1, cert2, is_same):
assert (ssl.SslCertificate(**cert1) == ssl.SslCertificate(**cert2)) is is_same
def test_missing_blueprint():
with pytest.raises(IOError):
ssl.SslBlueprint('dummy/path')
def test_ssl_blueprint_no_server():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.ov.example.com_no-server.yaml')
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'OV'
assert ssl_blueprint.certificate.certificate_authority == 'Sectigo'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert len(ssl_blueprint.servers) == 0
def test_ov_ssl_blueprint():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.ov.example.com.yaml')
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'OV'
assert ssl_blueprint.certificate.certificate_authority == 'Sectigo'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert ssl_blueprint.certificate.renewal_delay == 30
assert len(ssl_blueprint.servers) == 1
assert len(ssl_blueprint.certificate.sans) == 5
assert ssl_blueprint.certificate.organization['company_name'] == 'Autossl corporation'
assert ssl_blueprint.certificate.organization['street_address'] == 'Newbury street'
assert ssl_blueprint.certificate.organization['city'] == 'Boston'
assert ssl_blueprint.certificate.organization['state'] == 'Massachusetts'
assert ssl_blueprint.certificate.organization['postal_code'] == '02115'
assert ssl_blueprint.certificate.organization['country_code'] == 'US'
def test_dv_ssl_blueprint():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.dv.example.com.yaml')
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'DV'
assert ssl_blueprint.certificate.certificate_authority == 'LetsEncrypt'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert ssl_blueprint.certificate.renewal_delay == 30
assert len(ssl_blueprint.servers) == 2
assert len(ssl_blueprint.certificate.sans) == 5
assert ssl_blueprint.certificate.organization is None
def test_ssl_blueprint_with_global_config():
ssl_blueprint = ssl.SslBlueprint(
yaml_path=tests_util.DATA_PATH / 'tst.ov.example.com_minimal.yaml',
global_config_path=tests_util.DATA_PATH / 'global_config.yaml',
)
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'DV'
assert ssl_blueprint.certificate.certificate_authority == 'LetsEncrypt'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert ssl_blueprint.certificate.renewal_delay == 30
assert len(ssl_blueprint.servers) == 1
assert len(ssl_blueprint.certificate.sans) == 5
assert ssl_blueprint.certificate.organization['company_name'] == 'Autossl corporation'
assert ssl_blueprint.certificate.organization['street_address'] == 'Newbury street'
assert ssl_blueprint.certificate.organization['city'] == 'Boston'
assert ssl_blueprint.certificate.organization['state'] == 'Massachusetts'
assert ssl_blueprint.certificate.organization['postal_code'] == '02115'
assert ssl_blueprint.certificate.organization['country_code'] == 'US'
def test_ssl_blueprint_no_common_name(tmp_path):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
san:
- tst.autossl.example.com
- uat.tst.autossl.example.com
- pit.tst.autossl.example.com
...
"""
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
ssl_blueprint = ssl.SslBlueprint(str(blueprint_path))
assert ssl_blueprint.certificate.common_name is None
assert len(ssl_blueprint.certificate.sans) == 3
def test_ssl_blueprint_no_san(tmp_path):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
common_name: tst.autossl.example.com
...
"""
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
ssl_blueprint = ssl.SslBlueprint(str(blueprint_path))
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert len(ssl_blueprint.certificate.sans) == 0
def test_ssl_blueprint_no_common_name_no_san(tmp_path):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
...
"""
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
with pytest.raises(ValueError):
ssl.SslBlueprint(str(blueprint_path))
@pytest.mark.parametrize('common_name,is_valid', [
('test2_valid-test.example.com', True),
('*.example.com', True),
(' test.example.com', False),
('test.example.com ', False),
('test.*.com', False),
('%1.example.com', False),
])
def test_ssl_blueprint_validate_common_name(tmp_path, common_name, is_valid):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
common_name: '{}'
...
""".format(common_name)
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
if is_valid:
ssl.SslBlueprint(str(blueprint_path))
else:
with pytest.raises(ValueError):
ssl.SslBlueprint(str(blueprint_path))
def test_get_domains():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.dv.example.com.yaml')
assert ssl_blueprint.domains == {
'tst.autossl.example.com',
'uat.tst.autossl.example.com',
'pit.tst.autossl.example.com',
'cit.tst.autossl.example.com',
'mgt.tst.autossl.example.com'
}
def test_is_domain_matching():
assert ssl.is_domain_matching('test.example.com', 'test.example.com')
assert ssl.is_domain_matching('test.example.com', 'test.example.com', True)
assert ssl.is_domain_matching('test.example.com', 'test.example.com', False)
assert ssl.is_domain_matching('test.example.com', 'test2.example.com') is False
assert ssl.is_domain_matching('test.example.com', 'test2.example.com', True) is False
assert ssl.is_domain_matching('test.example.com', 'test2.example.com', False) is False
assert ssl.is_domain_matching('test.example.com', '*.example.com') is True
assert ssl.is_domain_matching('test.example.com', '*.example.com', True) is False
assert ssl.is_domain_matching('test.example.com', '*.example.com', False) is True
def test_is_domain_list_matching():
assert ssl.is_domain_list_matching(['test.example.com'], ['test.example.com'])
assert ssl.is_domain_list_matching(['test.example.com'], ['test.example.com', 'test2.example.com'])
assert ssl.is_domain_list_matching(['test.example.com', 'test2.example.com'], ['test.example.com']) is False
assert ssl.is_domain_list_matching(['test.example.com', 'test2.example.com'], ['*.example.com'])
assert ssl.is_domain_list_matching(
['test.example.com', 'test2.example.com'], ['*.example.com'], exact_match=True) is False
def test_get_config():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.dv.example.com.yaml')
assert ssl_blueprint.get_config(name='tracking', path=['dummy_path'], default=[]) == []
assert ssl_blueprint.get_config(name='tracking', path=None, default=None) is None
assert ssl_blueprint.get_config(name='storage', path=None, default=None) == {
'credentials': 'credential_1',
'data': [{'type': 'key'}, {'type': 'csr'}, {'type': 'crt'}],
'parameters': {
'git_url': 'https://git.autossl.com/git/scm/ssl/certificates.git',
'config_user_name': 'Test User',
'config_user_email': '<EMAIL>',
},
'type': 'autossl.storage.gitscm.GitStorage'}
def test_check_chain_of_trust(tmp_path):
crt_path = tmp_path / 'local.crt'
ca_crt_path = tmp_path / 'local_ca.crt'
ca_key_path = tmp_path / 'local_ca.key'
# generate CA certificate
key, crt = tests_util.create_ca_certificate(ca_name='Autossl')
ca_crt_path.write_bytes(crt)
ca_key_path.write_bytes(key)
# sign a new certificate with the CA
_, csr_path = ssl.generate_csr(name='autossl_cert', common_name='test.autossl.com', output_path=str(tmp_path))
crt_content = tests_util.create_signed_certificate(
csr_path=csr_path,
ca_crt_path=ca_crt_path,
ca_key_path=ca_key_path,
)
crt_path.write_bytes(crt_content)
# valid trust chain should no raise any error
ssl.check_chain_of_trust(
        chain_of_trust=[crt.decode('utf-8')],  # Chain of trust normally comes from the SSL blueprint, so it is not in bytes
crt_path=crt_path,
)
# generate self-signed certificate
self_signed_key_path, self_signed_crt_path = tests_util.create_self_signed_certificate(
crt_name="self_signed_local.crt",
output_path=tmp_path,
common_name='self_signed.test.autossl.com',
)
# self signed certificate should not be validated by this CA
with pytest.raises(exception.InvalidTrustChain):
ssl.check_chain_of_trust(
            chain_of_trust=[crt.decode('utf-8')],  # Chain of trust normally comes from the SSL blueprint, so it is not in bytes
crt_path=self_signed_crt_path,
)
```
#### File: AutoSSL/tests/test_subca.py
```python
import os
import shutil
import tempfile
import collections
# external packages
import pytest
# autossl imports
from autossl import manager, ssl, util
from tests import util as tests_util
CertificateKeyPair = collections.namedtuple('CertificateKeyPair', 'key crt')
@pytest.fixture(scope='module')
def subca_manager():
# create temporary directories use by blueprint
temp_crt_dir = tempfile.mkdtemp()
os.environ['AUTOSSL_CRT_PATH'] = temp_crt_dir
temp_crt_dir_2 = tempfile.mkdtemp()
os.environ['AUTOSSL_CRT_PATH_2'] = temp_crt_dir_2
temp_storage_dir = tempfile.mkdtemp()
os.environ['AUTOSSL_STORAGE_PATH'] = temp_storage_dir
temp_tracking_dir = tempfile.mkdtemp()
os.environ['AUTOSSL_TRACKING_PATH'] = temp_tracking_dir
yield manager.SslManager(
global_config=None,
blueprint_path=tests_util.DATA_PATH / 'subca.example.com.yaml',
credentials=None,
staging=True
)
# cleanup generated artifacts
shutil.rmtree(temp_crt_dir, ignore_errors=True)
shutil.rmtree(temp_crt_dir_2, ignore_errors=True)
shutil.rmtree(temp_storage_dir, ignore_errors=True)
shutil.rmtree(temp_tracking_dir, ignore_errors=True)
@pytest.fixture(scope="module")
def ca_keypair_path():
key, crt = tests_util.create_ca_certificate(ca_name='Autossl')
ca_temp_dir = util.Path(tempfile.mkdtemp())
ca_crt_path = ca_temp_dir / 'local_ca.crt'
ca_key_path = ca_temp_dir / 'local_ca.key'
ca_crt_path.write_bytes(crt)
ca_key_path.write_bytes(key)
yield CertificateKeyPair(ca_key_path, ca_crt_path)
# cleanup temp folders
shutil.rmtree(str(ca_temp_dir), ignore_errors=True)
@pytest.fixture(scope="module")
def subca_keypair_path(subca_manager, ca_keypair_path):
storage_path = util.Path(os.environ['AUTOSSL_STORAGE_PATH'])
key_path = storage_path.joinpath(subca_manager.ssl_blueprint.name + '.key')
csr_path = storage_path.joinpath(subca_manager.ssl_blueprint.name + '.csr')
crt_path = storage_path.joinpath(subca_manager.ssl_blueprint.name + '.crt')
bundle_path = storage_path.joinpath(subca_manager.ssl_blueprint.name + '.bundle')
# generate sub-CA certificate request and key
subca_manager.request_renewal(
force=True, # disable interactive user input
)
# simulate CA signing
crt_content = tests_util.create_signed_certificate(
csr_path=csr_path,
ca_crt_path=ca_keypair_path.crt,
ca_key_path=ca_keypair_path.key,
certificate_validity_days=100
)
crt_path.write_bytes(crt_content)
bundle_path.write_bytes(ca_keypair_path.crt.read_bytes() + crt_content)
yield CertificateKeyPair(key_path, crt_path)
# valid certificate request
def test_subca_ok(tmp_path, subca_manager, subca_keypair_path):
# check sub-CA certificate
subca_crt_path, _ = subca_manager.get_and_check_artifacts()
subca_cert = ssl.SslCertificate().init_from_x509(x509_path=subca_crt_path)
assert subca_cert.common_name == 'subca.example.com'
# sign a new certificate with the sub-CA
_, csr_path = ssl.generate_csr(name='leafcert',
common_name='domain.subca.example.com',
sans=['domain1.subca.example.com', 'domain2.subca.example.com'],
output_path=str(tmp_path))
crt_content = tests_util.create_signed_certificate(
csr_path=csr_path,
ca_crt_path=subca_keypair_path.crt,
ca_key_path=subca_keypair_path.key
)
crt_path = tmp_path / 'leafcert.crt'
crt_path.write_bytes(crt_content)
# check trust chain
bundle_path = os.path.join(os.environ['AUTOSSL_STORAGE_PATH'], subca_manager.ssl_blueprint.name + '.bundle')
assert os.system('openssl verify -CAfile %s %s' % (bundle_path, crt_path)) == 0
# invalid certificate request, domains are not part of the authorized names
def test_subca_ko(tmp_path, subca_manager, subca_keypair_path):
# sign a new certificate with the sub-CA
_, csr_path = ssl.generate_csr(name='invalidcert',
common_name='domain.other.example.com',
sans=['domain.other.example.com'],
output_path=str(tmp_path))
crt_content = tests_util.create_signed_certificate(
csr_path=csr_path,
ca_crt_path=subca_keypair_path.crt,
ca_key_path=subca_keypair_path.key
)
crt_path = tmp_path / 'invalidcert.crt'
crt_path.write_bytes(crt_content)
# check trust chain
bundle_path = os.path.join(os.environ['AUTOSSL_STORAGE_PATH'], subca_manager.ssl_blueprint.name + '.bundle')
assert os.system('openssl verify -CAfile %s %s' % (bundle_path, crt_path)) != 0
```
#### File: tests/tracking/test_base.py
```python
import pytest
from autossl.tracking import base
from tests import util as tests_util
@pytest.fixture(scope="module")
def base_tracking():
ssl_blueprint_name = 'tst.ov.example.com.yaml'
ssl_blueprint_path = tests_util.DATA_PATH / ssl_blueprint_name
base_tracking_instance = base.Tracking(ssl_blueprint_path)
# basic checks
assert base_tracking_instance.ssl_blueprint_path is not None
yield base_tracking_instance
def test_create_basic(base_tracking):
base_tracking.create(tracking_type=base.TrackingType.Renewal)
def test_save_data(base_tracking):
base_tracking.save_data(name=None, data_type=None, local_path=None,
content=None, extra_param1='value1', extra_param2='value2')
def test_update(base_tracking):
base_tracking.update(message=None)
def test_refresh(base_tracking):
base_tracking.refresh(record_id=None)
def test_retrieve_data(base_tracking):
base_tracking.retrieve_data(name=None, data_type=None, extra_param1='value1', extra_param2='value2')
def test_close_for_failure(base_tracking):
base_tracking.close_for_failure(message=None)
def test_close_for_success(base_tracking):
base_tracking.close_for_success(message=None)
``` |
{
"source": "jkadowaki/Cosmological-Parameters",
"score": 2
} |
#### File: Cosmological-Parameters/scripts/multiplot.py
```python
import os
import re
import subprocess
import glob as g
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
################################################################################
class ImageFollower(object):
'update image in response to changes in clim or cmap on another image'
def __init__(self, follower):
self.follower = follower
def __call__(self, leader):
self.follower.set_cmap(leader.get_cmap())
self.follower.set_clim(leader.get_clim())
################################################################################
def plot_OM_OL(arr, fname, peak=-1):
plt.clf()
plt.rcParams['savefig.facecolor'] = "1."
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size='11')
# Probability Density
arr[np.where(arr==0.)] = np.nan
im = plt.imshow(arr, interpolation='None', origin='lower', extent=[0,2.5,-1,3])
plt.colorbar(im)
if peak > 0: im.set_clim(vmin=0, vmax=peak)
# q0 Lines
rot = 27.5 # Degrees
plt.plot([0.0, 2.5],[-0.5,0.8], 'w--')
plt.plot([0.0, 2.5],[0.0, 1.3], 'w--')
plt.plot([0.0, 2.5],[0.5, 1.8], 'w--')
plt.text(2.00, 0.80, '$q_0=-0.5$', rotation=rot, color='w')
plt.text(2.00, 1.30, '$q_0=0.0$', rotation=rot, color='w')
plt.text(2.00, 1.80, '$q_0=0.5$', rotation=rot, color='w')
plt.text(1.25, 0.85, '$\mathrm{Decelerating}$', rotation=rot, color='w')
plt.text(1.25, 1.03, '$\mathrm{Accelerating}$', rotation=rot, color='w')
# Omega_tot Line
rot = -46.5 # Degrees
plt.plot([0.0, 2.0],[1.0, -1.0], 'w-')
plt.text(1.70, -0.70, '$\Omega_\mathrm{tot}=1$',rotation=rot, color='w')
plt.text(1.20, -0.37, '$\mathrm{Open}$', rotation=rot, color='w')
plt.text(1.20, -0.20, '$\mathrm{Closed}$', rotation=rot, color='w')
# Omega_Lambda=0 Line
rot = 5 # Degrees
plt.plot([0.0, 2.5],[0.0, 0.0], 'w:')
plt.plot([0.0, 1.35, 1.8, 2.5],[0.0, 0.0, 0.02, 0.11], 'w-')
plt.text(2.10, -0.12, '$\Omega_\mathrm{\Lambda}=0$', color='w')
plt.text(1.40, -0.08, '$\mathrm{Recollapses}$', rotation=rot, color='w')
plt.text(1.40, 0.13, '$\mathrm{Expands \, to \, Infinity}$', rotation=rot, color='w')
# No Big Bang
plt.text(0.10, 2.75, '$\mathrm{No \, Big \, Bang}$', rotation=65)
# Tick Label Size
plt.xticks(size=15)
plt.yticks(size=15)
if prob_tag in fname:
h0 = re.findall('[-+]?\d+[\.]?\d*', fname)[-1]
print '\t', fname, '\t', np.max(arr[~np.isnan(arr)]), '\t', np.sum(arr[~np.isnan(arr)])
plt.title('$H_0 = %s$' % h0, fontsize=20)
plt.xlabel('$\Omega_\mathrm{M}$', fontsize=20)
plt.ylabel('$\Omega_\Lambda$', fontsize=20)
plt.savefig(fname, bbox_inches='tight')
Nr = 4
Nc = 5
fig = plt.figure()
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmax = -1.
num = 0
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data = np.genfromtxt(file[num])
dd = np.ravel(data)
# Manually find the max of all colors for
# use in setting the color scale.
vmax = max(vmax, np.amax(dd))
images.append(a.imshow(data, interpolation='None', origin='lower', extent=[0,2.5,-1,3]))
ax.append(a)
num += 1
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
norm = plt.Normalize(vmin=0., vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].callbacksSM.connect('changed', ImageFollower(im))
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
################################################################################
def main():
global prob_tag, plot_ext
dir = 'fine_h0_resolution/' # All files to be in directory dir
text_ext = '.txt' # Any data/text file to be saved with extention text_ext
plot_ext = '.0.png' # Any plots to be saved with extension plot_ext
mov_ext = '.gif'
prob_tag = 'prob_' # All prob. dist. to start with prefix prob_tag
# Assumes file located in dir & named with prefix cs_tag & extension text_ext
# Assumes information on z-, m- bin #, and h0 value in filename (in order)
# e.g. 'chisq_z1_m1_h70.txt'
# Code currently breaks if h0 is a non-integer value.
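# Illustrative check of the filename parsing used below (added note, not from the
# original source): the regex pulls out the z-bin, m-bin and H0 value in order, e.g.
#   re.findall('[-+]?\d+[\.]?\d*', 'prob_z1_m2_h70.txt')  ->  ['1', '2', '70']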
prob_list = sorted(g.glob(os.path.join(dir, prob_tag + '*' + text_ext)))
z, m, h0 = np.transpose(np.array([re.findall('[-+]?\d+[\.]?\d*', os.path.basename(f)) \
for f in prob_list]))
# Iterate through every redshift, stellar mass bin
for z_param in np.unique(z):
for m_param in np.unique(m):
print '\nz: ', z_param, ';\t m: ', m_param, '\n'
index = np.where((z_param==z) * (m_param==m) == True)[0]
# Iterate through every chisq file
for num, file in enumerate([prob_list[i] for i in index]):
# Normalize P(H0 | mu_0)
h0_path = os.path.join(dir, h0_tag + 'z' + z_param + '_m' + m_param + text_ext)
h0_plot = h0_path.replace(text_ext, plot_ext)
h0_prob, h0_factor = normalize(np.array(h0_prob))
np.savetxt(h0_path, h0_prob)
plot_H0(h0[index], h0_prob, h0_plot)
#"""
# Create 3D normalized cubes
prob = np.array(OmOL_prob)
norm = np.sum(prob[~np.isnan(prob)])
peak_value = np.max(prob[~np.isnan(prob)]/norm)
print 'Prob Sum:\t', norm
print 'Peak Value:\t', peak_value, '\n'
for num, frame in enumerate(OmOL_prob):
np.savetxt(prob_list[num], frame/norm)
plot_OM_OL(frame/norm, plot_list[num], peak_value)
#"""
# Normalize P(Omega_M, Omega_Lambda | mu_0)
OM_OL_path = os.path.join(dir, om_ol_tag + 'z' + z_param + '_m' + m_param + text_ext)
OM_path = os.path.join(dir, om_tag + 'z' + z_param + '_m' + m_param + text_ext)
OL_path = os.path.join(dir, ol_tag + 'z' + z_param + '_m' + m_param + text_ext)
OM_OL_plot = OM_OL_path.replace(text_ext, plot_ext)
OM_OL_prob, fac = normalize(np.array(OM_OL_prob))
np.savetxt(OM_OL_path, OM_OL_prob)
plot_OM_OL(OM_OL_prob, OM_OL_plot)
OM_OL_prob[np.isnan(OM_OL_prob)]=0.
np.savetxt(OM_path, np.sum(OM_OL_prob, axis=0))
np.savetxt(OL_path, np.sum(OM_OL_prob, axis=1))
# Movie Iterating through different H0 in each z,m-bin
imlist = os.path.join(dir, prob_tag + 'z' + z_param + '_m' \
+ m_param + '_h*' + plot_ext)
mov = imlist.replace(plot_ext, mov_ext).replace('_h*','')
run('convert -delay 05x100 %s %s' % (imlist, mov))
################################################################################
if __name__ == "__main__":
main()
################################################################################
``` |
{
"source": "jkadowaki/paper_plots",
"score": 3
} |
#### File: redshift_paper/code/high_spin_halos2.py
```python
import sys
sys.path.append("../code")
from read_data import read_data
import matplotlib.cm as cm
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# Set matplotlib to use LaTeX
font = {'size': 20}
plt.rc('text', usetex=True)
plt.rc('font', **font)
################################################################################
def best_fit(x, m, b):
return m * x + b
################################################################################
def mag_vs_re(df, fname='mag_vs_re.pdf'):
slope, intercept, r_value, p_value, std_err = stats.linregress(df["Mr"], df["Re"])
if fname:
label = f"$R_e = {slope:.3f} M_r + {intercept:.3f}$" if intercept>0 else \
f"$R_e = {slope:.3f} M_r {intercept:.3f}$"
fig = plt.figure(figsize=(5,5))
plt.scatter(df["Mr"], df["Re"], marker='.', s=5)
plt.plot( [min(df["Mr"]), max(df["Mr"])],
[best_fit(min(df["Mr"]), slope, intercept),
best_fit(max(df["Mr"]), slope, intercept)],
c='r', label=label)
plt.xlim(-13.75,-17.5)
plt.ylim( 1, 6.5)
plt.xlabel(r"$M_r \, \mathrm{(mag)}$")
plt.ylabel(r"$R_e \, \mathrm{(kpc)}$")
plt.legend(loc='upper left', fontsize=12)
plt.savefig(fname, bbox_inches='tight')
plt.close()
print("\nStarndarization Formula: " + label)
return slope, intercept
################################################################################
def stellar_mass(gr_color, M_r):
M_sun = 4.65 # Willmer, 2018, ApJS, 236, 47
L_r = 10**(0.4 * (M_sun - M_r)) # Solar Luminosity
return L_r * 10**(1.629 * gr_color - 0.792)
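# Worked example with illustrative (not published) values: for g-r = 0.6 and
# M_r = -15.5, L_r = 10**(0.4*(4.65 + 15.5)) ~ 1.1e8 L_sun, so
# stellar_mass(0.6, -15.5) ~ 1.1e8 * 10**(1.629*0.6 - 0.792) ~ 1.8e8 M_sun.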
def axisratio_vs_re(df, select=None, bins=4, standardized=True,
fname='ar_vs_re.pdf', mag_vs_re_fname=None):
# Format Scientific Notation
def format_scinot(num, precision=2):
exponent = int(np.floor(np.log10(num)))
factor = round(num/10**exponent, precision)
return r"{0:.2f} \times 10^{1}".format(factor, exponent)
re = "new_Re" if standardized else "Re"
xlabel = r"$r'_e$" if standardized else r"$r_e \, (\mathrm{kpc})$"
# Standardized Radius + Stellar Mass
m, b = mag_vs_re(df, fname=mag_vs_re_fname) # Regression Parameters
bins = 1 if standardized else bins # Number of Bins
df["new_Re"] = df["Re"] / best_fit(df["Mr"], m, b) # Standardized Re
df_high = df.loc[ df["Density"] == "High" ].sort_values("Re", ascending=False)
df_low = df.loc[ df["Density"] == "Low" ].sort_values("Re", ascending=False)
# Create Figure
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8,8))
size_range = np.array([min(df[re]), max(df[re])])
ar_range = np.array([0.3,1])
plt.subplots_adjust(hspace=0.1)
# Plot data
ax1.scatter(df_high[re], df_high["b/a"],
color='green', ec='k', marker='^', s=20*df_high["Re"]**2,
lw=[1 if size<3.5 else 2.5 for size in df_high["Re"]] )
ax2.scatter(df_low[re], df_low["b/a"],
color='orange', ec='k', marker='o', s=20*df_low["Re"]**2,
lw=[1 if size<3.5 else 2.5 for size in df_low["Re"]] )
# Plot regression
for ax, df_env in ((ax1, df_high), (ax2, df_low)):
slope, intercept, r_value, p_value, std_err = stats.linregress(df_env[re], df_env["b/a"])
expectation = best_fit(size_range, slope, intercept)
if p_value < 0.05:
ax.plot(size_range, expectation, c='green')
print("\n\nEnvironment: ", df_env["Density"].iloc[0])
print("y = m x + b: <b/a> =", slope, "r'e +", intercept)
print("Corr Coeff : r =", r_value)
print("P-value: p =", p_value)
print("Standard Dev: d =", std_err, "\n")
print(df_env[["NAME", "Re", re, "b/a"]])
# Set Limits
ax1.set_ylim(ar_range)
ax2.set_ylim(ar_range)
ax2.set_xlim(0.6, 1.6)
# Set Axis Labels
ax2.set_xlabel(xlabel, fontsize=24)
ax1.set_ylabel(r"$b/a$", fontsize=24)
ax2.set_ylabel(r"$b/a$", fontsize=24)
# Unique Markers in Legend Only (Uses Markers w/o Bold Outline)
legend_elements = [ Line2D( [0], [0], marker='^', color='g', mec='k', lw=1,
label=r"$\mathrm{High}$", markersize=np.sqrt(20*1.5**2)),
Line2D( [0], [0], marker='o', color='orange', mec='k', lw=0,
label=r"$\mathrm{Low}$", markersize=np.sqrt(20*1.5**2)) ]
plt.legend(handles=legend_elements,
title=r"$\mathrm{Environment \, Density}$",
bbox_to_anchor=(0.7, -0.25),
fontsize=14, fancybox=True, shadow=True, ncol=2)
# Save Figure
plt.savefig(fname, bbox_inches='tight')
plt.close()
################################################################################
if __name__ == "__main__":
df = read_data("../data/kadowaki2019.tsv")
df = df.loc[df["Re"]<9.0]
axisratio_vs_re(df, select=None, standardized=True,
fname='../plots/ar_vs_re.pdf',
mag_vs_re_fname='../plots/mag_vs_re.pdf')
```
#### File: redshift_paper/code/high_spin_halos.py
```python
import sys
sys.path.append("../code")
from read_data import read_data
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# Set matplotlib to use LaTeX
font = {'size': 20}
plt.rc('text', usetex=True)
plt.rc('font', **font)
################################################################################
def best_fit(x, m, b):
return m * x + b
################################################################################
def mag_vs_re(df, fname='mag_vs_re.pdf'):
slope, intercept, r_value, p_value, std_err = stats.linregress(df["Mr"], df["Re"])
if fname:
label = f"$R_e = {slope:.3f} M_r + {intercept:.3f}$" if intercept>0 else \
f"$R_e = {slope:.3f} M_r {intercept:.3f}$"
fig = plt.figure(figsize=(5,5))
plt.scatter(df["Mr"], df["Re"], marker='.', s=5)
plt.plot( [min(df["Mr"]), max(df["Mr"])],
[best_fit(min(df["Mr"]), slope, intercept),
best_fit(max(df["Mr"]), slope, intercept)],
c='r', label=label)
plt.xlim(-13.75,-17.5)
plt.ylim( 1, 6.5)
plt.xlabel(r"$M_r \, \mathrm{(mag)}$")
plt.ylabel(r"$R_e \, \mathrm{(kpc)}$")
plt.legend(loc='upper left', fontsize=12)
plt.savefig(fname, bbox_inches='tight')
plt.close()
print("\nStarndarization Formula:" + label)
return slope, intercept
################################################################################
def stellar_mass(gr_color, M_r):
M_sun = 4.65 # Willmer, 2018, ApJS, 236, 47
L_r = 10**(0.4 * (M_sun - M_r)) # Solar Luminosity
return L_r * 10**(1.629 * gr_color - 0.792)
def axisratio_vs_re(df, select=None, bins=5, standardized=True,
fname='ar_vs_re.pdf', mag_vs_re_fname=None):
# Format Scientific Notation
def format_scinot(num, precision=2):
exponent = int(np.floor(np.log10(num)))
factor = round(num/10**exponent, precision)
return r"{0:.2f} \times 10^{1}".format(factor, exponent)
re = "new_Re" if standardized else "Re"
xlabel = r"$\mathrm{Standardized} \, R_e$" if standardized else r"$R_e \, (\mathrm{kpc})$"
# Standardized Radius + Stellar Mass
m, b = mag_vs_re(df, fname=mag_vs_re_fname) # Regression Parameters
bins = 1 if standardized else bins # Number of Bins
df["new_Re"] = df["Re"] / best_fit(df["Mr"], m, b) # Standardized Re
df["M_star"] = stellar_mass(df["g-r"], df["Mr"]) # Stellar Mass
df["color"] = ['g' if obj=="High" else 'orange' for obj in df["Density"]]
df["marker"] = ['^' if obj=="High" else 'o' for obj in df["Density"]]
# Compute Stellar Mass Bounds
min_mass = int(np.floor(np.log10(min(df["M_star"])))) # Lower Limit
max_mass = int(np.ceil(np.log10(max(df["M_star"])))) # Upper Limit
bin_edge = np.logspace(min_mass, max_mass, bins+1) # Bounds
colors = cm.rainbow(np.linspace(0, 1, bins)) # Colors
print("Bins:", bins, bin_edge)
# Create Figure
fig, ax = plt.subplots(figsize=(8,8))
size_range = np.array([min(df[re]), max(df[re])])
ar_range = np.array([0.3,1])
# Iterate through all bins & plot with respective format
for idx in range(bins):
# Select appropriate data frame
print(f"\n\nBin {idx}: {bin_edge[idx]:.2e} to {bin_edge[idx+1]:.2e}")
#try:
df_select = df.loc[ (df["M_star"] < bin_edge[idx+1]) & (df["M_star"] >= bin_edge[idx])]
print(df_select)
# Plot individual data
for index, udg in df_select .iterrows():
label = r"$\mathrm{{{0}}}$".format(udg["Density"]) if standardized else \
r"${0} \leq M_* < {1}$".format( format_scinot(bin_edge[idx], 2),
format_scinot(bin_edge[idx+1],2) )
ax.scatter( udg[re], udg["b/a"],
color=udg["color"] if standardized else colors[idx],
marker=udg["marker"],
label=label)
# Plot regression
for df_env in (df.loc[df["Density"]=="High"], df.loc[df["Density"]=="Low"]):
slope, intercept, r_value, p_value, std_err = stats.linregress(df_env[re], df_env["b/a"])
expectation = best_fit(size_range, slope, intercept)
ax.plot(size_range, expectation, c=df_env.iloc[0]["color"] if standardized else colors[idx])
#except:
# print("No objects in this bin.")
# Set Limits
ax.set_ylim(ar_range)
# Set Axis Labels
ax.set_xlabel(xlabel, fontsize=24)
ax.set_ylabel(r"$b/a$", fontsize=24)
# Unique Markers in Legend Only (Uses Markers w/o Bold Outline)
handles, labels = ax.get_legend_handles_labels()
unique_index = [labels.index(l) for l in set(labels)]
unique = [(h,l) for i, (h,l) in enumerate(zip(handles, labels))
if i in unique_index]
if standardized:
ax.legend(*zip(*unique),
title=r"$\mathrm{Environment \, Density}$",
loc='lower right',
fontsize=14, fancybox=True, shadow=True)
else:
ax.legend(*zip(*unique),
title=r"$\mathrm{Stellar \, Mass}$",
bbox_to_anchor=(0.5, -0.125),
fontsize=14, fancybox=True, shadow=True, ncol=2)
# Save Figure
plt.savefig(fname, bbox_inches='tight')
plt.close()
################################################################################
if __name__ == "__main__":
df = read_data("../data/kadowaki2019.tsv")
df = df.loc[df["Re"]<9.0]
axisratio_vs_re(df, select=None, bins=4, standardized=True,
fname='../plots/ar_vs_re.pdf',
mag_vs_re_fname='../plots/mag_vs_re.pdf')
```
#### File: redshift_paper/code/pair_plot.py
```python
from __future__ import print_function
import warnings
warnings.simplefilter("ignore", UserWarning)
warnings.filterwarnings("ignore", category=UserWarning)
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
import numpy as np
import os
import pandas as pd
from scipy import stats
import seaborn as sns
import sys
sys.path.append('../code')
from read_data import read_data
# GLOBAL VARIABLES
hist_color = 'Green'
hist_idx = -1
################################################################################
"""
PAIR_PLOT.py
Goal: Creates pair plots (seaborn PairGrid) and color-color / color-magnitude
plots for UDG candidates, split by environment (local, global/cluster
membership, or density).
Methods:
(1) color_plots: color-color and color-magnitude scatter plots
(2) main: reads the annotated catalog and drives the plotting
(3) hist / scatter / contours: helpers that fill the PairGrid panels
"""
################################################################################
def change_color(three_colors=False):
global hist_color
if not three_colors:
if hist_color == 'Orange':
hist_color = 'Green'
else:
hist_color = 'Orange'
else:
if hist_color == 'Green':
hist_color = 'Orange'
elif hist_color == 'Orange':
hist_color = 'Blue'
else:
hist_color = 'Green'
################################################################################
def get_label_color_marker(df, efeat="LocalEnv"):
# Local Environment
if efeat == "LocalEnv":
label = [ r'$\mathrm{Dense}$' if val=='Dense' else \
r'$\mathrm{Sparse}$' if val=='Sparse' else \
r'$\mathrm{Unconstrained}$' for val in df[efeat] ]
color = [ 'lime' if val=='Dense' else \
'orange' if val=='Sparse' else \
'blue' for val in df[efeat] ]
marker = [ '^' if val=='Dense' else \
'o' if val=='Sparse' else \
'x' for val in df[efeat] ]
legend_title = r"$\mathrm{Local \, Environment}$"
# Global/Cluster Environment
elif efeat == "GlobalEnv":
label = [ r'$\mathrm{Cluster}$' if val=='Cluster' else \
r'$\mathrm{Non}$-$\mathrm{Cluster}$' if val=='Non-Cluster' else \
r'$\mathrm{Unconstrained}$' for val in df[efeat] ]
color = [ 'lime' if val=='Cluster' else \
'orange' if val=='Non-Cluster' else \
'blue' for val in df[efeat] ]
marker = [ '^' if val=='Cluster' else \
'o' if val=='Non-Cluster' else \
'x' for val in df[efeat] ]
legend_title = r"$\mathrm{Coma \, Membership}$"
# Environment Density
elif efeat == "Density":
label = [ r'$\mathrm{High}$' if val=='High' else \
r'$\mathrm{Low}$' if val=='Low' else \
r'$\mathrm{Unconstrained}$' for val in df[efeat] ]
color = [ 'lime' if val=='High' else \
'orange' if val=='Low' else \
'blue' for val in df[efeat] ]
marker = [ '^' if val=='High' else \
'o' if val=='Low' else \
'x' for val in df[efeat] ]
legend_title = r"$\mathrm{Density}$"
else:
label = [None] * len(df)
color = ['b'] * len(df)
marker = ['x'] * len(df)
legend_title = None
return label, color, marker, legend_title
################################################################################
def color_plots(df, xfeat, yfeat, efeat="GlobalEnv", mfeat="Re", flag="UV", plot_fname='color.pdf'):
"""
Creates color-color or color-magnitude plots.
df (DataFrame)
xfeat (str): Color or Magnitude Feature
yfeat (str): Color or Magnitude Feature
efeat (str): Environment Feature (i.e., 'LocalEnv' or 'GlobalEnv')
mfeat (str): Feature to base Marker Size
flag (str): Feature to Specify Detection ("Yes") or Upper Limit ("No")
plot_fname (str): Filename of Plot
"""
# Remove NaNs & Sorts Data to Plot Big Markers First
df = df[[xfeat, yfeat, mfeat, efeat, flag]].dropna()
df = df.sort_values(by=[mfeat], ascending=False)
# Select Legend Labels, Marker Sizes & Colors & Shapes
small_thres = 1.5 # kpc
large_thres = 3.5 # kpc
fontsize = 30
marker_size = 40
marker_edge = 'k'
thin_line = 0.3
thick_line = 2.25
label, color, marker, legend_title = get_label_color_marker(df, efeat)
# Scatter Plot
fig = plt.figure(figsize=(12,9))
ax = fig.add_subplot(111)
for idx in range(len(df)):
if df[flag].iloc[idx] == "No" and 'Mnuv' in yfeat:
plt.arrow( df[xfeat].iloc[idx], df[yfeat].iloc[idx],
# Dictates Arrow Size/End point
dx=0, dy=-df[mfeat].iloc[idx]/max(df[mfeat]),
color = color[idx],
head_width = (df[mfeat].iloc[idx]/10)**2,
# Add Bold Outline around `mfeat` Values above `large_thres`
linewidth=thick_line if df[mfeat].iloc[idx]>large_thres else thin_line )
else:
plt.scatter( df[xfeat].iloc[idx],
df[yfeat].iloc[idx],
label = label[idx],
color = color[idx],
marker = marker[idx],
# Marker Radius Scales Linearly with `mfeat` Value
s = marker_size * (df[mfeat].iloc[idx])**2,
edgecolors=marker_edge,
# Add Bold Outline around `mfeat` Values above `large_thres`
linewidth=thick_line if df[mfeat].iloc[idx]>large_thres else thin_line )
plt.tick_params(which='both', direction='in', pad=10, labelsize=fontsize)
plt.minorticks_on()
xlabel = xfeat.replace('Mnuv','M_\mathrm{NUV}')
plt.xlabel(('$' if '-' in xlabel else '$M_') + xlabel +'$', fontsize=fontsize)
plt.ylabel('$'+ yfeat.replace('Mnuv','M_\mathrm{NUV}') +'$', fontsize=fontsize)
plt.legend(title=legend_title)
# Unique Markers in Legend Only (Uses Markers w/o Bold Outline)
handles, labels = ax.get_legend_handles_labels()
handles = handles[::-1]
labels = labels[::-1]
unique = [ (h, l) for i, (h, l) in enumerate(zip(handles, labels)) \
if l not in labels[:i] ]
legend = ax.legend( *zip(*unique), loc='lower right',
prop={'size':22},
title_fontsize=24,
fancybox=True,
frameon=True,
title=legend_title )
#legend.set_title(fontsize=24)
# Set Marker Size in Legend to `small_thres` Size
for legend_handle in legend.legendHandles:
legend_handle._sizes = [marker_size * small_thres**2]
# Sets Axes Line Width
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(1)
# Removes Border Whitespace & Save
plt.tight_layout()
plt.savefig(plot_fname, format='pdf')
plt.close()
################################################################################
def main(data_file='kadowaki2019.tsv',
data_directory='../data',
plot_directory='../plots',
pair_name='pair.pdf',
color_name=True,
plot_pair=False,
udg_only=False,
local_env=True,
density=False,
verbose=False,
hack_color_fix=False):
"""
Args:
(str) data_directory:
(str) plot_directory:
(str)
(bool) update_dat_file:
"""
############################################################################
# Data File
param_file = os.path.join(data_directory, data_file)
prefix = ('udgs' if udg_only else 'candidates') + '_' + \
('density' if density else ('local' if local_env else 'global')) + '_'
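# e.g. (illustrative): udg_only=False, density=False, local_env=True
#      -> prefix == 'candidates_local_'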
# Save to Plots
pair_plot = os.path.join(plot_directory, prefix + pair_name)
three_colors = not udg_only and local_env
global hist_color
if three_colors:
hist_color = 'Green'
else:
if hack_color_fix:
change_color(three_colors=three_colors)
############################################################################
df_results = read_data(param_file, udg_only=udg_only, field='Coma')
efeat = 'Density' if density else ('LocalEnv' if local_env else 'GlobalEnv')
df_results = df_results.sort_values(by=[efeat])
df_results = df_results.reset_index(drop=True)
############################################################################
if color_name:
color_features = ["NUV-r", "g-r", "g-z"]
mag_features = ["Mz"]
for idx1,color in enumerate(color_features):
# Color-Magnitude Plots
for mag in mag_features:
if mag not in color:
# File Name
cm_fname = os.path.join(plot_directory,
prefix + color + "_" + mag + ".pdf")
# Plot
color_plots(df_results, xfeat=mag, yfeat=color,
efeat=efeat, mfeat="Re", flag="UV",
plot_fname=cm_fname)
# Color-Color Plots
for idx2,color2 in enumerate(color_features):
if (idx1 < idx2) and all([c not in color2 for c in color.split('-')]):
# File Name
cc_fname = os.path.join(plot_directory,
prefix + color + "_" + color2 + ".pdf")
# Plot
color_plots(df_results, xfeat=color2, yfeat=color,
efeat=efeat, mfeat="Re", flag="UV",
plot_fname= cc_fname)
############################################################################
def remove_nan(args, verbose=False):
nan_idx = np.array([])
for a in args:
# Ensures All List-like Data Structures are Pandas Series
if type(a) == np.ndarray:
a = pd.Series(a)
if verbose:
print("\t", a.name)
# Appends All Indices Corresponding to NaNs
nan_idx = np.concatenate((nan_idx, a[pd.isna(a)].index.values))
if verbose:
print(nan_idx)
# Stores Arguments with NaNs Removed
new_args = []
for a in args:
# Ensures All List-like Data Structures are Pandas Series
if type(a) == np.ndarray:
a = pd.Series(a)
new_args.append( a.drop(nan_idx, errors="ignore") )
return new_args
############################################################################
def remove_repeating_lists(args, verbose=False):
new_args = []
for a in args:
if np.size(np.unique(a)) > 1:
new_args.append(a)
if verbose:
print(new_args)
return new_args
############################################################################
def hist(*args, **kwargs):
if verbose:
print("\nhist")
new_args = remove_repeating_lists(remove_nan(args, verbose=verbose), verbose=verbose)
large_args = []
min_y, max_y = 9999999, -9999999
for a in new_args:
if len(a) > 4:
large_args.append(a)
if min(a) < min_y:
min_y = min(a)
if max(a) > max_y:
max_y = max(a)
if verbose:
print(large_args)
if len(large_args):
hist_bins = np.linspace(min_y, max_y, 6)  # use the min/max computed above, not the last loop variable
dist = sns.distplot(*large_args, rug=True, kde=True, hist=False, norm_hist=True, color=hist_color, bins=hist_bins)
sns.distplot(*large_args, kde=False, hist=True, norm_hist=True, color=hist_color, bins=hist_bins)
axes = dist.axes
hist_val = np.histogram(*large_args, bins=hist_bins, density=True)[0]
ylimit = np.max(hist_val)
curr_ylim = axes.get_ylim()[1]
if curr_ylim > 5*ylimit or ylimit > curr_ylim:
axes.set_ylim(0, ylimit/0.8)
axes.xaxis.set_tick_params(labelsize=50)
axes.yaxis.set_tick_params(labelsize=50)
change_color(three_colors = not udg_only and local_env)
############################################################################
def scatter(*args,**kwargs):
plt.scatter(*args, **kwargs, s=24, edgecolor='k', linewidth=0.1)
############################################################################
if plot_pair:
sns.set(style="ticks", color_codes=True)
features = ["sepMpc", "MUg0", "Mg", "g-r", "Re", "b/a", "n", efeat]
markers = ['^', 'o'] if not three_colors else ['^', 'o', 'x']
col_list = ['lime', 'Orange'] if not three_colors else ['lime', 'Orange', 'Blue' ]
cmap_list = ['Greens', 'Oranges'] if not three_colors else ['Greens', 'Oranges', 'Blues']
env_list = sorted(df_results[efeat].unique())
col_dict = dict(zip(env_list, col_list))
cmap_dict = dict(zip(env_list, cmap_list))
ax = sns.PairGrid(data=df_results[features],
hue=efeat ,
palette=col_dict,
diag_sharey=False,
hue_kws={"marker":markers})
############################################################################
def contours(*args,**kwargs):
if verbose:
print("\ncontours")
new_args = remove_repeating_lists(remove_nan(args, verbose=verbose), verbose=verbose)
if len(new_args) > 1:
print(df_results[efeat])
idx = args[0].index.values[0]
label = df_results[efeat].iloc[idx]
cmap = cmap_dict.get(label)
if verbose:
print(idx, label, cmap)
if idx != 1: # Exclude Unconstrained
sns.kdeplot(*new_args, cmap=cmap, shade_lowest=True)
############################################################################
ax.map_diag(hist)
ax.map_lower(contours)
ax.map_upper(scatter)
# LEGEND LABELS
if density:
env_replacements = {'High':r"$\mathrm{High}$",
'Low':r"$\mathrm{Low}$"}
elif local_env:
env_replacements = {'Dense':r"$\mathrm{Dense}$",
'Sparse':r"$\mathrm{Sparse}$"}
else:
env_replacements = {'Cluster':r"$\mathrm{Cluster}$",
'Non-Cluster':r"$\mathrm{Non}$-$\mathrm{Cluster}$"}
if not udg_only:
env_replacements["Unconstrained"] = r"$\mathrm{Unconstrained}$"
# Replace Current Labels for LaTeX Labels
labels = [env_replacements[env_label] for env_label in ax._legend_data.keys()]
# LEGEND HANDLES
handles = ax._legend_data.values()
# ADD LEGEND & Fix Placement
ax.fig.legend(handles=handles, labels=labels,
loc='lower center', ncol=3, fontsize=15,
frameon=True, edgecolor='k', markerscale=2.5, shadow=True,
title=r"$\mathrm{Environment \, Density}$" if density else \
r"$\mathrm{Local \, Environment}$" if local_env else \
r"$\mathrm{Coma \, Membership}$",
title_fontsize=20)
ax.fig.subplots_adjust(top=1.05, bottom=0.12)
# AXIS LABELS
replacements = { # Magnitudes
"Mnuv":r'$M_\mathrm{NUV}$',
"Mg":r'$M_g$',
"Mr":r'$M_r$',
"Mz":r'$M_z$',
# Colors
"NUV-g":r'$\mathrm{NUV} - g$',
"g-r":r'$g - r$',
"r-z":r'$r - z$',
# Intrinsic Properties
"n":r'$n$',
"Re":r'$r_e \, \left( \mathrm{kpc} \right)$',
"MUg0":r'$\mu \left(g,0\right) \, \left( \mathrm{mag} \, \mathrm{arcsec}^{-2} \right)$',
"b/a":r'$b/a$',
# Extrinsic Properties
"cz":r'$cz \, \left( \mathrm{km/s} \right)$',
"sepMpc":r'$r_\mathrm{proj} \, \left( \mathrm{Mpc} \right)$',
"NUM500":r'$\mathrm{\# \, of \, Massive \, Companions}$' }
for x_idx in range(len(features)-1):
for y_idx in range(len(features)-1):
ax.axes[x_idx][y_idx].tick_params(labelsize=15)
xlabel = ax.axes[x_idx][y_idx].get_xlabel()
ylabel = ax.axes[x_idx][y_idx].get_ylabel()
if xlabel in replacements.keys():
ax.axes[x_idx][y_idx].set_xlabel(replacements[xlabel], fontsize=20)
if ylabel in replacements.keys():
ax.axes[x_idx][y_idx].set_ylabel(replacements[ylabel], fontsize=20)
# Save & Display Figure
plt.savefig(pair_plot, bbox_inches = 'tight')
plt.close()
################################################################################
if __name__ == '__main__':
print("\n----------------- ALL CANDIDATES -----------------")
print("\n~~~~~LOCAL~~~~~~")
main(plot_pair=True,
color_name=True,
udg_only=False,
local_env=True,
verbose=False,
hack_color_fix=True)
print("\n~~~~~~GLOBAL~~~~~~")
main(plot_pair=True,
color_name=True,
udg_only=False,
local_env=False,
hack_color_fix=False)
print("\n~~~~~~DENSITY~~~~~~")
main(plot_pair=True,
color_name=True,
udg_only=False,
local_env=False,
density=True,
hack_color_fix=False)
print("\n-------------------- ALL UDGS --------------------")
print("\n~~~~~~LOCAL~~~~~~")
main(plot_pair=True,
color_name=True,
udg_only=True,
local_env=True,
hack_color_fix=False)
print("\n~~~~~~GLOBAL~~~~~~")
main(plot_pair=True,
color_name=True,
udg_only=True,
local_env=False,
hack_color_fix=False)
print("\n~~~~~~DENSITY~~~~~~")
main(plot_pair=True,
color_name=True,
udg_only=True,
local_env=False,
density=True,
hack_color_fix=False)
``` |
{
"source": "jkadowaki/SemEval2018",
"score": 3
} |
#### File: jkadowaki/SemEval2018/nn.py
```python
from sklearn.metrics import jaccard_similarity_score, f1_score, fbeta_score
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.optimizers import Adam
from keras.layers import Dense, Embedding, LSTM, Bidirectional, TimeDistributed, SpatialDropout1D, GRU, GlobalMaxPool1D, SimpleRNN
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, TensorBoard
import numpy as np
import warnings
warnings.filterwarnings('ignore')
################################################################################
emotions = ["anger", "anticipation", "disgust", "fear", "joy", "love",
"optimism", "pessimism", "sadness", "surprise", "trust"]
emotion_to_int = {"0": 0, "1": 1, "NONE": 0.0}
tweet = "Tweet"
################################################################################
def split_xy(train_data, test_data):
"""
Splits dataset into tokens & labels and computers the vocabulary size and
the padded sequence size.
param data: (dataframe) Labelled Training or Testing set
returns: Sequence of Tokens, Labels, Sequence Length, Vocabular Size
"""
# Split the dataset into feature and labels
train_X = train_data[tweet]
train_Y = train_data[emotions]
test_X = test_data[tweet]
test_Y = test_data[emotions]
# Define Tokens
all_X = pd.concat([train_X, test_X])
tokenizer = Tokenizer(split=' ')
tokenizer.fit_on_texts(all_X)
# Convert Tweets to Token Sequence
train_token = tokenizer.texts_to_sequences(train_X)
test_token = tokenizer.texts_to_sequences(test_X)
# Compute Sequence Size & Vocabulary Size
maxlen = max([len(x.split()) for x in all_X])
vocab_size = len(tokenizer.word_index) + 1
# Pad Token Sequences
train_padded = pad_sequences(train_token, maxlen=maxlen)
test_padded = pad_sequences(test_token, maxlen=maxlen)
return train_padded, train_Y, test_padded, test_Y, maxlen, vocab_size
################################################################################
def lstm_model(vocab_size=2000, input_length=32):
############# HYPER-PARAMETER TUNING #############
# LSTM Parameters
embed_dim = 128
lstm_units = 64
lstm2_units = 64
s_dropout = 0.0 #0.1
dropout = 0.0 #0.1
recurrent_dropout = 0.0 #0.1
# Activation, Cost, Optimization, Metrics Parameters
activation = 'sigmoid'
loss = 'binary_crossentropy'
optimizer = 'adam'
metrics = ['accuracy']
####################################################
model = Sequential()
model.add( Embedding(vocab_size,
embed_dim,
input_length=input_length,
mask_zero=True))
model.add(BatchNormalization())
model.add( Bidirectional( GRU(lstm_units,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
return_sequences=True
) ))
model.add( Bidirectional( GRU(lstm2_units,
dropout=dropout,
recurrent_dropout=recurrent_dropout) ))
model.add(Dense(len(emotions), activation=activation))
model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
# Tracks all Callbacks
callbacks = []
# Saves Best Model Parameters
checkpointer = ModelCheckpoint(filepath='/tmp/weights.hdf5',
verbose=1,
save_best_only=True)
callbacks.append(checkpointer)
# Returns LSTM Model & Best Model Parameters
return model, {'callbacks': callbacks}
################################################################################
def train_and_predict(train_data: pd.DataFrame,
test_data: pd.DataFrame) -> pd.DataFrame:
##### HYPER-PARAMETER TUNING #####
# Model Fitting Parameters
epochs = 1
batch_size = 4
threshold = 0.331
####################################
# Split Features/Labels & Compute Sequence Length + Size of Vocabulary
train_X, train_Y, test_X, test_Y, train_maxlen, train_vocab_size = split_xy(train_data, test_data)
# Bidirectional LSTM Model
model, params = lstm_model(vocab_size=train_vocab_size,
input_length=train_maxlen)
# Fit Model
history = model.fit(train_X, train_Y,
epochs=epochs,
batch_size=batch_size,
verbose=2,
#validation_data=(test_X, test_Y),
shuffle=True,
**params)
# Make Predictions for the Dev Set
test_predictions = (1-threshold + model.predict(test_X))
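# How the shifted probabilities are used (added note): adding (1 - threshold) and
# truncating to int further below marks an emotion as present when its predicted
# probability exceeds `threshold`, e.g. with threshold = 0.331:
#   p = 0.40 -> 0.40 + 0.669 = 1.069 -> int(...) = 1 (positive)
#   p = 0.25 -> 0.25 + 0.669 = 0.919 -> int(...) = 0 (negative)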
# Saves a Copy of the Original Probabilities
test_prob = test_data.copy()
test_prob[emotions] = test_predictions
# Classifies Each Run on the Dev Set
test_predictions = test_data.copy()
test_predictions[emotions] = test_prob[emotions].astype(int)
return test_prob, test_predictions
################################################################################
def print_metrics(df1, df2):
sim = jaccard_similarity_score(df1, df2)
f1_micro = f1_score(df1, df2, average='micro')
f1_macro = f1_score(df1, df2, average='macro')
print("\taccuracy: {:.4f}".format(sim), "\t",
"f1-micro: {:.4f}".format(f1_micro), "\t",
"f1-macro: {:.4f}".format(f1_macro))
return sim
################################################################################
if __name__ == "__main__":
# reads train and dev data into Pandas data frames
read_csv_kwargs = dict(sep="\t",
converters={e: emotion_to_int.get for e in emotions})
train_data = pd.read_csv("2018-E-c-En-train.txt", **read_csv_kwargs)
test_data = pd.read_csv("2018-E-c-En-test.txt", **read_csv_kwargs)
# Number of Times to Run the Prediction Algorithm
num_predictions = 5
num_models = 0
for num_models in range(num_predictions):
# Makes Predictions on Each Run of the Dev Set
print("\n\nModel {0}:".format(num_models))
tprob, tpred = train_and_predict(train_data, test_data)
tsim = print_metrics(test_data[emotions], tpred[emotions])
if num_models==0:
#dev_predictions = dprob.copy()
test_predictions = tprob.copy()
else:
#dev_predictions[emotions] += dprob[emotions]
test_predictions[emotions] += tprob[emotions]
"""
print("Current Dev Ensemble Metrics:")
temp1 = (dev_predictions[emotions]/(num_models+1)).astype(int)
score = print_metrics(dev_data[emotions], temp1[emotions])
print("Current Test Ensemble Metrics:")
temp2 = (test_predictions[emotions]/(num_models+1)).astype(int)
print_metrics(test_data[emotions], temp2[emotions])
"""
# Final Prediction Based on Multiple Runs
# Reduces Run-to-Run Variations & Improves Overall Prediction Accuracy
test_predictions[emotions] /= num_predictions
test_predictions.to_csv("tthreshold.txt", sep="\t", index=False)
# saves predictions and prints out multi-label accuracy
test_predictions[emotions] = (test_predictions[emotions]).astype(int)
test_predictions.to_csv("E-C_en_pred.txt", sep="\t", index=False)
``` |
{
"source": "jkae/knn-exercise",
"score": 3
} |
#### File: knn-exercise/examples/simple_example.py
```python
from pynn import kd_tree
def example_generate_tree():
"""
Generate an example k-d tree from US capitals.
"""
lat_lon = [
(32.377716, -86.300568), # Montgomery
(58.301598, -134.420212), # Juneau
(33.448143, -112.096962), # Phoenix
(34.746613, -92.288986), # Little Rock
(38.576668, -121.493629), # Sacramento
(39.739227, -104.984856), # Denver
(41.764046, -72.682198), # Hartford
(39.157307, -75.519722), # Dover
(21.307442, -157.857376), # Honolulu
(30.438118, -84.281296), # Tallahassee
(33.749027, -84.388229), # Atlanta
(43.617775, -116.199722), # Boise
(39.798363, -89.654961), # Springfield
(39.768623, -86.162643), # Indianapolis
(41.591087, -93.603729), # Des Moines
(39.048191, -95.677956), # Topeka
(38.186722, -84.875374), # Frankfort
(30.457069, -91.187393), # Baton Rouge
(44.307167, -69.781693), # Augusta
(38.978764, -76.490936), # Annapolis
(42.358162, -71.063698), # Boston
(42.733635, -84.555328), # Lansing
(44.955097, -93.102211), # Saint Paul
(32.303848, -90.182106), # Jackson
(38.579201, -92.172935), # Jefferson City
(46.585709, -112.018417), # Helena
(40.808075, -96.699654), # Lincoln
(39.163914, -119.766121), # Carson City
(43.206898, -71.537994), # Concord
(40.220596, -74.769913), # Trenton
(35.68224, -105.939728), # Santa Fe
(35.78043, -78.639099), # Raleigh
(46.82085, -100.783318), # Bismarck
(42.652843, -73.757874), # Albany
(39.961346, -82.999069), # Columbus
(35.492207, -97.503342), # Oklahoma City
(44.938461, -123.030403), # Salem
(40.264378, -76.883598), # Harrisburg
(41.830914, -71.414963), # Providence
(34.000343, -81.033211), # Columbia
(44.367031, -100.346405), # Pierre
(36.16581, -86.784241), # Nashville
(30.27467, -97.740349), # Austin
(40.777477, -111.888237), # Salt Lake City
(44.262436, -72.580536), # Montpelier
(37.538857, -77.43364), # Richmond
(47.035805, -122.905014), # Olympia
(38.336246, -81.612328), # Charleston
(43.074684, -89.384445), # Madison
(41.140259, -104.820236) # Cheyenne
]
root_node = kd_tree.kdTree(lat_lon)
return root_node
def example_find_nearest_neighbor():
"""
Find a k-d tree's nearest neighbor to a given point.
<NAME>, CO should be closer to Chey<NAME> (41.140259, -104.820236) than Denver (39.739227, -104.984856)
"""
tree = example_generate_tree()
fort_collins = (40.5566532, -105.1026712)  # Fort Collins, CO
return kd_tree.kd_nearest_neighbor(tree, fort_collins)
``` |
{
"source": "jkaessens/gwas-assoc",
"score": 2
} |
#### File: gwas-assoc/bin/SampleQCI_pca_convert.py
```python
import sys
import re
import os
from os.path import *
import string
import re
import gzip
import math
import decimal
import datetime
from os import listdir
import subprocess
# may also need some of these:
# import Ingos lib
#sys.path.append(join(sys.path[0], "../../all_scripts"))
sys.path.append(os.environ['PYLIB_DIR'] + "/all_scripts")
from all_common import *
# import my lib
#sys.path.append(join(sys.path[0], "../lib"))
sys.path.append(os.environ['PYLIB_DIR'] + "/lib")
from plink_classes import *
from eigenstrat_classes import *
def pca_convert(plink, eigenstrat_parameter_file, annotation_file):
""" convert PLINK file data set to eigenstrat format """
# ----------------------------- #
# - generate parameter file m - #
# ----------------------------- #
packedped = PackedPed(write_file=eigenstrat_parameter_file)
packedped.set_input_PLINK_binary(
bed=plink + ".bed",\
bim=plink + ".bim",\
fam=plink + ".fam")
packedped.write_par_file() ; del packedped
# ------------------------ #
# - run convertf program - #
# ------------------------ #
cmd = Command( "convertf -p %s" \
%(eigenstrat_parameter_file) )
cmd.run() ; del cmd
os.system("mv %s.ind %s.ind.orig" \
%(plink, plink) )
# read individualIDs and HAPMAP info from from hapmap2 fam file
try:
fh_anno = file(annotation_file, "r")
except IOError, e:
print e
sys.exit(1)
individuals2batch_id = {}
# skip header
line = fh_anno.readline().replace("\n", "")
line = fh_anno.readline().replace("\n", "")
while line:
list = re.split("\s+",line)
IID = list[1]
batch_id = list[6]
individuals2batch_id[IID] = batch_id
line = fh_anno.readline().replace("\n", "")
fh_anno.close()
# re-write ind file with info on HapMap samples and batch_info
try:
fh_ind = file(plink + ".ind.orig", "r")
fh_ind_new = file(plink + ".ind", "w")
except IOError, e:
print e
sys.exit(1)
batches = []
batches_dict = {}
# no header line
line = fh_ind.readline().replace("\n", "")
while line:
list = re.split("\s+",line)
if list[0] == "":
del list[0]
# change info last column from "Case/Control" to batch_id
if individuals2batch_id.has_key(list[0]):
batch_id = individuals2batch_id[list[0]]
if not batches_dict.has_key(batch_id):
batches.append(batch_id)
batches_dict[batch_id] = True
if list[-1] == "Case":
line = line.replace("Case", batch_id)
elif list[-1] == "Control":
line = line.replace("Control", batch_id)
# nothing to replace
else:
print >> sys.stderr, "\n warning: could not replace case/control status for sample " +list[0]+ " by batch_id in file pca.evec file " +plink_pca + ".pca.evec ...\n\n"
fh_ind_new.writelines(line +"\n")
# nothing to replace
else:
print >> sys.stderr, "\n warning: could not found sample " +list[0]+ " in annotation file " +individuals_annotation_cases_controls_hapmap2+ " ...\n\n"
fh_ind_new.writelines(line +"\n")
line = fh_ind.readline().replace("\n", "")
fh_ind.close()
fh_ind_new.close()
del batches_dict
# Main
if __name__ == "__main__":
# check args
if len(sys.argv) != 4:
print "Usage: " + sys.argv[0] + " <input plink basename> <eigenstrat parameter file> <annotations>\n"
sys.exit(1)
pca_convert(sys.argv[1], sys.argv[2], sys.argv[3])
```
#### File: gwas-assoc/bin/SampleQCI_variant_filter.py
```python
import sys
import re
import os
def write_snps_autosomes_noLDRegions_noATandGC_noIndels(bim, outfile):
""" write only autosomal snps, remove SNPs from high LD regions (also MHC),
remove A/T and C/G SNPs, remove Indels """
print "\n remove SNPs from high LD regions ..\n\n"
print "\n remove A/T and C/G SNPs ...\n\n"
print "\n remove insertions/deletions ...\n\n"
try:
bim = file(bim, "r")
out = file(outfile, "w")
except IOError, e:
print e
sys.exit(1)
complement = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'D':'D', 'I':'I'}
indels = {'D':'D', 'I':'I'}
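# Note (added for clarity): A/T and C/G SNPs are strand-ambiguous because each
# allele is the complement of the other, so they fail the a1 != complement[a2]
# filter below and are excluded; the 'D'/'I' entries flag indels.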
line = bim.readline().replace("\n","")
while line:
list = re.split("\s+",line)
chr = int(list[0])
pos = int(list[3])
a1 = list[4]
a2 = list[5]
# exclude non-autosomes
if 0 < chr and chr < 23:
# exclude xMHC SNPs AND exclude A/T and C/G SNPs AND exclude D/I SNPs
if (not ( (1 == chr and (48000000 <= pos and pos < 52000000)) or \
(2 == chr and (86000000 <= pos and pos < 100500000)) or \
(2 == chr and (134500000 <= pos and pos < 138000000)) or \
(2 == chr and (183000000 <= pos and pos < 183000000)) or \
(3 == chr and (47500000 <= pos and pos < 50000000)) or \
(3 == chr and (83500000 <= pos and pos < 87000000)) or \
(3 == chr and (89000000 <= pos and pos < 97500000)) or \
(5 == chr and (44500000 <= pos and pos < 50500000)) or \
(5 == chr and (98000000 <= pos and pos < 100500000)) or \
(5 == chr and (129000000 <= pos and pos < 132000000)) or \
(5 == chr and (135500000 <= pos and pos < 138500000)) or \
(6 == chr and (25500000 <= pos and pos < 33500000)) or \
(6 == chr and (57000000 <= pos and pos < 64000000)) or \
(6 == chr and (140000000 <= pos and pos < 142500000)) or \
(7 == chr and (55000000 <= pos and pos < 66000000)) or \
(8 == chr and (8000000 <= pos and pos < 12000000)) or \
(8 == chr and (43000000 <= pos and pos < 50000000)) or \
(8 == chr and (112000000 <= pos and pos < 115000000)) or \
(10 == chr and (37000000 <= pos and pos < 43000000)) or \
(11 == chr and (46000000 <= pos and pos < 57000000)) or \
(11 == chr and (87500000 <= pos and pos < 90500000)) or \
(12 == chr and (33000000 <= pos and pos < 40000000)) or \
(12 == chr and (109500000 <= pos and pos < 112000000)) or \
(20 == chr and (32000000 <= pos and pos < 34500000)) )) \
and (a1 != complement[a2]) \
and (not (indels.has_key(a1) or indels.has_key(a2))):
# write variants for inclusion
out.writelines("%s\n" %(list[1]))
line = bim.readline().replace("\n","")
bim.close()
out.close()
# Main
if __name__ == "__main__":
# check args
if len(sys.argv) != 3:
print "Usage: " + sys.argv[0] + " <bim-file> <target-file>\n"
print "\twhere:\n"
print "\t<bim-file> BIM input\n"
print "\t<target-file> list of variants that should be filtered\n"
sys.exit(1)
write_snps_autosomes_noLDRegions_noATandGC_noIndels(sys.argv[1], sys.argv[2])
```
#### File: gwas-assoc/bin/SNPQC_helpers.py
```python
import sys
import re
import os
from os.path import join, dirname
# import string
# import gzip
# import math
# import decimal
# import datetime
# from os import listdir
# import subprocess
# may also need some of these:
# import Ingos lib
# sys.path.append(os.path.join(os.path.dirname[0], "../../all_scripts"))
sys.path.append(os.environ['PYLIB_DIR'] + "/all_scripts")
sys.path.append(os.environ['PYLIB_DIR'] + "/lib")
# from all_common import *
# import my lib
# sys.path.append(join(sys.path[0], "../lib"))
# sys.path.append(os.environ['PYLIB_DIR'] + "/lib")
from plink_classes import Test_missing,Frq
from eigenstrat_classes import PcaLog
def determine_unknown_diagnosis(annotationfile, outfile, diagnoses):
""" determine samples with unknown diagnosis """
print "\n check for samples with unknown diagnosis ..\n\n"
try:
fh = file(annotationfile, "r")
fh_w = file(outfile, "w")
except IOError, e:
print e
sys.exit(1)
# header line
line = fh.readline().rstrip('\n')
list = re.split("\s+", line)
# delete empty elements
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
assert(list[8] == "diagnosis")
# body
line = fh.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
# delete empty elements
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
diag = list[8]
if not (diag in diagnoses):
fh_w.writelines(line + "\n")
line = fh.readline().rstrip('\n')
fh.close()
fh_w.close()
def extract_QCsamples_annotationfile_relativesfile(fam, individuals_annotation_QCed, related_samples_file, related_samples_file_QCed, individuals_annotation, diagnoses):
""" extract only QCed samples from original annotation file """
if isinstance(diagnoses, basestring):
diagnoses = diagnoses.split(',')
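# e.g. (illustrative): diagnoses = "CD,UC" -> ['CD', 'UC']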
individualIDs = {}
try:
fh_r = file(fam, "r")
except IOError, e:
print e
sys.exit(1)
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if not (list[1] in individualIDs):
individualIDs[list[1]] = True
line = fh_r.readline().rstrip('\n')
fh_r.close()
# -------------------------- #
# -- scan annotation file -- #
# -------------------------- #
try:
fh_r = file(individuals_annotation, "r")
fh_w = file(individuals_annotation_QCed, "w")
except IOError, e:
print e
sys.exit(1)
# read header
line = fh_r.readline().rstrip('\n')
fh_w.writelines(line + "\n")
# read body
count_samples = 0
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
if list[1] in individualIDs:
fh_w.writelines(line + "\n")
count_samples += 1
line = fh_r.readline().rstrip('\n')
fh_r.close()
fh_w.close()
assert(len(individualIDs) == count_samples)
# generate diagnosis individual annotation files
for diag in diagnoses:
try:
fh_r = file(individuals_annotation, "r")
# flake8 complains that neigher join nor dirname are defined...
fh_w = file(join(dirname(individuals_annotation_QCed), diag + "_QCed_annotation.txt"), "w")
except IOError, e:
print e
sys.exit(1)
# read header
line = fh_r.readline().rstrip('\n')
fh_w.writelines(line + "\n")
# read body
count_samples = 0
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
if list[8] == diag and (list[1] in individualIDs):
fh_w.writelines(line + "\n")
count_samples += 1
line = fh_r.readline().rstrip('\n')
fh_r.close()
fh_w.close()
# --------------------------------- #
# -- scan flagged relatives file -- #
# --------------------------------- #
try:
fh_r = file(related_samples_file, "r")
fh_w = file(related_samples_file_QCed, "w")
except IOError, e:
print e
sys.exit(1)
# read body
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
if list[1] in individualIDs:
fh_w.writelines(line + "\n")
line = fh_r.readline().rstrip('\n')
fh_r.close()
fh_w.close()
def generate_exclude_file_CON(HFresults_file, individuals_annotation_QCed, variant_exclude_file, batches_names, prefix_merged_SNPQCII, FDR_index_remove_variants, plotscript):
""" determine variants failed even with the worst batch removed """
# IMPORTANT: Here exclude rejected variants from "all batches" instead of "worst batch removed" ###
# THIS INDEX MUST BE SET BY USER
# '4' corresponds to FDR at 1e-5, see list thresholds_FDR below
# '6' corresponds to FDR at 1e-7, see list thresholds_FDR below
# FDR_index_remove_variants = 4
# --------------------------------------------------------- #
# -- (1) count number of batches in QCed annotation file -- #
# --------------------------------------------------------- #
batches_dict = {}
batches_list = []
try:
fh_r = file(individuals_annotation_QCed, "r")
except IOError, e:
print e
sys.exit(1)
# read header
line = fh_r.readline().rstrip('\n')
list = re.split("\s+", line)
assert(list[6] == "batch")
# read body
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
if not list[6] in batches_dict:
batches_dict[list[6]] = True
batches_list.append(list[6])
line = fh_r.readline().rstrip('\n')
fh_r.close()
numof_batches = len(batches_list)
print "\n Detected the following batches for HF test (n=" + str(numof_batches) + "): ..."
for i in xrange(numof_batches):
print "\n " + batches_list[i]
# QCed list of batches
batches_list_dict = {}
for i in xrange(numof_batches):
batches_list_dict[batches_list[i]] = True
# original list of batches
print "\n Following batches were removed: ..."
# for i in xrange(len(batches_names)):
# if not batches_names[i] in batches_list_dict:
# print "\n " + batches_names[i]
# ------------------------------------------------------------ #
# -- (2) read HF P-values from results file to control FDR -- #
# ------------------------------------------------------------ #
# TODO change HF to HF
# HF_Pval_vectors
HF_Pval_vector_allbatches_ctrls = [] # (HF_p-value, variant_id)
HF_Pval_vector_worstbatchremoved_ctrls = [] # (HF_p-value, variant_id)
try:
fh_r = file(HFresults_file, "r")
except IOError, e:
print e
sys.exit(1)
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
# four preceding columns (CHR, SNP, POS, A1)
numof_pvalues = len(list[4:])
# numof_pvalues - #phenotypes(CON,CD,UC) / #phenotypes(CON,CD,UC)
# numof_pvalues - #phenotypes(CON) / #phenotypes(CON)
if not (numof_batches == (numof_pvalues - 1)):
print >> sys.stderr, "abort: problem with results from splitted HF files, probably problem with jobs submitted to HP cluster."
print >> sys.stderr, " Expected #numof_pvalues=" + str(numof_batches - 1) + " in " + prefix_merged_SNPQCII + "_HF.auto.R"
print >> sys.stderr, " Observed #numof_pvalues=" + str(numof_batches - 1) + " in " + prefix_merged_SNPQCII + "_HF.auto.R"
print >> sys.stderr, "abort: #batches_annotation=" + str(numof_batches) + " != " + str((numof_pvalues - 1)) + "=#batches_HFtest"
sys.exit(1)
# ----------------------------------------------------------- #
# -- here the order of controls and diseases is important! -- #
# ----------------------------------------------------------- #
# (1) Controls
if list[4] != "NA":
HF_entire_collection_ctrls = float(list[4])
else:
HF_entire_collection_ctrls = 1.0
# --------------------------------------------- #
# -- look at p-values from entire collection -- #
# --------------------------------------------- #
variant_id = list[1]
HF_Pval_vector_allbatches_ctrls.append((HF_entire_collection_ctrls, variant_id))
# --------------------- #
# -- look at batches -- #
# --------------------- #
# (1) Controls
HF_max_excludebatch = 0.0
count_NA = 0
# find the highest p-value (when "worst" batch removed) when
# running entire ctrl collection with one batch removed at one time
for i in xrange(5, 5 + numof_batches, 1):
if list[i] != "NA":
HF_entire_collection_exclude_particularbatch = float(list[i])
if HF_entire_collection_exclude_particularbatch > HF_max_excludebatch:
HF_max_excludebatch = HF_entire_collection_exclude_particularbatch
else:
count_NA += 1
if numof_batches == count_NA:
HF_max_excludebatch = 1.0
HF_Pval_vector_worstbatchremoved_ctrls.append((HF_max_excludebatch, variant_id))
line = fh_r.readline().rstrip('\n')
fh_r.close()
# ------------------------------------------------------------------- #
# -- sort p-value vectors by first element of tuples, i.e. p-value -- #
# ------------------------------------------------------------------- #
HF_Pval_vector_allbatches_ctrls.sort(reverse=False)
HF_Pval_vector_worstbatchremoved_ctrls.sort(reverse=False)
assert(len(HF_Pval_vector_allbatches_ctrls) == len(HF_Pval_vector_worstbatchremoved_ctrls))
# ---------------------------------------------------------------- #
# -- count #variant failed at FDR at q=1e-1,1e-2,1e-3,...,1e-10 -- #
# ---------------------------------------------------------------- #
thresholds_FDR = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10]
counts_rejected_FDR_allbatches_ctrls = [0 for i in range(10)] # set count to 0
counts_rejected_FDR_worstbatchremoved_ctrls = [0 for i in range(10)] # set count to 0
# #### count total number of removeVariants from ctrls/CD/UC worstbatchremoved for each FDR
# ###counts_rejected_FDR_allbatches_ctrls_CD_UC_cases = [ {} for i in range(10) ] # add dictionaries
# ###counts_rejected_FDR_worstbatchremoved_ctrls_CD_UC_cases = [ {} for i in range(10) ] # add dictionaries
# fill this vector with HF_Pvalues at FDR thresholds
thresholds_Pvals_allbatches_ctrls = [float(0) for i in range(10)]
thresholds_Pvals_worstbatchremoved_ctrls = [float(0) for i in range(10)]
# ------------------------------------------------------------------------------- #
# -- calculate FDR for different FDR thresholds (Benjamini and Hochberg, 1995) -- #
# ------------------------------------------------------------------------------- #
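# Minimal sketch of the Benjamini-Hochberg rule implemented below (added for
# clarity, not part of the original pipeline): the cutoff is the largest rank i
# with p_(i) <= (i/n) * q for sorted p-values p_(1) <= ... <= p_(n), e.g.
#
#   def bh_cutoff(sorted_pvals, q):
#       n = len(sorted_pvals)
#       k = 0
#       for i, p in enumerate(sorted_pvals, start=1):
#           if p <= (i / float(n)) * q:
#               k = i
#       return sorted_pvals[k - 1] if k else None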
# (a) ctrls - for all batches and worstbatchremoved
n = len(HF_Pval_vector_allbatches_ctrls)
for j in xrange(len(thresholds_FDR)):
break_i_loop_part1 = False
break_i_loop_part2 = False
for i in xrange(1, n + 1, 1):
rank = i / float(n)
threshold = rank * thresholds_FDR[j]
if (not break_i_loop_part1) and (HF_Pval_vector_allbatches_ctrls[i - 1][0] > threshold):
thresholds_Pvals_allbatches_ctrls[j] = HF_Pval_vector_allbatches_ctrls[i - 2][0]
counts_rejected_FDR_allbatches_ctrls[j] = i - 1
# add variantIDs to dict for counting total number of
# removeVariants from ctrls/CD/UC allbatches for each FDR
# for k in xrange(i - 1):
# #counts_rejected_FDR_allbatches_ctrls_CD_UC_cases[j][HF_Pval_vector_allbatches_ctrls[k][1]] = True
# pass
break_i_loop_part1 = True
if (not break_i_loop_part2) and (HF_Pval_vector_worstbatchremoved_ctrls[i - 1][0] > threshold):
thresholds_Pvals_worstbatchremoved_ctrls[j] = HF_Pval_vector_worstbatchremoved_ctrls[i - 2][0]
counts_rejected_FDR_worstbatchremoved_ctrls[j] = i - 1
break_i_loop_part2 = True
# add variantIDs to dict for counting total number of
# removeVariants from ctrls/CD/UC worstbatchremoved for each FDR
# for k in xrange(i - 1):
# #counts_rejected_FDR_worstbatchremoved_ctrls_CD_UC_cases[j][HF_Pval_vector_worstbatchremoved_ctrls[k][1]] = True
# pass
if break_i_loop_part1 and break_i_loop_part2:
break
# ---------------------------------------------------------------------------------------------- #
# -- (3) extract rejected variants for FDRs at threshold with index FDR_index_remove_variants -- #
# ---------------------------------------------------------------------------------------------- #
# IMPORTANT: Here exclude rejected variants from "all batches" instead of "worst batch removed" ###
try:
fh_r = file(HFresults_file, "r")
fh_w = file(variant_exclude_file, "w")
except IOError, e:
print e
sys.exit(1)
# THIS INDEX MUST BE SET BY USER at the beginning of this function
# '4' corresponds to FDR at 1e-5
# use variable FDR_index_remove_variants = 4 (per default)
# raus
dict_test = {}
count_removeVariants_worstbatchremoved = 0
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
# four preceding columns (CHR, SNP, POS, A1)
numof_pvalues = len(list[4:])
# numof_pvalues - #phenotypes(CON,CD,UC) / #phenotypes(CON,CD,UC)
# numof_pvalues - #phenotypes(CON) / #phenotypes(CON)
if not (numof_batches == (numof_pvalues - 1)):
print >> sys.stderr, "abort: problem with results from splitted HF files, probably problem with jobs submitted to HP cluster."
print >> sys.stderr, " Expected #numof_pvalues=" + str(numof_batches - 1) + " in " + prefix_merged_SNPQCII + "_HF.auto.R"
print >> sys.stderr, " Observed #numof_pvalues=" + str(numof_batches - 1) + " in " + prefix_merged_SNPQCII + "_HF.auto.R"
print >> sys.stderr, "abort: #batches_annotation=" + str(numof_batches) + " != " + str((numof_pvalues - 1)) + "=#batches_HFtest"
sys.exit(1)
removeVariant = False
# ----------------------------------------------------------- #
# -- here the order of controls and diseases is important! -- #
# ----------------------------------------------------------- #
# (1) Controls
if list[4] != "NA":
HF_entire_collection_ctrls = float(list[4])
else:
HF_entire_collection_ctrls = 1.0
# (1) Controls
if HF_entire_collection_ctrls <= thresholds_Pvals_allbatches_ctrls[FDR_index_remove_variants]:
# IMPORTANT: Here exclude rejected variants from "all batches" instead of "worst batch removed" ###
removeVariant = True
HF_max_excludebatch = 0.0
# find the highest p-value (when "worst" batch removed) when
# running entire ctrl collection with one batch removed at one time
for i in xrange(5, 5 + numof_batches, 1):
if list[i] != "NA":
HF_entire_collection_exclude_particularbatch = float(list[i])
if HF_entire_collection_exclude_particularbatch > HF_max_excludebatch:
HF_max_excludebatch = HF_entire_collection_exclude_particularbatch
# if batch with smallest HF pvalue is removed AND
# HF_max_excludebatch still below or equal to thresholds_Pvals_worstbatchremoved_ctrls[FDR_index_remove_variants], then remove variant.
if HF_max_excludebatch <= thresholds_Pvals_worstbatchremoved_ctrls[FDR_index_remove_variants]:
removeVariant = True
if removeVariant:
dict_test[list[1]] = True
count_removeVariants_worstbatchremoved += 1
fh_w.writelines(list[1] + "\n")
line = fh_r.readline().rstrip('\n')
fh_r.close()
fh_w.close()
# assert(count_removeVariants_worstbatchremoved == len(counts_rejected_FDR_worstbatchremoved_ctrls_CD_UC_cases[FDR_index_remove_variants]))
# ----------------------------------------------------------------------------------- #
# -- write #rejected variants for FDRs at different thresholds for plotting with R -- #
# -- HF across entire collection and worstbatchremoved -- #
# ----------------------------------------------------------------------------------- #
try:
fh_FDR_w = file(HFresults_file + ".FDRthresholds.SNPQCII.1.txt", "w")
except IOError, e:
print e
sys.exit(1)
# write header
fh_FDR_w.writelines("FDR\tFail_allbatches_ctrls\tHF_pval_allbatches_ctrls\tFail_worstbatchremoved_ctrls\tHF_pval_worstbatchremoved_ctrls\n")
for i in xrange(len(thresholds_FDR)):
fh_FDR_w.writelines("%s" % (str(thresholds_FDR[i])))
fh_FDR_w.writelines("\t%s" % (str(counts_rejected_FDR_allbatches_ctrls[i])))
fh_FDR_w.writelines("\t%s" % (str(thresholds_Pvals_allbatches_ctrls[i])))
fh_FDR_w.writelines("\t%s" % (str(counts_rejected_FDR_worstbatchremoved_ctrls[i])))
fh_FDR_w.writelines("\t%s\n" % (str(thresholds_Pvals_worstbatchremoved_ctrls[i])))
fh_FDR_w.close()
# IMPORTANT: Here exclude rejected variants from "all batches" instead of "worst batch removed" ###
# plot results applying FDR thresholds
os.system("R --slave --args %s %s < %s"
% (HFresults_file + ".FDRthresholds.SNPQCII",
str(FDR_index_remove_variants + 1),
plotscript))
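# --------------------------------------------------------------------------- #
# -- editor's note: minimal, self-contained sketch of the threshold search  -- #
# -- used by the FDR loops above (Benjamini-Hochberg style, first crossing).-- #
# -- Illustrative only; it is not called anywhere in this pipeline.         -- #
# --------------------------------------------------------------------------- #
def _fdr_first_crossing_sketch(sorted_pvalues, q):
    """ sorted_pvalues: ascending list of p-values; q: FDR level (e.g. 1e-5).
        Returns (last rejected p-value, number of rejected tests), mirroring
        the 'p_(i) > (i/n)*q' stop condition of the loops above. """
    n = len(sorted_pvalues)
    if n == 0:
        return 0.0, 0
    for i in xrange(1, n + 1):
        if sorted_pvalues[i - 1] > (i / float(n)) * q:
            last_rejected = sorted_pvalues[i - 2] if i > 1 else 0.0
            return last_rejected, i - 1
    return sorted_pvalues[-1], n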
def determine_pca_outlier(log, fam_file, outlier_file):
""" determine outlier run eigenstrat program """
pcalog = PcaLog(input_file=log)
# outlier dictionary key=outlier, val=sigmage
outlier = pcalog.get_outlier()
del pcalog
fam = {}
try:
fh_r = file(fam_file, "r")
except IOError, e:
print e
sys.exit(1)
line = fh_r.readline()
while line:
list = re.split("\s+",line)
fam[list[1]] = list[0]
line = fh_r.readline()
fh_r.close()
try:
fh_w = file(outlier_file, "w")
except IOError, e:
print e
sys.exit(1)
for indiv in outlier:
#fh_w.writelines(indiv + "\n")
fh_w.writelines(fam[indiv] +"\t" +indiv+ "\n")
fh_w.close()
def addbatchinfo_32PCAs(fam, individuals_annotation, evec_file, eval_file, new_evec_file, new_eval_file):
""" add batch information to final evec file """
try:
fh1 = file(fam, "r")
except IOError, e:
print e
sys.exit(1)
id2fid = {}
line = fh1.readline().rstrip('\n')
while line:
list = re.split("\s+",line)
fid = list[0]
indivID = list[1]
id2fid[indivID] = fid
line = fh1.readline().rstrip('\n')
fh1.close()
try:
fh1 = file(individuals_annotation, "r")
except IOError, e:
print e
sys.exit(1)
id2batch = {}
line = fh1.readline().rstrip('\n')
line = fh1.readline().rstrip('\n')
while line:
list = re.split("\s+",line)
indivID = list[1]
batch = list[6]
id2batch[indivID] = batch
line = fh1.readline().rstrip('\n')
fh1.close()
try:
fh2 = file(evec_file, "r")
fh3 = file(new_evec_file, "w")
except IOError, e:
print e
sys.exit(1)
line = fh2.readline().rstrip('\n')
fh3.writelines("FID\tIID" +line.replace("indivID","") + "\tbatch\n")
line = fh2.readline().rstrip('\n')
while line:
list = re.split("\s+",line)
id = list[0]
fh3.writelines(id2fid[id] +"\t"+\
list[0] +"\t"+\
list[1] +"\t"+\
list[2] +"\t"+\
list[3] +"\t"+\
list[4] +"\t"+\
list[5] +"\t"+\
list[6] +"\t"+\
list[7] +"\t"+\
list[8] +"\t"+\
list[9] +"\t"+\
list[10] +"\t"+\
list[11] +"\t"+\
list[12] +"\t"+\
list[13] +"\t"+\
list[14] +"\t"+\
list[15] +"\t"+\
list[16] +"\t"+\
list[17] +"\t"+\
list[18] +"\t"+\
list[19] +"\t"+\
list[20] +"\t"+\
list[21] +"\t"+\
list[22] +"\t"+\
list[23] +"\t"+\
list[24] +"\t"+\
list[25] +"\t"+\
list[26] +"\t"+\
list[27] +"\t"+\
list[28] +"\t"+\
list[29] +"\t"+\
list[30] +"\t"+\
list[31] +"\t"+\
list[32] +"\t"+\
id2batch[id] +"\n")
line = fh2.readline().rstrip('\n')
fh2.close()
fh3.close()
os.system("cp %s %s" %(eval_file, new_eval_file))
# This function does now require the HFresults_file and the individuals annotation to contain batches for exactly one disease.
# Please filter accordingly.
def generate_exclude_file_for_diagnosis(HFresults_file, individuals_annotation_QCed, variant_exclude_file, prefix_merged_SNPQCII, FDR_index_remove_variants, plotscript):
""" determine variants failed even with the worst batch removed """
## IMPORTANT: Here exclude rejected variants from "worst batch removed" ###
# THIS INDEX MUST BE SET BY USER
# '4' corresponds to FDR at 1e-5, see list thresholds_FDR below
# '6' corresponds to FDR at 1e-7, see list thresholds_FDR below
####FDR_index_remove_variants = 4
# --------------------------------------------------------- #
# -- (1) count number of batches in QCed annotation file -- #
# --------------------------------------------------------- #
batches_dict = {}
batches_list = []
try:
fh_r = file(individuals_annotation_QCed, "r")
except IOError, e:
print e
sys.exit(1)
# read header
line = fh_r.readline().rstrip('\n')
list = re.split("\s+",line)
assert(list[6] == "batch")
assert(list[8] == "diagnosis")
# read body
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+",line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
if not batches_dict.has_key(list[6]):
batches_dict[list[6]] = True
batches_list.append(list[6])
line = fh_r.readline().rstrip('\n')
fh_r.close()
numof_batches = len(batches_list)
print "\n Detected the following batches for HF test (n=" +str(numof_batches)+ "): ..."
for i in xrange(numof_batches):
print "\n " +batches_list[i]
# QCed list of batches
batches_list_dict = {}
for i in xrange(numof_batches):
batches_list_dict[batches_list[i]] = True
# original list of batches
print "\n Following batches were removed: ..."
for i in xrange(len(batches_list)):
if not batches_list_dict.has_key(batches_list[i]):
print "\n " +batches_list[i]
# ------------------------------------------------------------ #
# -- (2) read HF P-values from results file to control FDR -- #
# ------------------------------------------------------------ #
# HF_Pval_vectors
#HF_Pval_vector_allbatches_ctrls = [] # (HF_p-value, variant_id)
HF_Pval_vector_allbatches_cases = [] # (HF_p-value, variant_id)
#HF_Pval_vector_worstbatchremoved_ctrls = [] # (HF_p-value, variant_id)
HF_Pval_vector_worstbatchremoved_cases = [] # (HF_p-value, variant_id)
try:
fh_r = file(HFresults_file, "r")
except IOError, e:
print e
sys.exit(1)
print("Scanning samples...\n")
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+",line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
# four preceding columns (CHR, SNP, POS, A1)
numof_pvalues = len(list[4:])
# numof_pvalues - #phenotypes(CON,CD,UC) / #phenotypes(CON,CD,UC)
if not (numof_batches == (numof_pvalues-2)/2):
print >> sys.stderr, "abort: problem with results from splitted HF files, probably problem with jobs submitted to HP cluster."
print >> sys.stderr, " Expected #numof_pvalues=" +str(numof_batches*2-2)+ " in " +HFresults_file
print >> sys.stderr, " Observed #numof_pvalues=" +str(numof_batches*2-2)+ " in " +HFresults_file
print >> sys.stderr, "abort: #batches_annotation=" +str(numof_batches)+ " != " +str((numof_pvalues-2)/2)+ "=#batches_HFtest"
sys.exit(1)
#assert(numof_batches == (numof_pvalues-3)/3)
# ----------------------------------------------------------- #
# -- here the order of controls and diseases is important! -- #
# ----------------------------------------------------------- #
# (1) Controls
#if list[4] != "NA":
# HF_entire_collection_ctrls = float(list[4])
#else:
# HF_entire_collection_ctrls = 1.0
##### (2) PS
####if list[5] != "NA":
#### HF_entire_collection_PS_cases = float(list[5])
####else:
#### HF_entire_collection_PS_cases = 1.0
##### (3) AS
####if list[6] != "NA":
#### HF_entire_collection_AS_cases = float(list[6])
####else:
#### HF_entire_collection_AS_cases = 1.0
# (4) Cases (the all-batches case p-value is column 5; column 4 holds the controls p-value)
if list[5] != "NA":
HF_entire_collection_cases = float(list[5])
else:
HF_entire_collection_cases = 1.0
# (5) UC
#if list[6] != "NA":
# HF_entire_collection_UC_cases = float(list[6])
#else:
# HF_entire_collection_UC_cases = 1.0
##### (6) PSC
####if list[9] != "NA":
#### HF_entire_collection_PSC_cases = float(list[9])
####else:
#### HF_entire_collection_PSC_cases = 1.0
# --------------------------------------------- #
# -- look at p-values from entire collection -- #
# --------------------------------------------- #
variant_id = list[1]
# HF_Pval_vector_allbatches_ctrls.append( (HF_entire_collection_ctrls, variant_id) )
HF_Pval_vector_allbatches_cases.append( (HF_entire_collection_cases, variant_id) )
# --------------------- #
# -- look at batches -- #
# --------------------- #
# (1) Controls
# HF_max_excludebatch = 0.0
# count_NA = 0
# # find the highest p-value (when "worst" batch removed) when
# # running entire ctrl collection with one batch removed at one time
# for i in xrange(6, 6+numof_batches, 1):
# if list[i] != "NA":
# HF_entire_collection_exclude_particularbatch = float(list[i])
# if HF_entire_collection_exclude_particularbatch > HF_max_excludebatch:
# HF_max_excludebatch = HF_entire_collection_exclude_particularbatch
# else:
# count_NA += 1
# if numof_batches == count_NA:
# HF_max_excludebatch = 1.0
# HF_Pval_vector_worstbatchremoved_ctrls.append( (HF_max_excludebatch, variant_id) )
# (2) Cases
HF_max_excludebatch = 0.0
count_NA = 0
# find the highest p-value (when "worst" batch removed) when
# running entire case collection with one batch removed at one time
for i in xrange(6+numof_batches, 6+2*numof_batches, 1):
if list[i] != "NA":
HF_entire_collection_exclude_particularbatch = float(list[i])
if HF_entire_collection_exclude_particularbatch > HF_max_excludebatch:
HF_max_excludebatch = HF_entire_collection_exclude_particularbatch
else:
count_NA += 1
if numof_batches == count_NA:
HF_max_excludebatch = 1.0
HF_Pval_vector_worstbatchremoved_cases.append( (HF_max_excludebatch, variant_id) )
line = fh_r.readline().rstrip('\n')
fh_r.close()
# ------------------------------------------------------------------- #
# -- sort p-value vectors by first element of tuples, i.e. p-value -- #
# ------------------------------------------------------------------- #
#HF_Pval_vector_allbatches_ctrls.sort(reverse=False)
HF_Pval_vector_allbatches_cases.sort(reverse=False)
#HF_Pval_vector_worstbatchremoved_ctrls.sort(reverse=False)
HF_Pval_vector_worstbatchremoved_cases.sort(reverse=False)
#assert(len(HF_Pval_vector_allbatches_ctrls) == len(HF_Pval_vector_allbatches_cases))
#assert(len(HF_Pval_vector_allbatches_ctrls) == len(HF_Pval_vector_worstbatchremoved_ctrls))
#assert(len(HF_Pval_vector_allbatches_ctrls) == len(HF_Pval_vector_worstbatchremoved_cases))
# ---------------------------------------------------------------- #
# -- count #variant failed at FDR at q=1e-1,1e-2,1e-3,...,1e-10 -- #
# ---------------------------------------------------------------- #
thresholds_FDR = [ 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10 ]
#counts_rejected_FDR_allbatches_ctrls = [ 0 for i in range(10) ] # set count to 0
counts_rejected_FDR_allbatches_cases = [ 0 for i in range(10) ] # set count to 0
#counts_rejected_FDR_worstbatchremoved_ctrls = [ 0 for i in range(10) ] # set count to 0
counts_rejected_FDR_worstbatchremoved_cases = [ 0 for i in range(10) ] # set count to 0
# count total number of removeVariants from ctrls/cases worstbatchremoved for each FDR
counts_rejected_FDR_allbatches_ctrls_cases = [ {} for i in range(10) ] # add dictionaries
counts_rejected_FDR_worstbatchremoved_ctrls_cases = [ {} for i in range(10) ] # add dictionaries
# fill this vector with HF_Pvalues at FDR thresholds
#thresholds_Pvals_allbatches_ctrls = [ float(0) for i in range(10) ]
thresholds_Pvals_allbatches_cases = [ float(0) for i in range(10) ]
#thresholds_Pvals_worstbatchremoved_ctrls = [ float(0) for i in range(10) ]
thresholds_Pvals_worstbatchremoved_cases = [ float(0) for i in range(10) ]
print("Calculating FDR for FDR thresholds...\n")
# ------------------------------------------------------------------------------- #
# -- calculate FDR for different FDR thresholds (Benjamini and Hochberg, 1995) -- #
# ------------------------------------------------------------------------------- #
# (a) ctrls - for all batches and worstbatchremoved
#print(HF_Pval_vector_worstbatchremoved_ctrls)
# (b) cases - for all batches and worstbatchremoved
n = len (HF_Pval_vector_allbatches_cases)
for j in xrange(len(thresholds_FDR)):
break_i_loop_part1 = False
break_i_loop_part2 = False
for i in xrange(1,n+1,1):
rank = i/float(n)
threshold = rank*thresholds_FDR[j]
if (not break_i_loop_part1) and (HF_Pval_vector_allbatches_cases[i-1][0] > threshold):
thresholds_Pvals_allbatches_cases[j] = HF_Pval_vector_allbatches_cases[i-2][0]
counts_rejected_FDR_allbatches_cases[j] = i-1
# add variantIDs to dict for counting total number of
# removeVariants from ctrls/cases allbatches for each FDR
for k in xrange(i-1):
counts_rejected_FDR_allbatches_ctrls_cases[j][HF_Pval_vector_allbatches_cases[k][1]] = True
break_i_loop_part1 = True
if (not break_i_loop_part2) and (HF_Pval_vector_worstbatchremoved_cases[i-1][0] > threshold):
thresholds_Pvals_worstbatchremoved_cases[j] = HF_Pval_vector_worstbatchremoved_cases[i-2][0]
counts_rejected_FDR_worstbatchremoved_cases[j] = i-1
break_i_loop_part2 = True
# add variantIDs to dict for counting total number of
# removeVariants from ctrls/cases worstbatchremoved for each FDR
for k in xrange(i-1):
counts_rejected_FDR_worstbatchremoved_ctrls_cases[j][HF_Pval_vector_worstbatchremoved_cases[k][1]] = True
if break_i_loop_part1 and break_i_loop_part2:
break
print "Case sample rejections for all batches by FDR: "
print(counts_rejected_FDR_allbatches_cases)
print "Case sample rejections for all but the worst batch by FDR:"
print(counts_rejected_FDR_worstbatchremoved_cases)
#print(HF_Pval_vector_worstbatchremoved_cases)
print "Extracting rejected variants...\n"
# ---------------------------------------------------------------------------------------------- #
# -- (3) extract rejected variants for FDRs at threshold with index FDR_index_remove_variants -- #
# ---------------------------------------------------------------------------------------------- #
## IMPORTANT: Here exclude rejected variants from "worst batch removed" ###
try:
fh_r = file(HFresults_file, "r")
fh_w = file(variant_exclude_file, "w")
except IOError, e:
print e
sys.exit(1)
# THIS INDEX MUST BE SET BY USER at the beginning of this function
# '4' corresponds to FDR at 1e-5
# use variable FDR_index_remove_variants = 4 (by default)
# remove (leftover note)
dict_test = {}
count_removeVariants_worstbatchremoved = 0
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+",line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
# four preceding columns (CHR, SNP, POS, A1)
numof_pvalues = len(list[4:])
# numof_pvalues - #phenotypes(CON,CD,UC) / #phenotypes(CON,CD,UC)
if not (numof_batches == (numof_pvalues-2)/2):
print >> sys.stderr, "abort: problem with results from splitted HF files, probably problem with jobs submitted to HP cluster."
print >> sys.stderr, " Expected #numof_pvalues=" +str(numof_batches*2-2)+ " in " +prefix_merged_SNPQCII+ "_HF.auto.R"
print >> sys.stderr, " Observed #numof_pvalues=" +str(numof_batches*2-2)+ " in " +prefix_merged_SNPQCII+ "_HF.auto.R"
print >> sys.stderr, "abort: #batches_annotation=" +str(numof_batches)+ " != " +str((numof_pvalues-2)/2)+ "=#batches_HFtest"
sys.exit(1)
#assert(numof_batches == (numof_pvalues-3)/3)
removeVariant = False
# ----------------------------------------------------------- #
# -- here the order of controls and diseases is important! -- #
# ----------------------------------------------------------- #
# (1) Controls
# if list[4] != "NA":
# HF_entire_collection_ctrls = float(list[4])
# else:
# HF_entire_collection_ctrls = 1.0
# (4) cases (same all-batches column as in the scanning pass above, i.e. column 5)
if list[5] != "NA":
HF_entire_collection_cases = float(list[5])
else:
HF_entire_collection_cases = 1.0
# (1) Controls
# if HF_entire_collection_ctrls <= thresholds_Pvals_allbatches_ctrls[FDR_index_remove_variants]:
# HF_max_excludebatch = 0.0
# # find the highest p-value (when "worst" batch removed) when
# # running entire ctrl collection with one batch removed at one time
# for i in xrange(6, 6+numof_batches, 1):
# if list[i] != "NA":
# HF_entire_collection_exclude_particularbatch = float(list[i])
# if HF_entire_collection_exclude_particularbatch > HF_max_excludebatch:
# HF_max_excludebatch = HF_entire_collection_exclude_particularbatch
# # if batch with smallest HF pvalue is removed AND
# # HF_max_excludebatch still below equal thresholds_Pvals_worstbatchremoved_ctrls[FDR_index_remove_variants], then remove variant.
# if HF_max_excludebatch <= thresholds_Pvals_worstbatchremoved_ctrls[FDR_index_remove_variants]:
# removeVariant = True
# (2) cases
if HF_entire_collection_cases <= thresholds_Pvals_allbatches_cases[FDR_index_remove_variants]:
HF_max_excludebatch = 0.0
# find the highest p-value (when "worst" batch removed) when
# running entire case collection with one batch removed at one time
for i in xrange(6+numof_batches, 6+2*numof_batches, 1):
if list[i] != "NA":
HF_entire_collection_exclude_particularbatch = float(list[i])
if HF_entire_collection_exclude_particularbatch > HF_max_excludebatch:
HF_max_excludebatch = HF_entire_collection_exclude_particularbatch
# if batch with smallest HF pvalue is removed AND
# HF_max_excludebatch still below or equal to thresholds_Pvals_worstbatchremoved_cases[FDR_index_remove_variants], then remove variant.
if HF_max_excludebatch <= thresholds_Pvals_worstbatchremoved_cases[FDR_index_remove_variants]:
removeVariant = True
if removeVariant:
dict_test[list[1]] = True
count_removeVariants_worstbatchremoved += 1
fh_w.writelines(list[1] +"\n")
line = fh_r.readline().rstrip('\n')
fh_r.close()
fh_w.close()
#print("count_removeVariants_worstbatchremoved=%s counts_rejected_FDR_worstbatchremoved_ctrls_cases[FDR_index_remove_variants]=%s" %(count_removeVariants_worstbatchremoved, len(counts_rejected_FDR_worstbatchremoved_ctrls_cases[FDR_index_remove_variants])))
#print(counts_rejected_FDR_worstbatchremoved_ctrls_cases)
# this assertion fails, but it should not! ...or should it?
#assert(count_removeVariants_worstbatchremoved == len(counts_rejected_FDR_worstbatchremoved_ctrls_cases[FDR_index_remove_variants]))
print("Write rejected variants\n")
# ----------------------------------------------------------------------------------- #
# -- write #rejected variants for FDRs at different thresholds for plotting with R -- #
# -- HF across entire collection and worstbatchremoved -- #
# ----------------------------------------------------------------------------------- #
try:
fh_FDR_w = file(HFresults_file + ".FDRthresholds.SNPQCII.1.txt", "w")
except IOError, e:
print e
sys.exit(1)
# write header
fh_FDR_w.writelines("FDR\tFail_allbatches_cases\tHF_pval_allbatches_cases\tFail_worstbatchremoved_cases\tHF_pval_worstbatchremoved_cases\n")
for i in xrange(len(thresholds_FDR)):
fh_FDR_w.writelines("%s" %(str(thresholds_FDR[i])))
#fh_FDR_w.writelines("\t%s" %(str(len(counts_rejected_FDR_allbatches_ctrls_cases[i]))))
#fh_FDR_w.writelines("\t%s" %(str(len(counts_rejected_FDR_worstbatchremoved_ctrls_cases[i]))))
#fh_FDR_w.writelines("\t%s" %(str(counts_rejected_FDR_allbatches_ctrls[i])))
#fh_FDR_w.writelines("\t%s" %(str(thresholds_Pvals_allbatches_ctrls[i])))
#fh_FDR_w.writelines("\t%s" %(str(counts_rejected_FDR_worstbatchremoved_ctrls[i])))
#fh_FDR_w.writelines("\t%s" %(str(thresholds_Pvals_worstbatchremoved_ctrls[i])))
fh_FDR_w.writelines("\t%s" %(str(counts_rejected_FDR_allbatches_cases[i])))
fh_FDR_w.writelines("\t%s" %(str(thresholds_Pvals_allbatches_cases[i])))
fh_FDR_w.writelines("\t%s" %(str(counts_rejected_FDR_worstbatchremoved_cases[i])))
fh_FDR_w.writelines("\t%s\n" %(str(thresholds_Pvals_worstbatchremoved_cases[i])))
fh_FDR_w.close()
print("plot\n")
# plot results applying FDR thresholds
os.system("R --slave --args %s %s < %s" \
%(HFresults_file + ".FDRthresholds.SNPQCII",\
str(FDR_index_remove_variants+1), \
plotscript))
```
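A minimal usage sketch for `generate_exclude_file_for_diagnosis` defined above. Every file name, the result prefix and the R plot script below are illustrative assumptions, not values taken from this repository; only the argument order follows the signature shown in the file.

```python
# illustrative call only -- every path/prefix below is an assumption
generate_exclude_file_for_diagnosis(
    HFresults_file="SNPQCII_HF.auto.R",                      # merged HF test results (assumed name)
    individuals_annotation_QCed="individuals_QCed.annotation",
    variant_exclude_file="HF_FDR_1e-5.exclude.variants",
    prefix_merged_SNPQCII="SNPQCII",
    FDR_index_remove_variants=4,                             # index 4 -> FDR threshold 1e-5
    plotscript="hf_fdr_thresholds_plot.r",                   # R script consumed via 'R --slave'
)
```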
#### File: lib/all_scripts/all_common.py
```python
import os
import sys
from os.path import *
import ConfigParser
from optparse import *
from copy import deepcopy
from operator import itemgetter
from misc_tools import *
from file_tools import *
from job_submission import *
####################################################################
#
# Main classes and functions
#
####################################################################
ALL_POSSIBLE_SWITCHES = []
class Parameter_Set:
def __init__(self, dict_of_params):
self.__p__ = {}
for d in dict_of_params:
self.__p__[d] = deepcopy(dict_of_params[d])
#
# simple getters
#
def __getitem__(self, key):
self.__getattr__(key)
def __getattr__(self, key):
#
# Any special prefix?
#
if key[0:4] == "SET_":
k = key[4:]
if self.__p__[k] == None:
return False
else:
return True
else:
try:
return self.__p__[key]
except KeyError:
return None
def get_parameter_sets(parameters,options, description, switch_file):
parse_possible_switches(switch_file)
aop = all_option_parsing(parameters, options, description, "")
return_list = []
for a in aop:
return_list.append(Parameter_Set(aop[a]))
return return_list
def parse_possible_switches(file):
global ALL_POSSIBLE_SWITCHES
try:
config = ConfigParser.SafeConfigParser()
config.read(realpath(file))
#
# Build list of all "Possible_Switch"es
#
for sec in config.sections():
dic = {}
dic.update(config.items(sec))
p = Possible_Switch(sec, eval(dic['value_as_list']), dic['optparse_type'], dic['optparse_metavar'], dic['optparse_help'], eval(dic['default_value']), eval(dic['optparse_choices']), str(dic['short_version']), eval(dic['shell_variable']), eval(dic['ignore_params_file']))
ALL_POSSIBLE_SWITCHES.append(p)
except ConfigParser.InterpolationError:
abort("Error in configuration file while trying to substitute params.\n" + str(sys.exc_info()[1]))
except ConfigParser.ParsingError:
abort("Error in configuration file.\n" + str(sys.exc_info()[1]))
class Possible_Switch:
def __init__(self, switch_name, value_as_list, optparse_type, optparse_metavar, optparse_help, default_value, optparse_choices, short_version, shell_variable, ignore_params_file):
# TODO: checking for params
self.switch_name = switch_name
self.value_as_list = value_as_list
self.optparse_type = optparse_type
self.optparse_metavar = optparse_metavar
self.optparse_help = optparse_help
self.optparse_choices = optparse_choices
self.short_version = short_version
self.shell_variable = shell_variable
self.default_value = default_value
#if default_value:
# print "switch:", switch_name, "has a default value of", default_value
self.ignore_params_file = ignore_params_file
####################################################################
#
# Other functions
#
####################################################################
def merge_opts_and_parameter_files(p_file, defaults_from_command_line, defaults_from_shell_variables):
"""
PARAMS:
- parameter file to read
RETURNS
- A dict of dicts merged parameters for all samples
"""
# If there is a config file in the home directory, get the "first set of defaults" from there
try:
first_defaults = {}
config = ConfigParser.SafeConfigParser(defaults_from_shell_variables)
config.optionxform = str
config.read(p_file)
dfcl = eval(str(defaults_from_command_line))
#
# A "false" from the command line means a --option has not been set!
# So change the value to a None
#
for d in dfcl:
if type(dfcl[d]) == type(False):
if dfcl[d] == False:
dfcl[d] = None
result_dict = {}
def special_sort(x,y):
try:
xx = float (x)
try:
yy = float (y)
if xx < yy:
return -1
elif xx > yy:
return 1
else:
return 0
except ValueError:
return -1
except ValueError:
try:
yy = float(y)
return 1
except ValueError:
if x < y:
return -1
elif x > y:
return 1
else:
return 0
sections = config.sections()
sections.sort(special_sort)
for sec in sections:
dic = {}
dic.update(config.items(sec))
for key in dfcl:
value = dfcl[key]
if not value == None:
dic[key.lower()]=value
result_dict[sec]= dic
return result_dict, config.defaults()
except ConfigParser.InterpolationError:
abort("Error in configuration file while trying to substitute params.\n" + str(sys.exc_info()[1]))
except ConfigParser.ParsingError:
abort("Error in configuration file.\n" + str(sys.exc_info()[1]))
def parse_all_parameter_sets(all_pars, req_pars, req_samples, instance_wide_defaults):
"""
RETURN:
A dict of parameter sets. These sets may be modified by other sets
(e.g. with "COMMAND init_set1,mod_set1,mod_set2,... ")
"""
#
# What are requested and what are modifying sets?
# Create set_modifcations!
#
global_modifying_sets = []
set_modifications = []
for r in req_samples:
parts = r.split(",")
head = parts[0]
if len(parts) == 1:
set_modifications.append([head])
else:
tail = parts[1:]
if head == "":
global_modifying_sets.extend(tail)
else:
set_modifications.append(parts)
#
# If there are globally modifying sets,
# append them to existing sets...
#
if len(global_modifying_sets) > 0:
for sm in set_modifications:
sm.extend(global_modifying_sets)
#
# Check if all requested and modifying sets exist
#
unfound_requested_sets = []
unfound_modifying_sets = []
for sm in set_modifications:
if not sm[0] in all_pars:
if not sm[0] in unfound_requested_sets:
unfound_requested_sets.append(sm[0])
if len(sm) > 1:
for mod_set in sm[1:]:
if not mod_set in all_pars:
if not mod_set in unfound_modifying_sets:
unfound_modifying_sets.append(mod_set)
abort_string = ""
if len(unfound_requested_sets):
abort_string = "\nUnable to find these requested sets: " + str(unfound_requested_sets)
if len(unfound_modifying_sets):
abort_string += "\nUnable to find these modifying sets: " + str(unfound_modifying_sets)
if not abort_string == "":
abort(abort_string)
#
# Now, go through the requested sets
#
# The resulting dict
result={}
for sm in set_modifications:
# head and tail of each set_modification
head = sm[0]
tail = []
if len(sm) > 1:
tail = sm[1:]
#
# the name of the resulting parameter set
#
result_set_name = head
for par_set_name in tail:
result_set_name += "," + par_set_name
#
# Start with the head
#
result[result_set_name] = deepcopy(all_pars[head])
#
# Now go through the modifying sets of the tail
#
for par_set_name in tail:
par_set = all_pars[par_set_name]
for par_set_key in par_set:
if par_set_key == "force" or par_set_key in instance_wide_defaults:
pass
else:
par_set_value = par_set[par_set_key]
result[result_set_name][par_set_key] = par_set_value
return result
def all_option_parsing(par_parameters,par_options, p_description, p_default_param_file ):
"""
PARAMS:
par_parameters: list of required params
par_options: list of optional parameters
p_description: description of the calling script for optparse
p_default_param_file: name of the default parameter file to look for
RETURN:
TODO
"""
p_parameters = []
for pp in par_parameters:
if not pp in p_parameters:
p_parameters.append(pp)
p_options = []
for op in par_options:
if not op in p_options:
p_options.append(op)
parameters_and_options = p_parameters + p_options
#
# prepare and start the parser
#
script_name = basename(sys.argv[0])
#
# A dict with all possible switches
#
aps = {}
for s in ALL_POSSIBLE_SWITCHES:
aps[s.switch_name] = s
default_description = "The parameter sets will be read from PARAM_SRC and can also be set/overridden using command parameters and options. PARAM_SRC can be a file or directory, where a file named '" + p_default_param_file + "' is expected."
parser = OptionParser(script_name + " [options] PARAM_SRC [ set1, set2, set3 ... ]", description=p_description + "\t " + default_description, formatter=TitledHelpFormatter())
def add_switch_to_option_group(s, g):
help_temp = s.optparse_help
#if not help_temp[-1] in ".:,;()":
# help_temp += "."
if s.optparse_choices:
help_temp += " CHOICES: " + str(s.optparse_choices[0])
for c in s.optparse_choices[1:]:
help_temp += "," + str(c)
if s.default_value:
help_temp = help_temp + " \t\tDEFAULT: " + str(s.default_value)
if s.optparse_type == "boolean":
g.add_option(s.short_version, "--" + s.switch_name, action="store_true", default=False, help=help_temp)
elif s.optparse_type == "explict_boolean":
g.add_option(s.short_version, "--" + s.switch_name, metavar="[Yes|No]", help=help_temp, choices=["Yes","No"])
else:
#print "switch_name", s.switch_name
#g.add_option(s.short_version, "--" + s.switch_name, type=s.optparse_type, metavar=s.optparse_metavar, help=help_temp, choices=s.optparse_choices, default=s.default_value)
g.add_option(s.short_version, "--" + s.switch_name, type=s.optparse_type, metavar=s.optparse_metavar, help=help_temp, choices=s.optparse_choices)
#
# Are there switches requested as parameters?
#
if len(p_parameters) > 0:
# sort & uniquify req. params
p_parameters = sorted(list(set(p_parameters)))
group_params = OptionGroup(parser, "PARAMETERS (required)", "The required parameters are usually defined by the parameter file specified by PARAM_SRC, but they can be overridden using options on the command line:")
group_params_any_switches = False
group_shell_params = OptionGroup(parser, "Shell PARAMETERS (required)", "These params are read from shell variables - if existing. They are overridden by params read from the parameter file, which can themselves be overruled using command line options.")
group_shell_params_any_switches = False
for xxx in p_parameters:
po = aps[xxx]
if po.shell_variable:
add_switch_to_option_group(po, group_shell_params)
group_shell_params_any_switches = True
else:
add_switch_to_option_group(po, group_params)
group_params_any_switches = True
if group_params_any_switches:
parser.add_option_group(group_params)
if group_shell_params_any_switches:
parser.add_option_group(group_shell_params)
#
# Are there switches requested as options?
#
parser.add_option("--force", action="store_true", help="In case of no given args forces to start with the standard options without asking.")
if len(p_options) > 0:
group_shell_options = OptionGroup(parser, "Shell OPTIONS" ,"Options to be read from shell variables.")
group_shell_options_any_switches = False
for xxx in p_options:
po = aps[xxx]
if po.shell_variable:
add_switch_to_option_group(po, group_shell_options)
group_shell_options_any_switches = True
else:
#add_switch_to_option_group(po, group_options)
add_switch_to_option_group(po, parser)
if group_shell_options_any_switches:
parser.add_option_group(group_shell_options)
#
# Now the magic moment: Parse the args!
#
opts, args = parser.parse_args()
#
# Is any option set that prevents reading the config file with param sets?
# (a bit of a roundabout way of doing it...)
#
cmd_line_dict = eval(str(opts))
for pao in parameters_and_options:
if aps[pao].ignore_params_file:
# Ok, this a switch that could prevent reading the file.
# Is it set?
if cmd_line_dict[pao]:
#
# Build a result list just with the args and without reading
# from a parameters sets files
#
d = {}
for pao in parameters_and_options:
d[pao] = cmd_line_dict[pao]
cmd_line_params = {"DUMMY" : d}
return cmd_line_params
#
# Really start with all standard args (if nothing is specified)?
#
# Check for args beginning with ,
modifying_sets_as_args = False
requested_sets_as_args = False
for a in args:
if a.startswith (","):
modifying_sets_as_args = True
else:
requested_sets_as_args =True
if opts.force and requested_sets_as_args:
abort("Either specify sets of params or use the '--force' option to choose all 'standard' sets of parameters.")
if not requested_sets_as_args and not opts.force:
print
print "No sets of parameters are specified."
if modifying_sets_as_args:
print "(Modifying parameters sets are given.)"
print
s = raw_input("Do you really want to start with all 'standard' sets of parameters? [y/n]:")
if s.lower() not in ("y", "yes"):
abort("For help start with option '-h")
#
# Define the TARGET parameter file name
#
# Without any args, look in the local directory for a config file
# If the first arg is nothing readable (like a sample number ;-)
# try also the local directory
if len(args) == 0 or not exists(args[0]):
args.insert(0, ".")
parameter_file=""
if isdir(args[0]):
temp = join(args[0], p_default_param_file)
if isfile(temp):
parameter_file=temp
else:
parser.error("No default parameter file '" + p_default_param_file + "' found in " + args[0])
else:
if isfile(args[0]):
parameter_file=args[0]
else:
parser.error("No default parameter named " + args[0] + "found.")
#
# If necessary try to read defaults from shell variables
#
defaults_from_shell_variables = {}
for x in parameters_and_options :
pao = aps[x]
if pao.shell_variable:
try:
defaults_from_shell_variables[x.lower()] = os.environ[x]
except KeyError:
0
#
# Read all params from the config file and merge/override them with the command line options
#
all_params, instance_wide_defaults = merge_opts_and_parameter_files(parameter_file, opts, defaults_from_shell_variables)
req_samples = []
try:
force_sets = instance_wide_defaults["opp.force-sets"].split(",")
except KeyError:
force_sets = []
requested_sets_present = False
if requested_sets_as_args:
for i in args[1:]:
req_samples.append(i)
requested_sets_present = True
else:
for ap in all_params:
try:
to_be_ignored = eval(all_params[ap]["ignore_this_sample"])
except KeyError:
to_be_ignored = True
if ap in force_sets:
to_be_ignored = False
if not to_be_ignored:
req_samples.append(ap)
requested_sets_present = True
for i in args[1:]:
if i.startswith(","):
req_samples.append(i)
if not requested_sets_present:
if len(req_samples) == 0:
abort("No sets specified (either in file or command line)")
else:
abort("Only modifying sets specified (either in file or command line)")
parameter_sets = parse_all_parameter_sets(all_params, p_parameters, req_samples, instance_wide_defaults)
#
# Final checks for all sets of parameters
#
abort_missing_string =""
abort_choices_string = ""
for ps_name in parameter_sets:
ps = parameter_sets[ps_name]
#
# 1. Are all requested parameters present?
#
for rp in p_parameters:
# Is there a not (yet) set parameter?
if rp not in ps:
# If the missing parameter is a possible shell variable ...
if aps[rp].shell_variable:
# ... get it from the environment
ps[rp] = os.environ[rp]
# Is the parameter still not set?
if rp not in ps:
# Then use the switch default ...
if aps[rp].default_value:
ps[rp] = aps[rp].default_value
else:
# ... or write a "missing note"
abort_missing_string += " " + ps_name + ": '" + rp + "'\n"
#
# 2. Do the params comply to any given choices?
#
for pao in parameters_and_options :
if aps[pao].optparse_choices:
if not str(ps[pao]) in aps[pao].optparse_choices:
abort_choices_string += " " + ps_name + ": '" + pao + "' is '" + str(ps[pao]) + " 'instead one of: " + str(aps[pao].optparse_choices) + "\n"
#
# 3. Translate explicit boolean in True/False
#
for pao in parameters_and_options :
if aps[pao].optparse_type == "explict_boolean":
if ps[pao] == "Yes":
ps[pao] = True
else:
ps[pao] = False
#
# 4. In case of boolean, convert a non-boolean value into True/False
#
for pao in parameters_and_options :
if aps[pao].optparse_type == "boolean":
try:
if type(ps[pao]) != type(True):
temp = str(ps[pao]).strip()
if temp.lower() == "true" or temp == "":
ps[pao] = True
elif temp.lower() == "false":
ps[pao] = False
else:
abort_choices_string += "Option --" + pao + " in parameter file is set to: '" + temp + "'\n (Must either be 'False' or 'True' (or empty, which equals 'True'))"
except KeyError: # an optional boolean switch, that is not set
ps[pao] = False
#
# 5. If requested split values into lists
#
for pao in parameters_and_options:
if aps[pao].value_as_list:
try:
actual_type = type(ps[pao])
if not actual_type == type([]):
ps[pao] = ps[pao].split(",")
except:
pass
abort_string = ""
if abort_missing_string != "":
abort_string += "Missing parameters ...\n" + abort_missing_string
if abort_choices_string != "":
abort_string += "Parameter not one of the possible choices ...\n" + abort_choices_string
if abort_string != "":
abort(abort_string)
return parameter_sets
```
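A hedged sketch of how a driver script might consume this module; the switch names and the switch-definition file below are assumptions chosen for illustration, not switches that necessarily exist in the accompanying configuration. The `SET_<name>` attribute prefix handled by `Parameter_Set.__getattr__` can additionally be used to ask whether a given switch was set at all.

```python
from all_common import get_parameter_sets

# switch names and config file are illustrative assumptions
parameter_sets = get_parameter_sets(
    parameters=["disease_data_set_prefix"],   # required switches (assumed)
    options=["numof_threads"],                # optional switches (assumed)
    description="Example driver script",
    switch_file="all_switches.config",        # switch definitions (assumed path)
)
for pset in parameter_sets:
    # attribute access is routed through Parameter_Set.__getattr__
    print pset.disease_data_set_prefix
```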
#### File: lib/all_scripts/misc_tools.py
```python
import string
from subprocess import call
import csv
import sys
from os.path import *
####################################################################
#
# COMMON FUNCTIONS
#
####################################################################
def read_table_from_csv(file_name, header_starts=["##hdr", "## hdr"], delimiter="\t"):
file = open(file_name )
reader = csv.reader(file, delimiter=delimiter)
result_list = []
column_names = None
for row in reader:
if row != []:
col1 = row[0].strip()
if col1 in header_starts:
# a header line
column_names = []
for cn in row[1:]:
temp = cn.strip()
if temp == "":
abort("Found an 'empty' column name in the header")
column_names.append(temp)
elif col1[0] != "#":
if column_names == None:
abort("Found a data line without any preceeding header line.")
if len(row) > len(column_names):
abort("Data line contains more entries than header line column names.")
# a data line
temp_row = []
for r in row:
temp_row.append(r.strip())
result_list.append( dict (zip ( column_names, temp_row) ) )
else:
pass # ignoring a comment
file.close()
return result_list
def all_members(aClass):
members = {}
bases = list(aClass.__bases__)
bases.reverse()
for base in bases:
members.update(all_members(base))
members.update(vars(aClass))
return members
__item_line_string__=""
def item_line(s="__item_line_none_value__"):
global __item_line_string__
if not s == "__item_line_none_value__":
__item_line_string__ +=str(s).strip() + " "
else:
if not __item_line_string__ == "":
print __item_line_string__
__item_line_string__ = ""
def print_underlined(s, c="-", d=1):
print " " * d + s
print c * (len(s) + d*2)
def print_header(s):
ss = str(s).split("\n")
textlen = 0
for s in ss:
textlen = max(textlen, len(s.strip()))
print
print
print "#" * (textlen + 8)
print "#" + " " * (textlen+6) + "#"
for s in ss:
print "# " + s + " " * (textlen - len(s) + 1) + " #"
print "#" + " " * (textlen+6) + "#"
print "#" * (textlen + 8)
print
def print_sub_header(s):
print
text = " " + str(s).strip() + " "
textlen = len(text)
print "#" * (textlen + 2)
print "#" + text
print "#" * (textlen + 2)
print
def abort(s = "Undefinied reason !", where=None):
if where:
print
print "Aborting script in Class:", str(where.__class__)
print s
print
else:
print
print "Aborting script:"
print s
print
sys.exit (1)
####################################################################
#
# Classe for easy shell execution
#
####################################################################
class Command:
def __init__(self, cmd):
self.__cmd = str(cmd)
self.__args = []
def add(self, arg):
self.adds("",arg)
def adds(self, switch, arg):
a = string.strip(str(arg))
if " " in a:
a = '"' +a + '"'
if self.__args == None:
self.__args = [switch, a]
else:
self.__args.append( str (switch) + " " + a)
def addj(self, *parts):
self.add( join(*parts))
def set_cmd(self,cmd):
self.__cmd = str(cmd)
def get(self):
return str(self.__cmd) + " " + string.join(self.__args, " ")
def reset(self):
self.__cmd = None
self.__args = None
def run(self, ignore_return = False):
if self.__cmd == None:
abort("Shell Command must contain at least a command to run.")
ret = call(self.get(), shell=True)
if ret and not ignore_return:
abort ("Error while running following shell command:\n\n" + self.get())
return ret
```
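A brief sketch of the `Command` helper in use. The executable and file names are assumptions for illustration (the flags shown are standard plink options); the point is the switch/argument assembly and the shell call via `run()`.

```python
from misc_tools import Command, print_header

print_header("QC example")
cmd = Command("plink")                 # base executable (assumed to be on PATH)
cmd.adds("--bfile", "mydata")          # switch plus argument (quoted if it contains spaces)
cmd.adds("--out", "mydata_filtered")
cmd.add("--make-bed")                  # bare flag
print cmd.get()                        # inspect the assembled command line
cmd.run(ignore_return=True)            # run via the shell; don't abort on non-zero exit
```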
#### File: lib/lib/GCTA_mlma_classes.py
```python
import os
import os.path
import sys
import re
import gzip
from copy import deepcopy
class Mlma:
""" class Mlma implements tasks on mlma output files """
def __init__(self, mlma_file, write_file=None, write_file_status_append=False):
""" init """
self._mlma_file = mlma_file # name of mlma file
self._write_file = write_file # name of output file
self._write_file_status_append = write_file_status_append # status of write file: new or append
self._snps_order = [] # order of SNPs from mlma file
# only typed snps not imputed
self._typed = {}
# typed and/or imputed snps
self._typed_imputed = {}
self._snps_order_perchr = [] # order of SNPs from mlma file per chr, list of chr-lists
# for each chr a list, index 0 -> chr0, index 1 -> chr1, ..., index 26 -> chr26
for i in xrange(27):
self._snps_order_perchr.append([])
def map_mlma(self, typed=False):
""" map p-values from mlma file into memory """
try:
fh = file(self._mlma_file,"r")
except IOError, e:
print e
sys.exit(1)
typed_pattern = re.compile("^.*_typed.*$")
imputed_pattern = re.compile("^.*_imputed.*$")
line = fh.readline()
while line:
list = re.split("\s+",line)
# delete empty elements
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
rs = str(list[1])
self._snps_order.append(rs.replace("_typed","").replace("_imputed",""))
# typed only
if typed:
self._typed[rs.replace("_typed","").replace("_imputed","")] = (list[8], "typed")
# typed and/or imputed
else:
if typed_pattern.search(rs):
self._typed_imputed[rs.replace("_typed","")] = (list[8], "typed")
else:
self._typed_imputed[rs.replace("_imputed","")] = (list[8], "imputed")
line = fh.readline()
fh.close()
def free_map(self):
""" free mapped mlma file from memory """
self._snps_order = []
self._typed = {}
self._typed_imputed = {}
def rs_get_typed_hash(self):
""" get the typed rs numbers from file in hash """
return deepcopy(self._typed)
```
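A short usage sketch for the `Mlma` wrapper above; the GCTA `--mlma` output file name is an assumption made for illustration.

```python
from GCTA_mlma_classes import Mlma

mlma = Mlma("trait_chr1.mlma")            # GCTA --mlma output (assumed file name)
mlma.map_mlma(typed=True)                 # keep only directly typed SNPs
typed = mlma.rs_get_typed_hash()          # rs id -> (p-value, "typed")
for rs, (pval, status) in typed.iteritems():
    print rs, pval, status
mlma.free_map()                           # release the in-memory maps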
#### File: lib/lib/gff_classes.py
```python
import os
import os.path
import sys
import re
import gzip
class Gff_snps:
""" class Gff_snps implements tasks on GFF files with snps """
def __init__(self, gff_file, write_file=None, write_file_status_append=False):
""" init """
self._gff_file = gff_file # name of gff file
self._write_file = write_file # name of output file
self._write_file_status_append = write_file_status_append # status of write file: new or append
self._snps_order = [] # order of SNPs from gff file
# only novel snps
self._novel = {} # gff snp : chr, genetic_distance, bp_position, allele1, allele2
# novel and/or known snps
self._novel_known = {} # gff rs : chr, genetic_distance, bp_position, allele1, allele2
self._snps_order_perchr = [] # order of SNPs from gff file per chr, list of chr-lists
# for each chr a list, index 0 -> chr1, index 1 -> chr2, etc.
for i in xrange(26):
self._snps_order_perchr.append([])
def write_file_set(self, write_file):
""" set name of output file """
self._write_file = write_file
def write_file_set_append(self):
""" switch to append mode """
self._write_file_status_append = True
def map_refAllele(self, novel=False):
""" map GFF_snps file into memory """
try:
fh = file(self._gff_file,"r")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
list = re.split("\s+",line)
chr = list[0].replace("chr","")
if chr == "X":
chr = "23"
elif chr == "Y":
chr = "24"
elif chr == "XY":
chr = "25"
elif chr == "M":
chr = "26"
assert(list[2] == list[3])
pos = list[3]
feature_list = list[6:]
if feature_list[-1] == "":
feature_list.pop()
# possible features
feature_ref = re.compile("^ref=")
refAllele = None
for feature in feature_list:
if feature_ref.search(feature):
refAllele = feature.split("ref=")[1].split(";")[0]
key = chr +"->"+ pos
self._snps_order.append(key)
# novel only
if novel:
self._novel[key] = refAllele
# novel and/or known
else:
self._novel_known[key] = refAllele
line = fh.readline()
fh.close()
def map_source_list(self, novel=False):
""" map GFF_snps file into memory """
try:
fh = file(self._gff_file,"r")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
list = re.split("\s+",line)
chr = list[0].replace("chr","")
if chr == "X":
chr = "23"
elif chr == "Y":
chr = "24"
elif chr == "XY":
chr = "25"
elif chr == "M":
chr = "26"
assert(list[2] == list[3])
pos = list[3]
feature_list = list[6:]
if feature_list[-1] == "":
feature_list.pop()
# possible features
feature_sources = re.compile("^sources=")
sources_list = []
for feature in feature_list:
if feature_sources.search(feature):
sources_list = feature.split("sources=")[1].split(";")[0].split(",")
key = chr +"->"+ pos
self._snps_order.append(key)
# novel only
if novel:
# if additional sources
if self._novel.has_key(key):
for source in sources_list:
self._novel[key].append(source)
# if new sources
else:
self._novel[key] = sources_list
# novel and/or known
else:
# if additional sources
if self._novel_known.has_key(key):
for source in sources_list:
self._novel_known[key].append(source)
# if new sources
else:
self._novel_known[key] = sources_list
line = fh.readline()
fh.close()
def free_map(self):
""" free mapped Gff_snp file from memory """
self._snps_order = []
self._novel = {}
self._novel_known = {}
def novel_known_get_feature_hash(self):
""" get the feature for novel and/or known snps from gff file in hash """
return self._novel_known
``` |
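A short usage sketch for `Gff_snps`; the GFF file name is an assumption, and the example only exercises the source-list mapping shown above.

```python
from gff_classes import Gff_snps

gff = Gff_snps("annotated_snps.gff")          # assumed file name
gff.map_source_list()                         # "chr->pos" -> list of annotation sources
sources_by_position = gff.novel_known_get_feature_hash()
for key, sources in sources_by_position.iteritems():
    chrom, pos = key.split("->")
    print chrom, pos, ",".join(sources)
gff.free_map()
```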
{
"source": "jkafader-esnet/pypond",
"score": 2
} |
#### File: pypond/pypond/bases.py
```python
import logging
import os
import time
import types
import warnings
def setup_log(log_path=None): # pragma: no cover
"""
Usage:
_log('main.start', 'happy simple log event')
_log('launch', 'more={0}, complex={1} log=event'.format(100, 200))
"""
# pylint: disable=redefined-variable-type
logger = logging.getLogger("pypond")
if not log_path:
handle = logging.StreamHandler()
else:
# it's on you to make sure log_path is valid.
logfile = '{0}/pypond.log'.format(log_path)
handle = logging.FileHandler(logfile)
handle.setFormatter(logging.Formatter('ts=%(asctime)s %(message)s'))
logger.addHandler(handle)
logger.setLevel(logging.INFO)
return logger
log = setup_log() # pylint: disable=invalid-name
def _log(event, msg): # pragma: no cover
log.info('event=%s id=%s %s', event, int(time.time()), msg)
class PypondBase(object): # pylint: disable=too-few-public-methods
"""
Universal base class. Used to provide common functionality (logging, etc)
to all the other classes.
"""
__slots__ = ('_logger',)
def __init__(self):
"""ctor"""
self._logger = _log
def _log(self, event, msg='', format_args=tuple()): # pragma: no cover
"""Log events if environment variable PYPOND_LOG is set.
Parameters
----------
event : str
The event - ie: 'init.start' and etc.
msg : str
The log message
format_args : tuple
The args to format. This is to keep objects from being stringified
in production which is a performance drag.
"""
if 'PYPOND_LOG' in os.environ:
self._logger(event, msg.format(*format_args))
def _warn(self, msg, warn_type): # pylint: disable=no-self-use
"""Issue a python warning.
Parameters
----------
msg : str
The warning message
warn_type : Exception subclass
Custom warning from pypond.exceptions.
"""
warnings.warn(msg, warn_type, stacklevel=2)
@staticmethod
def _field_path_to_array(fspec):
"""Split the field spec if it is not already a list.
Also, allow for deep fields to be passed in as a tuple because
it will need to be used as a dict key in some of the processor
Options.
This is deployed in Event.get() to process anything passed
to it, but this should also be deployed "upstream" to avoid
having that split() done over and over in a loop.
"""
if isinstance(fspec, list) or isinstance(fspec, types.FunctionType):
# corner case, but Event.map() takes field_spec as a
# function, so let that pass through just in case.
return fspec
elif isinstance(fspec, tuple):
return list(fspec)
elif isinstance(fspec, str):
return fspec.split('.')
if fspec is None:
return ['value']
# base classes for pipeline sources, etc
class Observable(PypondBase):
"""
Base class for objects in the processing chain which
need other object to listen to them. It provides a basic
interface to define the relationships and to emit events
to the interested observers.
"""
def __init__(self):
super(Observable, self).__init__()
self._observers = list()
def emit(self, event):
"""add event to observers."""
for i in self._observers:
i.add_event(event)
def flush(self):
"""flush observers."""
self._log('Observable.flush')
for i in self._observers:
if hasattr(i, 'flush'):
self._log('Observable.flush')
i.flush()
def add_observer(self, observer):
"""add an observer if it does not already exist."""
self._log('Observable.add_observer', 'obs: {0}', (observer,))
should_add = True
for i in self._observers:
if i == observer:
should_add = False
if should_add:
self._observers.append(observer)
def has_observers(self):
"""does the object have observers?"""
return bool(len(self._observers) > 0)
```
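A toy sketch of the `Observable` base class in action; the `EventPrinter` observer below is an illustrative stand-in for the real pipeline processors, which only need to expose an `add_event()` method (and optionally `flush()`).

```python
from pypond.bases import Observable


class EventPrinter(object):
    """Minimal observer: anything with add_event() can be attached."""

    def add_event(self, event):
        print('got event: {0}'.format(event))


source = Observable()
source.add_observer(EventPrinter())
print(source.has_observers())   # True
source.emit('tick')             # calls add_event() on every observer
source.flush()                  # observers without a flush() method are skipped
```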
#### File: pypond/pypond/exceptions.py
```python
class EventException(Exception):
"""Custom Event exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class EventWarning(Warning):
"""Custom Event warning"""
pass
class TimeRangeException(Exception):
"""Custom TimeRange exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class TimeRangeWarning(Warning):
"""Custom TimeRange warning"""
pass
class IndexException(Exception):
"""Custom Index exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class IndexWarning(Warning):
"""Custom Index warning"""
pass
class UtilityException(Exception):
"""Custom Utility exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class UtilityWarning(Warning):
"""Custom Utility warning"""
pass
class PipelineException(Exception):
"""Custom Pipeline exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class PipelineWarning(Warning):
"""Custom Pipeline warning"""
pass
class PipelineIOException(Exception):
"""Custom PipelineIO exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class PipelineIOWarning(Warning):
"""Custom PipelineIO warning"""
pass
class CollectionException(Exception):
"""Custom Collection exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class CollectionWarning(Warning):
"""Custom Collection warning"""
pass
class TimeSeriesException(Exception):
"""Custom TimeSeries exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class TimeSeriesWarning(Warning):
"""Custom TimeSeries warning"""
pass
class ProcessorException(Exception):
"""Custom Processor exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class ProcessorWarning(Warning):
"""Custom Processor warning"""
pass
class FilterException(Exception):
"""Custom Filter exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class FilterWarning(Warning):
"""Custom Filter warning"""
pass
class FunctionException(Exception):
"""Custom Function exception"""
def __init__(self, value):
# pylint: disable=super-init-not-called
self.value = value
def __str__(self): # pragma: no cover
return repr(self.value)
class FunctionWarning(Warning):
"""Custom Function warning"""
pass
NAIVE_MESSAGE = 'non-naive (aware) datetime objects required'
```
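All of the exception classes above follow the same value-carrying pattern; a brief illustration of raising and trapping one of them (the message text is made up):

```python
from pypond.exceptions import TimeRangeException

try:
    raise TimeRangeException('begin must be earlier than end')
except TimeRangeException as err:
    print(err.value)   # the payload is carried on the .value attribute
```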
#### File: pypond/pypond/index.py
```python
import copy
import datetime
import re
from .bases import PypondBase
from .exceptions import IndexException, IndexWarning
from .range import TimeRange
from .util import (
aware_dt_from_args,
dt_from_ms,
localtime_from_ms,
localtime_info_from_utc,
monthdelta,
ms_from_dt,
sanitize_dt,
)
UNITS = dict(
s=dict(label='seconds', length=1),
m=dict(label='minutes', length=60),
h=dict(label='hours', length=3600),
d=dict(label='days', length=86400),
)
class Index(PypondBase):
"""
An index that represents as a string a range of time. That range may either
be in UTC or local time. UTC is the default.
The actual derived timerange can be found using asRange(). This will return
a TimeRange instance.
The original string representation can be found with toString(). A nice
version for date based indexes (e.g. 2015-03) can be generated with
toNiceString(format) (e.g. March, 2015).
The index string arg may be of two forms:
- 2015-07-14 (day)
- 2015-07 (month)
- 2015 (year)
or:
- 1d-278 (range, in n x days, hours, minutes or seconds)
Parameters
----------
s : str
The index string in one of the aforementioned formats.
utc : bool, optional
Index interpreted as UTC or localtime. Please don't set this to false
since non-UTC times are the devil.
Raises
------
IndexException
Raised if arg s could not be translated into a valid timerange/index.
"""
def __init__(self, s, utc=True):
"""Create the Index.
"""
super(Index, self).__init__()
self._utc = utc
self._string = s
# keep track of what kind of index it is to simplify other things.
self._index_type = None
self._timerange = self.range_from_index_string(self._string, self._utc)
if self._index_type is None:
raise IndexException('could not determine timerange/index type from {arg}'.format(
arg=s))
def to_json(self):
"""
Returns the Index as JSON, which will just be its string
representation
This is actually like json.loads(s) - produces the
actual data structure.
Returns
-------
str
The index string as previously outlined.
"""
return self._string
def to_string(self):
"""
Simply returns the Index as its string
In JS land, this is synonymous with __str__ or __unicode__
Returns
-------
str
The index string as previously outlined.
"""
return self._string
def to_nice_string(self, fmt=None):
"""
For the calendar range style Indexes, this lets you return
that calendar range as a human readable format, e.g. "June, 2014".
The format specified is a Moment.format.
Originally implemented at Util.niceIndexString in the JS source,
this is just a greatly simplified version using self._index_type.
Parameters
----------
fmt : str, optional
User can pass in a valid strftime() format string.
Returns
-------
str
The index string as a formatted (strftime()) time.
"""
if fmt is not None and self._index_type in ('day', 'month', 'year'):
return self.begin().strftime(fmt)
if self._index_type == 'day':
return self.begin().strftime('%B %-d %Y')
elif self._index_type == 'index':
return self._string
elif self._index_type == 'month':
return self.begin().strftime('%B')
elif self._index_type == 'year':
return self.begin().strftime('%Y')
def as_string(self):
"""Alias for to_string()
Returns
-------
str
The index string as previously outlined.
"""
return self.to_string()
def as_timerange(self):
"""Returns the Index as a TimeRange
Returns
-------
TimeRange
The underlying time range object.
"""
return self._timerange
def begin(self):
"""Returns start date of the index.
Returns
-------
datetime.datetime
Start date of the index.
"""
return self.as_timerange().begin()
def end(self):
"""Returns end date of the index.
Returns
-------
datetime.datetime
End date of the index.
"""
return self.as_timerange().end()
def __str__(self):
"""call to_string()
Returns
-------
str
String representation of the object.
"""
return self.to_string()
@property
def utc(self):
"""accessor for internal utc boolean."""
return self._utc
# utility methods
def _local_idx_warning(self, local=False):
"""blanket warning to avoid if statements and make pylint happy."""
if local:
msg = 'year/month/day indexes will be coerced to UTC from localtime'
self._warn(msg, IndexWarning)
def range_from_index_string(self, idx_str, is_utc=True): # pylint: disable=too-many-locals, too-many-statements
"""
Generate the time range from the idx string.
The index string arg may be of two forms:
- 2015-07-14 (day)
- 2015-07 (month)
- 2015 (year)
or:
- 1d-278 (range, in n x days, hours, minutes or seconds)
and return a TimeRange for that time. The TimeRange may be considered to be
local time or UTC time, depending on the utc flag passed in.
This was in src/util.js in the original project, but the only thing using
the code in that util.js was the Index class, and it makes more sense
having this as a class method and setting self._index_type makes further
regex analysis of the index unnecessary.
Parameters
----------
idx_str : str
The index string in one of the aforementioned formats.
is_utc : bool, optional
Index interpreted as utc or localtime. Please don't use localtime.
Returns
-------
TimeRange
A time range made from the interpreted index string.
Raises
------
IndexException
Raised when the string format is determined to be invalid.
"""
parts = idx_str.split('-')
num_parts = len(parts)
begin_time = None
end_time = None
local = False if is_utc else True
if num_parts == 3:
# 2015-07-14 (day)
self._index_type = 'day'
try:
year = int(parts[0])
month = int(parts[1])
day = int(parts[2])
except ValueError:
msg = 'unable to parse integer year/month/day from {arg}'.format(arg=parts)
raise IndexException(msg)
self._local_idx_warning(local)
dtargs = dict(year=year, month=month, day=day)
begin_time = aware_dt_from_args(dtargs, localize=local)
end_time = (begin_time + datetime.timedelta(days=1)) - datetime.timedelta(seconds=1)
elif num_parts == 2:
range_re = re.match('([0-9]+)([smhd])', idx_str)
if range_re:
# 1d-278 (range, in n x days, hours, minutes or seconds)
self._index_type = 'index'
try:
pos = int(parts[1]) # 1d-278 : 278
num = int(range_re.group(1)) # 1d-278 : 1
except ValueError:
msg = 'unable to parse valid integers from {s}'.format(s=idx_str)
msg += ' tried elements {pos} and {num}'.format(
pos=parts[1], num=range_re.group(1))
raise IndexException(msg)
unit = range_re.group(2) # 1d-278 : d
# num day/hr/etc units * seconds in that unit * 1000
length = num * UNITS[unit].get('length') * 1000
# pos * length = ms since epoch
begin_time = dt_from_ms(pos * length) if is_utc else \
localtime_from_ms(pos * length)
# (pos + 1) * length is one hour/day/minute/etc later
end_time = dt_from_ms((pos + 1) * length) if is_utc else \
localtime_from_ms((pos + 1) * length)
else:
# 2015-07 (month)
self._index_type = 'month'
try:
year = int(parts[0])
month = int(parts[1])
except ValueError:
msg = 'unable to parse integer year/month from {arg}'.format(arg=parts)
raise IndexException(msg)
self._local_idx_warning(local)
dtargs = dict(year=year, month=month, day=1)
begin_time = aware_dt_from_args(dtargs, localize=local)
end_time = monthdelta(begin_time, 1) - datetime.timedelta(seconds=1)
elif num_parts == 1:
# 2015 (year)
self._index_type = 'year'
try:
year = int(parts[0])
except ValueError:
msg = 'unable to parse integer year from {arg}'.format(arg=parts[0])
raise IndexException(msg)
self._local_idx_warning(local)
dtargs = dict(year=year, month=1, day=1)
begin_time = aware_dt_from_args(dtargs, localize=local)
end_time = begin_time.replace(year=year + 1) - datetime.timedelta(seconds=1)
if begin_time and end_time:
return TimeRange(begin_time, end_time)
else:
return None
# Static class methods
# The two window_* methods were in util.js in the pond source but
# they were only being called from this code, so here they are.
@staticmethod
def window_duration(win):
"""duration in ms given a window duration string.
previously: Generator.getLengthFromSize.
Parameters
----------
win : str
An index string in the previously mentioned 1d-278 style format.
Returns
-------
int
Duration of the index/range in ms.
"""
range_re = re.match('([0-9]+)([smhd])', win)
if range_re:
# normally would try/except, but the regex ensures it'll be a number
num = int(range_re.group(1))
unit = range_re.group(2)
return num * UNITS[unit].get('length') * 1000
else:
return None
@staticmethod
def window_position_from_date(win, dtime):
"""window position from datetime object. Called by get_index_string_list().
previously: Generator.getBucketPosFromDate
Parameters
----------
win : str
Prefix if the index string.
dtime : datetime.datetime
Datetime to calculate suffix from.
Returns
-------
int
The suffix for the index string.
"""
duration = Index.window_duration(win)
ddms = ms_from_dt(sanitize_dt(dtime))
return int(ddms / duration)
@staticmethod
def get_index_string(win, dtime):
"""Return the index string given an index prefix and a datetime
object. Example usage follows.
::
dtime = aware_dt_from_args(
dict(year=2015, month=3, day=14, hour=7, minute=32, second=22))
idx_str = Index.get_index_string('5m', dtime)
self.assertEqual(idx_str, '5m-4754394')
previously: Generator.bucketIndex
Parameters
----------
win : str
Prefix of the index string.
dtime : datetime.datetime
Datetime to generate index string from.
Returns
-------
str
The index string.
"""
pos = Index.window_position_from_date(win, dtime)
return '{win}-{pos}'.format(win=win, pos=pos)
@staticmethod
def get_index_string_list(win, timerange):
"""Given the time range, return a list of strings of index values
every <prefix> tick. Example usage follows (from test suite).
::
dtime_1 = aware_dt_from_args(
dict(year=2015, month=3, day=14, hour=7, minute=30, second=0))
dtime_2 = aware_dt_from_args(
dict(year=2015, month=3, day=14, hour=8, minute=29, second=59))
idx_list = Index.get_index_string_list('5m', TimeRange(dtime_1, dtime_2))
self.assertEqual(len(idx_list), 12)
self.assertEqual(idx_list[0], '5m-4754394')
self.assertEqual(idx_list[-1], '5m-4754405')
previously: Generator.bucketIndexList
Parameters
----------
win : str
Prefix of the index string.
timerange : TimeRange
Time range object to generate index string from
Returns
-------
list
A list of strings of index values at every "tick" in the range
specified.
"""
pos1 = Index.window_position_from_date(win, timerange.begin())
pos2 = Index.window_position_from_date(win, timerange.end())
idx_list = list()
if pos1 <= pos2:
pos = copy.copy(pos1)
while pos <= pos2:
idx_list.append('{win}-{pos}'.format(win=win, pos=pos))
pos += 1
return idx_list
@staticmethod
def get_daily_index_string(date, utc=True):
"""Generate an index string with day granularity.
Parameters
----------
date : datetime.datetime
An aware UTC datetime object
utc : bool, optional
Render the index in local time. This is used for display purposes,
e.g. to render charts in a localized way.
Returns
-------
string
The formatted index string.
"""
year = date.year if utc else localtime_info_from_utc(date).get('year')
month = date.strftime('%m') if utc else localtime_info_from_utc(date).get('month')
day = date.strftime('%d') if utc else localtime_info_from_utc(date).get('day')
return '{y}-{m}-{d}'.format(y=year, m=month, d=day)
@staticmethod
def get_monthly_index_string(date, utc=True):
"""Generate an index string with month granularity.
Parameters
----------
date : datetime.datetime
An aware UTC datetime object
utc : bool, optional
Render the index in local time. This is used for display purposes,
e.g. to render charts in a localized way.
Returns
-------
string
The formatted index string.
"""
year = date.year if utc else localtime_info_from_utc(date).get('year')
month = date.strftime('%m') if utc else localtime_info_from_utc(date).get('month')
return '{y}-{m}'.format(y=year, m=month)
@staticmethod
def get_yearly_index_string(date, utc=True):
"""Generate an index string with year granularity.
Parameters
----------
date : datetime.datetime
An aware UTC datetime object
utc : bool, optional
Render the index in local time. This is used for display purposes,
e.g. to render charts in a localized way.
Returns
-------
string
The formatted index string.
"""
year = date.year if utc else localtime_info_from_utc(date).get('year')
return '{y}'.format(y=year)
```
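The Index class above is mostly documented through its own docstring examples. As a quick illustration, here is a minimal usage sketch built only from the formats and return values those docstrings describe; the imports assume the `pypond.index` and `pypond.util` module layout shown in this file's header.

```python
from pypond.index import Index
from pypond.util import aware_dt_from_args

# calendar-style index covering the whole of March 2015 (UTC)
idx = Index('2015-03')
print(idx.to_nice_string())      # 'March'
print(idx.begin(), idx.end())    # first and last second of the month

# range-style index: which 5-minute bucket does this instant fall into?
dtime = aware_dt_from_args(
    dict(year=2015, month=3, day=14, hour=7, minute=32, second=22))
print(Index.get_index_string('5m', dtime))   # '5m-4754394' per the docstring
```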
#### File: pypond/pypond/pipeline.py
```python
from pyrsistent import pmap
from .bases import PypondBase
from .event import Event
from .exceptions import PipelineException, PipelineWarning
from .indexed_event import IndexedEvent
from .io.input import Bounded, Stream
from .io.output import CollectionOut, EventOut
from .processor import (
Aggregator,
Align,
Collapser,
Converter,
Filler,
Filter,
Mapper,
Offset,
Processor,
Rate,
Selector,
Taker,
)
from .series import TimeSeries
from .timerange_event import TimeRangeEvent
from .util import is_pmap, Options, is_function, Capsule
class Runner(PypondBase): # pylint: disable=too-few-public-methods
"""
A runner is used to extract the chain of processing operations
from a Pipeline given an Output. The idea here is to traverse
back up the Pipeline(s) and build an execution chain.
When the runner is started, events from the "in" are streamed
into the execution chain and output into the "out".
Rebuilding in this way enables us to handle connected pipelines:
::
|--
in --> pipeline ---.
|----pipeline ---| -> out
The runner breaks this into the following for execution:
::
_input - the "in" or from() bounded input of
the upstream pipeline
_processChain - the process nodes in the pipelines
leading to the out
_output - the supplied output destination for
the batch process
NOTE: There's no current way to merge multiple sources, though
a time series has a TimeSeries.merge() static method for
this purpose.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
output : PipelineOut
The output driving this runner
"""
def __init__(self, pline, output):
"""Create a new batch runner"""
super(Runner, self).__init__()
self._log('Runner.init')
self._pipeline = pline
self._output = output
self._input = None
self._execution_chain = list()
# We use the pipeline's chain() function to walk the
# DAG back up the tree to the "in" to:
# 1) assemble a list of process nodes that feed into
# this pipeline, the processChain
# 2) determine the _input
#
# NOTE: we do not currently support merging, so this is
# a linear chain.
process_chain = list()
if self._pipeline.last() is not None:
process_chain = self._pipeline.last().chain()
self._input = process_chain[0].pipeline().input()
else:
self._input = self._pipeline.input()
# Using the list of nodes in the tree that will be involved in
# our processing we can build an execution chain. This is the
# chain of processor clones, linked together, for our specific
# processing pipeline. We run this execution chain later by
# evoking start().
self._execution_chain = [self._output]
prev = self._output
for i in process_chain:
if isinstance(i, Processor):
processor = i.clone()
if prev is not None:
processor.add_observer(prev)
self._execution_chain.append(processor)
prev = processor
def start(self, force=False):
"""Start the runner
Args:
force (bool, optional): force Flush at the end of the batch source
to cause any buffers to emit.
"""
self._log('Runner.start', 'starting')
# Clear any results ready for the run
self._pipeline.clear_results()
# The head is the first process node in the execution chain.
# To process the source through the execution chain we add
# each event from the input to the head.
head = self._execution_chain.pop()
for i in self._input.events():
head.add_event(i)
# The runner indicates that it is finished with the bounded
# data by sending a flush() call down the chain. If force is
# set to false (the default) this is never called.
if force is True:
self._log('Runner.start', 'flushing')
head.flush()
def default_callback(*args): # pylint: disable=unused-argument
"""Default no-op callback for group_by in the Pipeline constructor."""
return None
class Pipeline(PypondBase): # pylint: disable=too-many-public-methods
"""
Build a new Pipeline.
A pipeline manages a processing chain, for either batch or stream processing
of collection data.
The argument may be either:
- a Pipeline (copy ctor)
- a pyrsistent.PMap in which case the internal state will be constructed from the map.
Usually you would initialize a Pipeline using the factory function,
rather than this object directly.
Parameters
----------
arg : Pipeline, PMap, None
See above.
"""
def __init__(self, arg=None):
"""New pipeline."""
super(Pipeline, self).__init__()
# sorry pylint, that's just how it goes sometimes
# pylint: disable=invalid-name, protected-access
self._log('Pipeline.init')
if isinstance(arg, Pipeline):
self._d = arg._d
elif is_pmap(arg):
self._d = arg
else:
self._d = pmap(
dict(
type=None,
input=None, # renamed from 'in' in the JS source
first=None,
last=None,
group_by=default_callback,
window_type='global',
window_duration=None,
emit_on='eachEvent',
utc=True,
)
)
self._results = list()
self._results_done = False
# Accessors to the current Pipeline state
def input(self):
"""Originally called in() in JS code."""
return self._d.get('in')
def mode(self):
"""Get the pipeline mode (ie: batch, stream).
Returns
-------
str
The mode.
"""
return self._d.get('mode')
def first(self):
"""Get the first processor
Returns
-------
Processor
A pipeline processor.
"""
return self._d.get('first')
def last(self):
"""Get the last processor
Returns
-------
Processor
A pipeline processor.
"""
return self._d.get('last')
def get_window_type(self):
"""Get the window type (global, etc).
Returns
-------
str
The window type.
"""
return self._d.get('window_type')
def get_window_duration(self):
"""Get the window duration.
Returns
-------
str
A formatted window duration.
"""
return self._d.get('window_duration')
def get_group_by(self):
"""Get the group by callback.
Returns
-------
function
Returns the group by function.
"""
return self._d.get('group_by')
def get_emit_on(self):
"""Get the emit on (eachEvent, etc).
Returns
-------
str
The emit on string (discards, flush, etc).
"""
return self._d.get('emit_on')
def get_utc(self):
"""Get the UTC state..
Returns
-------
bool
In UTC or not.
"""
return self._d.get('utc')
# Results
def clear_results(self):
"""Clear the result state of this Pipeline instance."""
self._results = None
self._results_done = False
def add_result(self, arg1, arg2=None):
"""Add the incoming result from the processor callback.
Parameters
----------
arg1 : str
Collection key string.
arg2 : Collection or str
Generally the incoming collection.
"""
if self._results is None:
if isinstance(arg1, str) and arg2 is not None:
self._results = dict()
else:
self._results = list() # pylint: disable=redefined-variable-type
if isinstance(arg1, str) and arg2 is not None:
self._results[arg1] = arg2
else:
self._results.append(arg1)
self._results_done = False
def results_done(self):
"""Set result state as done."""
self._results_done = True
#
# Pipeline mutations
#
def _set_in(self, pipe_in):
"""
Setting the In for the Pipeline returns a new Pipeline.
"""
self._log('Pipeline._set_in', 'in: {0}', (pipe_in,))
mode = None
source = pipe_in
if isinstance(pipe_in, TimeSeries):
mode = 'batch'
source = pipe_in.collection()
elif isinstance(pipe_in, Bounded):
mode = 'batch'
elif isinstance(pipe_in, Stream):
mode = 'stream'
else: # pragma: no cover
# .from_source() already bulletproofs against this
msg = 'Unknown input type'
raise PipelineException(msg)
new_d = self._d.update({'in': source, 'mode': mode})
return Pipeline(new_d)
def _set_first(self, node): # pragma: no cover
"""
Set the first processing node pointed to, returning
a new Pipeline. The original pipeline will still point
to its original processing node.
Currently unused.
"""
new_d = self._d.set('first', node)
return Pipeline(new_d)
def _set_last(self, node): # pragma: no cover
"""
Set the last processing node pointed to, returning
a new Pipeline. The original pipeline will still point
to its original processing node.
Currently unused.
"""
new_d = self._d.set('last', node)
return Pipeline(new_d)
def _append(self, processor):
# self._log('Pipeline._append', 'processor: {0}'.format(processor))
first = self.first()
last = self.last()
if first is None:
first = processor
if last is not None:
last.add_observer(processor)
last = processor
new_d = self._d.update({'first': first, 'last': last})
return Pipeline(new_d)
# Pipeline state chained methods
def window_by(self, window_or_duration=None, utc=True):
"""
Set the window, returning a new Pipeline. A new window will
have a type and duration associated with it. Current available
types are:
* fixed (e.g. every 5m)
* calendar based windows (e.g. every month)
Windows are a type of grouping. Typically you'd define a window
on the pipeline before doing an aggregation or some other operation
on the resulting grouped collection. You can combine window-based
grouping with key-grouping (see groupBy()).
There are several ways to define a window. The general format is
an options object containing a `type` field and a `duration` field.
Currently the only accepted type is `fixed`, but others are planned.
For duration, this is a duration string, for example "30s" or "1d".
Supported are: seconds (s), minutes (m), hours (h) and days (d).
The argument here is either a string or an object with string
attrs type and duration. The arg can be either a window or a duration.
If no arg is supplied or set to None, the window_type is set
to 'global' and there is no duration.
There is also a short-cut notation for a fixed window or a calendar
window. Simply supplying the duration string ("30s" for example) will
result in a `fixed` window type with the supplied duration.
Window *window_or_duration* may be:
* A fixed interval duration (see next): "fixed"
* A calendar interval: "daily," "monthly" or "yearly"
Duration is of the form:
* "30s" or "1d" etc - supports seconds (s), minutes (m), hours (h),
days (d). When duration is passed as the arg, window_type is
set to 'fixed'.
Parameters
----------
window_or_duration : string, Capsule
See above.
utc : bool
How to render the aggregations - in UTC vs. the user's local time.
Can not be set to False if using a fixed window size.
Returns
-------
Pipeline
The Pipeline.
"""
self._log(
'Pipeline.window_by',
'window_or_duration: {0} utc: {1}', (window_or_duration, utc)
)
w_type = None
duration = None
if isinstance(window_or_duration, str):
if window_or_duration == 'daily' or window_or_duration == 'monthly' \
or window_or_duration == 'yearly':
w_type = window_or_duration
else:
w_type = 'fixed'
duration = window_or_duration
if utc is False:
self._warn(
'Can not set utc=False w/a fixed window size - resetting to utc=True',
PipelineWarning
)
utc = True
elif isinstance(window_or_duration, Capsule):
w_type = window_or_duration.type
duration = window_or_duration.duration
else:
w_type = 'global'
duration = None
new_d = self._d.update(dict(window_type=w_type, window_duration=duration, utc=utc))
self._log(
'Pipeline.window_by',
'new_d: {0}', (new_d,)
)
return Pipeline(new_d)
def clear_window(self):
"""
Remove windowing from the Pipeline. This will
return the pipeline to no window grouping. This is
useful if you have first done some aggregation by
some window size and then wish to collect together
the all resulting events.
Returns
-------
Pipeline
The Pipeline
"""
self._log('Pipeline.clear_window')
return self.window_by()
def group_by(self, key=None):
"""
Sets a new groupBy expression. Returns a new Pipeline.
Grouping is a state set on the Pipeline. Operations downstream
of the group specification will use that state. For example, an
aggregation would occur over any grouping specified.
The key to group by. You can pass in a function that takes an
event as an arg and dynamically returns the group by key.
Otherwise key will be interpreted as a field_path:
* a single field name or deep.column.path, or
* an array style field_path ['deep', 'column', 'path'] to a single
column.
This is not a list of multiple columns, it is the path to
a single column to pull group by keys from. For example,
a column called 'status' that contains the values 'OK' and
'FAIL' - the key would be 'status' and two collections
OK and FAIL will be generated.
If key is None, then the default column 'value' will
be used.
Parameters
----------
key : function, list or string
The key to group by. See above.
Returns
-------
Pipeline
The Pipeline
"""
grp = None
if is_function(key):
grp = key
elif isinstance(key, (str, list, tuple)):
def get_callback(event):
"""gb a column value."""
return event.get(key)
grp = get_callback # pylint: disable=redefined-variable-type
else:
grp = default_callback
new_d = self._d.update(dict(group_by=grp))
return Pipeline(new_d)
def clear_group_by(self):
"""
Remove the grouping from the pipeline. In other words
recombine the events.
Returns
-------
Pipeline
The Pipeline
"""
return self.group_by()
def emit_on(self, trigger):
"""
Sets the condition under which an accumulated collection will
be emitted. If specified before an aggregation this will control
when the resulting event will be emitted relative to the
window accumulation. Current options are:
* to emit on every event, or
* just when the collection is complete, or
* when a flush signal is received, either manually calling done(),
or at the end of a bounded source.
The strings indicating how a Collection should be emitted can be:
* "eachEvent" - when a new event comes in, all currently maintained
collections will emit their result.
* "discard" - when a collection is to be discarded, first it will
emit. But only then.
* "flush" - when a flush signal is received.
The difference will depend on the output you want, how often
you want to get updated, and if you need to get a partial state.
There's currently no support for late data or watermarks. If an
event comes in after a collection window, that collection
is considered finished.
Parameters
----------
trigger : string
See above
Returns
-------
Pipeline
The Pipeline
"""
new_d = self._d.set('emit_on', trigger)
return Pipeline(new_d)
# I/O
def from_source(self, src):
"""
Note: originally named from() in JS code.
The source to get events from. The source needs to be able to
iterate its events using `for..of` loop for bounded Ins, or
be able to emit() for unbounded Ins. The actual batch, or stream
connection occurs when an output is defined with `to()`.
Pipelines can be chained together since a source may be another
Pipeline.
Parameters
----------
src : Bounded, Stream or Pipeline
The source for the Pipeline, or another Pipeline.
Returns
-------
Pipeline
The Pipeline.
"""
self._log('Pipeline.from_source', 'called with: {0}', (src,))
if isinstance(src, (Bounded, Stream, TimeSeries)):
return self._set_in(src)
else:
msg = 'from_source() only takes Pipeline, Bounded or Stream got: {0}'.format(src)
raise PipelineException(msg)
def to_event_list(self):
"""Directly return the results from the processor rather than
passing a callback in.
Returns
-------
list or dict
Returns the _results attribute with events.
"""
return self.to(EventOut)
def to_keyed_collections(self):
"""Directly return the results from the processor rather than
passing a callback in.
Returns
-------
list or dict
Returns the _results attribute from a Pipeline object after processing.
Will contain Collection objects.
"""
ret = self.to(CollectionOut)
if ret is not None:
return ret
else:
# return an empty dict so any calls to collection.get() won't cause
# things to unceremoniously blow up and just return None instead.
return dict()
def to(self, out, observer=None, options=Options()): # pylint: disable=invalid-name
"""
Sets up the destination sink for the pipeline.
For a batch mode connection, i.e. one with a Bounded source,
the output is connected to a clone of the parts of the Pipeline dependencies
that lead to this output. This is done by a Runner. The source input is
then iterated over to process all events into the pipeline and through to the Out.
For stream mode connections, the output is connected and from then on
any events added to the input will be processed down the pipeline to
the out.
::
def cback(event):
do_something_with_the_event(event)
timeseries = TimeSeries(IN_OUT_DATA)
(
Pipeline()
.from_source(timeseries)
.emit_on('flush')
.collapse(['in', 'out'], 'total', Functions.sum())
.aggregate(dict(total=Functions.max()))
.to(EventOut, cback)
)
NOTE: arg list has been changed from the ordering in the JS source
to conform to python convention.
Parameters
----------
out : EventOut, CollectionOut, etc instance
The output.
observer : function or instance
The observer.
options : Options, optional
Options.
Returns
-------
Pipeline
The Pipeline.
"""
self._log(
'Pipeline.to',
'out: {0}, obs: {1}, opt: {2} mode: {3}',
(out, observer, options, self.mode())
)
Out = out # pylint: disable=invalid-name
if self.input() is None:
msg = 'Tried to eval pipeline without an In. Missing from() in chain?'
raise PipelineException(msg)
out = Out(self, observer, options)
if self.mode() == 'batch':
runner = Runner(self, out)
runner.start(True)
if self._results_done and observer is None:
return self._results
elif self.mode() == 'stream':
out = Out(self, observer, options)
if self.first():
self.input().add_observer(self.first())
if self.last():
self.last().add_observer(out)
else:
self.input().add_observer(out)
return self
def count(self, observer, force=True):
"""
Outputs the count of events.
Parameters
----------
observer : function
The callback function. This function will be passed
(collection.size(), window_key, group_by_key) as args.
force : bool, optional
Flush at the end of processing batch events, output again with possibly
partial result
Returns
-------
Pipeline
The Pipeline.
"""
def override(collection, window_key, group_by_key):
"""
This overrides the default behavior of CollectionOut
that passes collection/wkey/gbkey to the callback
passed in.
"""
observer(collection.size(), window_key, group_by_key)
return self.to(CollectionOut, override, force)
def offset_by(self, offset_by, field_spec=None):
"""
Processor to offset a set of fields by a value. Mostly used for
testing processor and pipeline operations with a simple operation.
Parameters
----------
offset_by : int, float
The amount to offset by.
field_spec : str, list, tuple, None, optional
Column or columns to look up. To retrieve multiple deep nested
values use a list like ['can.be', 'done.with', 'this.notation'];
a single deep value can be expressed with a string.like.this.
If None, the default 'value' column will be used.
Returns
-------
Pipeline
The modified Pipeline.
"""
self._log('Pipeline.offset_by', 'offset: {0}', (offset_by,))
offset = Offset(
self,
Options(
by=offset_by,
field_spec=field_spec,
prev=self.last() if self.last() else self
)
)
return self._append(offset)
def aggregate(self, fields):
"""
Uses the current Pipeline windowing and grouping
state to build collections of events and aggregate them.
IndexedEvents will be emitted out of the aggregator based
on the `emitOn` state of the Pipeline.
To specify what part of the incoming events should
be aggregated together you specify a `fields`
object. This is a map from fieldName to operator.
::
uin = Stream()
(
Pipeline()
.from_source(uin)
.window_by('1h')
.emit_on('eachEvent')
.aggregate(
{
'in_avg': {'in': Functions.avg()},
'out_avg': {'out': Functions.avg()}
}
)
.to(EventOut, cback)
)
Parameters
----------
fields : dict
Fields and operators to be aggregated. Deep fields may be
indicated by using this.style.notation. As in the above
example, the fields.keys() are the names of the new
columns to be created (or an old one to be overwritten),
and the value is another dict - the key is the existing
column and the value is the function to apply to it when
creating the new column.
Returns
-------
Pipeline
The Pipeline
"""
agg = Aggregator(
self,
Options(
fields=fields,
prev=self._chain_last()
)
)
return self._append(agg)
def _chain_last(self):
"""Get the operative last for the processors
Returns
-------
Pipeline
Returns either self.last() or self
"""
return self.last() if self.last() is not None else self
def map(self, op): # pylint: disable=invalid-name
"""
Map the event stream using an operator.
Parameters
----------
op : function
A function that returns a new Event.
Returns
-------
Pipeline
The Pipeline.
"""
res = Mapper(self, Options(op=op, prev=self._chain_last()))
return self._append(res)
def filter(self, op): # pylint: disable=invalid-name
"""
Filter the event stream using an operator
Parameters
----------
op : function
A function that returns True or False
Returns
-------
Pipeline
The Pipeline
"""
flt = Filter(
self,
Options(
op=op,
prev=self._chain_last(),
)
)
return self._append(flt)
def select(self, field_spec=None):
"""
Select a subset of columns.
Parameters
----------
field_spec : str, list, tuple, None, optional
Column or columns to look up. To retrieve multiple deep nested
values use a list like ['can.be', 'done.with', 'this.notation'];
a single deep value can be expressed with a string.like.this.
If None, the default 'value' column will be used.
Returns
-------
Pipeline
The Pipeline.
"""
sel = Selector(
self,
Options(
field_spec=field_spec,
prev=self._chain_last(),
)
)
return self._append(sel)
def collapse(self, field_spec_list, name, reducer, append=True):
"""
Collapse a subset of columns using a reducer function.
Parameters
----------
field_spec_list : list
List of columns to collapse. Deep nested values can be retrieved
with a list like ['can.be', 'done.with', 'this.notation'].
name : string
The resulting output column's name.
reducer : function
Function to use to do the reduction.
append : bool
Add the new column to the existing ones, or replace them.
Returns
-------
Pipeline
The Pipeline.
"""
coll = Collapser(
self,
Options(
field_spec_list=field_spec_list,
name=name,
reducer=reducer,
append=append,
prev=self._chain_last(),
)
)
return self._append(coll)
def fill(self, field_spec=None, method='zero', fill_limit=None):
"""Take the data in this timeseries and "fill" any missing
or invalid values. This could be setting None values to zero
so mathematical operations will succeed, interpolate a new
value, or pad with the previously given value.
Parameters
----------
field_spec : str, list, tuple, None, optional
Column or columns to look up. To retrieve multiple deep nested
values use a list like ['can.be', 'done.with', 'this.notation'];
a single deep value can be expressed with a string.like.this.
If None, the default column field 'value' will be used.
method : str, optional
Filling method: zero | linear | pad
fill_limit : None, optional
Set a limit on the number of consecutive events that will be filled
before it starts returning invalid values. For linear fill,
no filling will happen if the limit is reached before a valid
value is found.
Returns
-------
Pipeline
The Pipeline.
"""
fill = Filler(
self,
Options(
field_spec=field_spec,
method=method,
fill_limit=fill_limit,
prev=self._chain_last(),
)
)
return self._append(fill)
def align(self, field_spec=None, window='5m', method='linear', limit=None):
"""
Align entry point
"""
align = Align(
self,
Options(
field_spec=field_spec,
window=window,
limit=limit,
method=method,
prev=self._chain_last(),
)
)
return self._append(align)
def rate(self, field_spec=None, allow_negative=True):
"""
derivative entry point
"""
align = Rate(
self,
Options(
field_spec=field_spec,
allow_negative=allow_negative,
prev=self._chain_last(),
)
)
return self._append(align)
def take(self, limit):
"""
Take events up to the supplied limit, per key.
Parameters
----------
limit : int
Integer number of events to take.
global_flush: bool, optional
If set to true (default is False) then the Taker will
send out a single .flush() event if the limit has been
exceeded and the window_type is 'global.' This can be
used as a fail safe with processors that cache events
(like the Filler) to ensure all events are emitted when
the Pipeline is used in 'stream' mode. This is not
needed in 'batch' mode because the flush signal is sent
automatically.
Returns
-------
Pipeline
The Pipeline.
"""
take = Taker(
self,
Options(
limit=limit,
prev=self._chain_last(),
)
)
return self._append(take)
def _convert_opts(self, options): # pylint: disable=no-self-use
if options is None:
return dict()
else:
return options
def as_events(self, options=None):
"""
Converts incoming TimeRangeEvents or IndexedEvents to
Events. This is helpful since some processors will
emit TimeRangeEvents or IndexedEvents, which may be
unsuitable for some applications.
There are three options:
1. use the beginning time (options = Options(alignment='lag'))
2. use the center time (options = Options(alignment='center'))
3. use the end time (options = Options(alignment='lead'))
Parameters
----------
options : Options
The options, see above.
Returns
-------
Pipeline
The Pipeline.
"""
conv = Converter(
self,
Options(
type=Event,
prev=self._chain_last(),
**self._convert_opts(options)
),
)
return self._append(conv)
def as_time_range_events(self, options=None):
"""
Converts incoming Events or IndexedEvents to TimeRangeEvents.
There are three option for alignment:
1. time range will be in front of the timestamp - ie:
options = Options(alignment='front')
2. time range will be centered on the timestamp - ie:
options = Options(alignment='center')
3. time range will be positioned behind the timestamp - ie:
options = Options(alignment='behind')
The duration is of the form "1h" for one hour, "30s" for 30 seconds and so on.
Parameters
----------
options : dict
Args to add to Options - duration and alignment.
Returns
-------
Pipeline
The Pipeline
"""
conv = Converter(
self,
Options(
type=TimeRangeEvent,
prev=self._chain_last(),
**self._convert_opts(options)
),
)
return self._append(conv)
def as_indexed_events(self, options=None):
"""
Converts incoming Events to IndexedEvents.
Note: It isn't possible to convert TimeRangeEvents to IndexedEvents.
Parameters
----------
options : Options
Contains the conversion options. In this case, the duration string
of the Index is expected. Must contain the key 'duration' and the
duration string is of the form "1h" for one hour, "30s" for 30
seconds and so on.
Returns
-------
Pipeline
The Pipeline.
"""
conv = Converter(
self,
Options(
type=IndexedEvent,
prev=self._chain_last(),
**self._convert_opts(options)
),
)
return self._append(conv)
# module functions
```
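To make the Pipeline's chaining style concrete, here is a small batch-mode sketch. It assumes the module layout used above (`pypond.pipeline`, `pypond.series`) plus a `pypond.functions.Functions` helper for the reducers; the traffic numbers are invented for illustration.

```python
from pypond.functions import Functions
from pypond.pipeline import Pipeline
from pypond.series import TimeSeries

traffic = TimeSeries(dict(
    name='traffic',
    columns=['time', 'in', 'out'],
    points=[
        [1400425947000, 52, 41],
        [1400425948000, 18, 67],
        [1400425949000, 26, 49],
    ],
))

# collapse 'in' and 'out' into a 'total' column, then aggregate the max total
events = (
    Pipeline()
    .from_source(traffic)
    .emit_on('flush')
    .collapse(['in', 'out'], 'total', Functions.sum(), append=True)
    .aggregate({'max_total': {'total': Functions.max()}})
    .to_event_list()
)
for evt in events:
    print(evt)
```

Because the source is a TimeSeries (a Bounded input), `to_event_list()` runs the chain in batch mode via the Runner and hands back the accumulated results directly, with no observer callback needed.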
#### File: pypond/processor/filler.py
```python
import copy
import numbers
from operator import truediv
from pyrsistent import thaw
import six
from .base import Processor
from ..exceptions import ProcessorException, ProcessorWarning
from ..util import (
is_pipeline,
is_valid,
ms_from_dt,
nested_get,
nested_set,
Options,
)
class Filler(Processor): # pylint: disable=too-many-instance-attributes
"""
A processor that fills missing/invalid values in the event
with new values (zero, interpolated or padded).
When doing a linear fill, Filler instances should be chained.
See the Fill/sanitize doc (sanitize.md) for details.
If no field_spec is supplied, the default field 'value' will be used.
Parameters
----------
arg1 : Filler or Pipeline
Copy constructor or the pipeline.
options : Options
Options object.
"""
def __init__(self, arg1, options=Options()):
"""create the mapper"""
super(Filler, self).__init__(arg1, options)
self._log('Filler.init', 'uid: {0}'.format(self._id))
# options
self._field_spec = None
self._method = None
self._mode = None
self._fill_limit = None
# internal members
# state for pad to refer to previous event
self._previous_event = None
# key count for zero and pad fill
self._key_count = dict()
# special state for linear fill
self._last_good_linear = None
# cache of events pending linear fill
self._linear_fill_cache = list()
if isinstance(arg1, Filler):
# pylint: disable=protected-access
self._field_spec = arg1._field_spec
self._method = arg1._method
self._mode = arg1._mode
self._fill_limit = arg1._fill_limit
elif is_pipeline(arg1):
self._field_spec = options.field_spec
self._method = options.method
self._mode = arg1.mode()
self._fill_limit = options.fill_limit
else:
msg = 'Unknown arg to Filler: {0}'.format(arg1)
raise ProcessorException(msg)
self._log('Filler.init.Options', '{0}', (options,))
if self._method not in ('zero', 'pad', 'linear'):
msg = 'Unknown method {0} passed to Filler'.format(self._method)
raise ProcessorException(msg)
if self._fill_limit is not None and not isinstance(self._fill_limit, int):
msg = 'Arg fill_limit must be an integer'
raise ProcessorException(msg)
if isinstance(self._field_spec, six.string_types):
self._field_spec = [self._field_spec]
elif self._field_spec is None:
self._field_spec = ['value']
# when using linear mode, only a single column will be processed
# per instance. more details in sanitize.md
if self._method == 'linear' and len(self._field_spec) != 1:
msg = 'linear fill takes a path to a single column\n'
msg += ' - see the sanitize documentation for usage details.'
raise ProcessorException(msg)
def clone(self):
"""clone it."""
return Filler(self)
def _pad_and_zero(self, data):
"""
Process and fill the values at the paths as appropriate when the
fill method is either pad or zero.
"""
for path in self._field_spec:
field_path = self._field_path_to_array(path)
# initialize a counter for this column
if tuple(field_path) not in self._key_count:
self._key_count[tuple(field_path)] = 0
val = nested_get(data, field_path)
# this is pointing at a path that does not exist
if val == 'bad_path':
self._warn('path does not exist: {0}'.format(field_path), ProcessorWarning)
continue
if not is_valid(val):
# massage the path per selected method
# have we hit the limit?
if self._fill_limit is not None and \
self._key_count[tuple(field_path)] >= self._fill_limit:
continue
if self._method == 'zero': # set to zero
nested_set(data, field_path, 0)
# note that this column has been zeroed
self._key_count[tuple(field_path)] += 1
elif self._method == 'pad': # set to previous value
if self._previous_event is not None:
if is_valid(self._previous_event.get(field_path)):
nested_set(
data, field_path,
self._previous_event.get(field_path)
)
# note that this column has been padded
# on success
self._key_count[tuple(field_path)] += 1
else:
# it is a valid value, so reset the counter for
# this column
self._key_count[tuple(field_path)] = 0
def _is_valid_linear_event(self, event):
"""
Check to see if an even has good values when doing
linear fill since we need to keep a completely intact
event for the values.
While we are inspecting the data payload, make a note if
any of the paths are pointing at a list. Then it
will trigger that filling code later.
"""
valid = True
field_path = self._field_path_to_array(self._field_spec[0])
val = nested_get(thaw(event.data()), field_path)
# this is pointing at a path that does not exist, issue a warning
# can call the event valid so it will be emitted. can't fill what
# isn't there.
if val == 'bad_path':
self._warn('path does not exist: {0}'.format(field_path), ProcessorWarning)
return valid
# a tracked field path is not valid so this is
# not a valid linear event. also, if it is not a numeric
# value, mark it as invalid and let _interpolate_event_list()
# complain about/skip it.
if not is_valid(val) or not isinstance(val, numbers.Number):
valid = False
return valid
def _linear_fill(self, event):
"""
This handles the linear filling. It returns a list of
events to be emitted. That list may only contain a single
event.
If an event is valid - it has valid values for all of
the field paths - it is cached as "last good" and
returned to be emitted. The return value is a list
of one event.
If an event has invalid values, it is cached to be
processed later and an empty list is returned.
Additional invalid events will continue to be cached until
a new valid value is seen, then the cached events will
be filled and returned. That will be a list of indeterminate
length.
"""
# see if the event is valid and also if it has any
# list values to be filled.
is_valid_event = self._is_valid_linear_event(event)
# Deal with the event as appropriate depending on whether it is
# valid or not and whether we have or have not seen a valid
# event yet.
events = list()
if is_valid_event and not self._linear_fill_cache:
# valid event, no cached events, use as last good
# and return the event. This is what we want to see.
self._last_good_linear = event
events.append(event)
elif not is_valid_event and self._last_good_linear is not None:
# an invalid event was received and we have previously
# seen a valid event, so add to the cache for fill processing
# later.
self._linear_fill_cache.append(event)
# now make sure we have not exceeded the fill_limit
# if it has been set. if it has, emit all the cached
# events and reset the main state such that the next
# condition will continue to trigger until we see another
# valid event.
if self._fill_limit is not None and \
len(self._linear_fill_cache) >= self._fill_limit:
for i in self._linear_fill_cache:
self.emit(i)
self._linear_fill_cache = list()
self._last_good_linear = None
elif not is_valid_event and self._last_good_linear is None:
# an invalid event but we have not seen a good
# event yet so there is nothing to start filling "from"
# so just return and live with it.
events.append(event)
elif is_valid_event and self._linear_fill_cache:
# a valid event was received, and there are cached events
# to be processed, so process and return the filled events
# to be emitted.
event_list = [self._last_good_linear] + self._linear_fill_cache + [event]
# the first event a.k.a. self._last_good_linear has
# already been emitted either as a "good"
# event or as the last event in the previous filling pass.
# that's why it's being shaved off here.
for i in self._interpolate_event_list(event_list)[1:]:
events.append(i)
# reset the cache, note as last good
self._linear_fill_cache = list()
self._last_good_linear = event
return events
def add_event(self, event):
"""
Perform the fill operation on the event and emit.
Parameters
----------
event : Event, IndexedEvent, TimeRangeEvent
Any of the three event variants.
"""
if self.has_observers():
to_emit = list()
new_data = thaw(event.data())
if self._method in ('zero', 'pad'):
# zero and pad use much the same method in that
# they both will emit a single event every time
# add_event() is called.
self._pad_and_zero(new_data)
emit = event.set_data(new_data)
to_emit.append(emit)
# remember previous event for padding
self._previous_event = emit
elif self._method == 'linear':
# linear filling follows a somewhat different
# path since it might emit zero, one or multiple
# events every time add_event() is called.
for emit in self._linear_fill(event):
to_emit.append(emit)
# end filling logic
for emitted_event in to_emit:
self._log('Filler.add_event', 'emitting: {0}', (emitted_event,))
self.emit(emitted_event)
def _interpolate_event_list(self, events): # pylint: disable=too-many-branches, too-many-locals
"""
The fundamental linear interpolation workhorse code. Process
a list of events and return a new list. Does a pass for
every field_spec.
This is abstracted out like this because we probably want
to interpolate a list of events not tied to a Collection.
A Pipeline result list, etc etc.
Sorry pylint, sometimes you need to write a complex method.
"""
base_events = copy.copy(events)
# new array of interpolated events for each field path
new_events = list()
field_path = self._field_path_to_array(self._field_spec[0])
# setup done, loop through the events.
for event_enum in enumerate(base_events):
# cant interpolate first or last event so just save it
# as-is and move on.
if event_enum[0] == 0 or event_enum[0] == len(base_events) - 1:
new_events.append(event_enum[1])
continue
# if a non-numeric value is encountered, stop processing
# this field spec and hand back the original unfilled events.
if is_valid(event_enum[1].get(field_path)) and \
not isinstance(event_enum[1].get(field_path),
numbers.Number):
self._warn(
'linear requires numeric values - skipping this field_spec',
ProcessorWarning
)
return base_events
# found a bad value so start calculating.
if not is_valid(event_enum[1].get(field_path)):
previous_value = None
previous_ts = None
next_value = None
next_ts = None
# look to the previous event in the new_event list since
# that's where previously interpolated values will be.
# if found, get the timestamp as well.
previous_value = new_events[event_enum[0] - 1].get(field_path)
if previous_value:
previous_ts = ms_from_dt(new_events[event_enum[0] - 1].timestamp())
# see about finding the next valid value and its timestamp
# in the original list.
next_idx = event_enum[0] + 1
while next_value is None and next_idx < len(base_events):
val = base_events[next_idx].get(field_path)
if is_valid(val):
next_ts = ms_from_dt(base_events[next_idx].timestamp())
next_value = val # terminates the loop
next_idx += 1
# previous_value should only be none if there are a string
# of bad values at the beginning of the sequence.
# next_value will be none if that value no longer has
# valid values in the rest of the sequence.
if previous_value is not None and next_value is not None:
# pry the data from current event
new_data = thaw(event_enum[1].data())
current_ts = ms_from_dt(event_enum[1].timestamp())
if previous_ts == next_ts:
# average the two values
new_val = truediv((previous_value + next_value), 2)
else:
point_frac = truediv(
(current_ts - previous_ts), (next_ts - previous_ts))
new_val = previous_value + ((next_value - previous_value) * point_frac)
# set that value to the field spec in new data
nested_set(new_data, field_path, new_val)
# call .set_data() to create a new event
new_events.append(event_enum[1].set_data(new_data))
else:
# couldn't calculate new value either way, just
# keep the old event.
new_events.append(event_enum[1])
else:
# theoretically never called because the incoming lists
# will be bookended by valid events now that we're only
# processing a single column per Filler instance.
# leaving here in case we start feeding this new data.
new_events.append(event_enum[1]) # pragma: no cover
# save the current state before doing another pass
# on a different field_path
return new_events
def flush(self):
"""Don't delegate flush to superclass yet. Make sure
there are no cached events (could happen if we stop
seeing valid events) before passing it up the food
chain."""
self._log('Filler.flush')
if self.has_observers() and self._method == 'linear':
self._log('Filler.flush.linear')
# are there any left-over events like if a path
# just stops seeing any good events so they are
# never filled and emitted.
for i in self._linear_fill_cache:
self.emit(i)
super(Filler, self).flush()
```
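Filling is normally reached through `TimeSeries.fill()` (which the test module later in this document exercises) rather than by constructing a Filler directly. A minimal sketch, assuming that convenience method and using made-up traffic data, shows both the zero and linear behaviours, including the interpolation arithmetic from `_interpolate_event_list()`.

```python
from pypond.series import TimeSeries

missing = TimeSeries(dict(
    name='traffic',
    columns=['time', 'direction'],
    points=[
        [1400425947000, {'in': 1, 'out': None}],
        [1400425948000, {'in': None, 'out': 4}],
        [1400425949000, {'in': 5, 'out': None}],
    ],
))

# zero fill can touch several columns in one pass
zeroed = missing.fill(field_spec=['direction.in', 'direction.out'], method='zero')
print(zeroed.at(1).get('direction.in'))   # 0

# linear fill works on a single column per pass; the missing point sits
# halfway between t=...947000 (value 1) and t=...949000 (value 5), so
# new_val = 1 + (5 - 1) * 0.5 = 3.0
linear = missing.fill(field_spec='direction.in', method='linear')
print(linear.at(1).get('direction.in'))   # 3.0
```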
#### File: pypond/processor/mapper.py
```python
from .base import Processor
from ..exceptions import ProcessorException
from ..util import is_pipeline, Options
class Mapper(Processor):
"""
A processor which takes an operator as its only option
and uses that to output a new event.
Parameters
----------
arg1 : Mapper or Pipeline
Copy constructor or the pipeline.
options : Options
Options object.
"""
def __init__(self, arg1, options=Options()):
"""create the mapper"""
super(Mapper, self).__init__(arg1, options)
self._log('Mapper.init', 'uid: {0}'.format(self._id))
self._op = None
if isinstance(arg1, Mapper):
self._op = arg1._op # pylint: disable=protected-access
elif is_pipeline(arg1):
self._op = options.op
else:
msg = 'Unknown arg to Mapper: {0}'.format(arg1)
raise ProcessorException(msg)
if callable(self._op) is False:
msg = 'op: {0} is not a callable function'.format(self._op)
raise ProcessorException(msg)
def clone(self):
"""clone it."""
return Mapper(self)
def add_event(self, event):
"""
Perform the map operation on the event and emit.
Parameters
----------
event : Event, IndexedEvent, TimeRangeEvent
Any of the three event variants.
"""
if self.has_observers():
evn = self._op(event)
self._log('Mapper.add_event', 'emitting: {0}', (evn,))
self.emit(evn)
```
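A short sketch of the Mapper through `Pipeline.map()`; the callback simply builds a new Event keeping only the 'in' column. The 'all' key used to read the result assumes the default collection key for an un-grouped, un-windowed pipeline.

```python
from pypond.event import Event
from pypond.pipeline import Pipeline
from pypond.series import TimeSeries

ts = TimeSeries(dict(
    name='traffic',
    columns=['time', 'in', 'out'],
    points=[[1400425947000, 1, 2], [1400425948000, 3, 4]],
))

def in_only(event):
    """The map operator: return a new Event with just the 'in' value."""
    return Event(event.timestamp(), {'in': event.get('in')})

kcol = (
    Pipeline()
    .from_source(ts)
    .map(in_only)
    .to_keyed_collections()
)
print(kcol['all'].at(0).get('in'))   # 1; the 'out' column has been dropped
```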
#### File: pypond/processor/taker.py
```python
from .base import Processor
from ..exceptions import ProcessorException
from ..index import Index
from ..util import is_pipeline, Options
class Taker(Processor):
"""
A processor which limits the number of events that are processed.
Parameters
----------
arg1 : Taker or Pipeline
Copy constructor or the pipeline.
options : Options
Options object.
"""
def __init__(self, arg1, options=Options()):
"""create the mapper"""
super(Taker, self).__init__(arg1, options)
self._log('Taker.init', 'uid: {0}'.format(self._id))
# options
self._limit = None
self._window_type = None
self._window_duration = None
self._group_by = None
# instance members
self._count = dict()
self._flush_sent = False
if isinstance(arg1, Taker):
# pylint: disable=protected-access
self._limit = arg1._limit
self._window_type = arg1._window_type
self._window_duration = arg1._window_duration
self._group_by = arg1._group_by
elif is_pipeline(arg1):
self._limit = options.limit
self._window_type = arg1.get_window_type()
self._window_duration = arg1.get_window_duration()
self._group_by = arg1.get_group_by()
else:
msg = 'Unknown arg to Taker: {0}'.format(arg1)
raise ProcessorException(msg)
def clone(self):
"""clone it."""
return Taker(self)
def add_event(self, event):
"""
Output the event, provided the limit for its collection key has not been reached.
Parameters
----------
event : Event, IndexedEvent, TimeRangeEvent
Any of the three event variants.
"""
if self.has_observers():
ts = event.timestamp()
window_key = None
if self._window_type == 'fixed':
window_key = Index.get_index_string(self._window_duration, ts)
else:
window_key = self._window_type
group_by_key = self._group_by(event)
coll_key = '{wk}::{gbk}'.format(wk=window_key, gbk=group_by_key) if \
group_by_key is not None else window_key
if coll_key not in self._count:
self._count[coll_key] = 0
self._count[coll_key] += 1
# emit the events for each collection key that has not reached
# the limit. This is the main point of this processor.
if self._count.get(coll_key) <= self._limit:
self._log('Taker.add_event', 'collection key: {0}', (coll_key,))
self._log(
'Taker.add_event',
'count: {0} limit: {1}',
(self._count.get(coll_key), self._limit)
)
self._log('Taker.add_event', 'emitting: {0}', (event,))
self.emit(event)
def flush(self):
"""flush"""
super(Taker, self).flush()
```
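The Taker is easiest to see through `Pipeline.take()`. The sketch below assumes the same un-grouped, un-windowed defaults, so there is a single global collection key and only the first three events survive.

```python
from pypond.pipeline import Pipeline
from pypond.series import TimeSeries

ts = TimeSeries(dict(
    name='traffic',
    columns=['time', 'value'],
    points=[[1400425947000 + i * 1000, i] for i in range(10)],
))

kcol = (
    Pipeline()
    .from_source(ts)
    .take(3)
    .to_keyed_collections()
)
print(kcol['all'].size())   # 3 - events beyond the per-key limit are dropped
```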
#### File: pypond/pypond/timerange_event.py
```python
from pyrsistent import pmap, thaw
from .event import EventBase
from .util import is_pmap, ms_from_dt
class TimeRangeEvent(EventBase):
"""
The creation of an TimeRangeEvent is done by combining two parts -
the timerange and the data.
To construct you specify a TimeRange, along with the data.
The first arg can be:
- a TimeRangeEvent instance (copy ctor)
- a pyrsistent.PMap, or
- a python tuple, list or pyrsistent.PVector object containing two
python datetime objects or ms timestamps - the args for the
TimeRange object.
To specify the data you can supply either:
- a python dict
- a pyrsistent.PMap, or
- a simple type such as an integer. In the case of the simple type
this is a shorthand for supplying {"value": v}.
Parameters
----------
instance_or_args : TimeRange, iterable, pyrsistent.pmap
See above
arg2 : dict, pmap, int, float, str, optional
See above.
"""
__slots__ = () # inheriting relevant slots, still need this
def __init__(self, instance_or_args, arg2=None):
"""
Create a time range event.
"""
# pylint doesn't like self._d but be consistent w/original code.
# pylint: disable=invalid-name
if isinstance(instance_or_args, TimeRangeEvent):
super(TimeRangeEvent, self).__init__(instance_or_args._d) # pylint: disable=protected-access
return
elif is_pmap(instance_or_args):
super(TimeRangeEvent, self).__init__(instance_or_args)
return
rng = self.timerange_from_arg(instance_or_args)
data = self.data_from_arg(arg2)
super(TimeRangeEvent, self).__init__(pmap(dict(range=rng, data=data)))
# Query/accessor methods
def to_json(self):
"""
Returns the TimeRangeEvent as a JSON object, essentially
::
{timerange: tr, data: {key: value, ...}}
This is actually like json.loads(s) - produces the
actual data structure from the object internal data.
Returns
-------
dict
Dict representation of internals (timerange, data).
"""
return dict(
timerange=self.timerange().to_json(),
data=thaw(self.data()),
)
def key(self):
"""Returns a range string in the format 'begin,end' as expressed
as ms since the epoch.
Returns
-------
str
The begin and end of the timerange in ms since the epoch.
"""
return '{0},{1}'.format(ms_from_dt(self.begin()), ms_from_dt(self.end()))
def type(self): # pylint: disable=no-self-use
"""Return the type of this event type
Returns
-------
class
The class of this event type.
"""
return TimeRangeEvent
def to_point(self, cols=None):
"""
Returns a flat array starting with the timestamp, followed by the values.
Can be given an optional list of columns so the returned list will
have the values in order. Primarily for the TimeSeries wire format.
Parameters
----------
cols : list, optional
List of data columns to order the data points in so the
TimeSeries wire format lines up correctly. If not specified,
the points will be whatever order that dict.values() decides
to return it in.
Returns
-------
list
Epoch ms followed by points.
"""
points = [self.timerange().to_json()]
data = thaw(self.data())
if isinstance(cols, list):
points += [data.get(x, None) for x in cols]
else:
points += [x for x in list(data.values())]
return points
def timerange_as_utc_string(self):
"""The timerange of this data, in UTC time, as a string.
Returns
-------
str
Formatted time string
"""
return self.timerange().to_utc_string()
def timerange_as_local_string(self):
"""The timerange of this data, in Local time, as a string.
Returns
-------
str
Formatted time string.
"""
return self.timerange().to_local_string()
def timestamp(self):
"""The timestamp of this Event data. It's just the beginning
of the range in this case.
Returns
-------
datetime.datetime
Beginning of range.
"""
return self.begin()
def timerange(self):
"""The TimeRange of this data.
Returns
-------
TimeRange
The underlying time range object.
"""
return self._d.get('range')
def begin(self):
"""The begin time of this Event, which will be just the timestamp.
Returns
-------
datetime.datetime
Beginning of range.
"""
return self.timerange().begin()
def end(self):
"""The end time of this Event, which will be just the timestamp.
Returns
-------
datetime.datetime
End of range.
"""
return self.timerange().end()
# data setters, returns new object
def set_data(self, data):
"""Sets the data portion of the event and returns a new TimeRangeEvent.
:param data: The new data portion for this event object.
:type data: dict
:returns: TimeRangeEvent - a new TimeRangeEvent object.
Parameters
----------
data : dict
New payload to set as the data for this event.
Returns
-------
TimeRangeEvent
A new time range event object with new data payload.
"""
_dnew = self._d.set('data', self.data_from_arg(data))
return TimeRangeEvent(_dnew)
# Humanize
def humanize_duration(self):
"""Humanize the timerange.
Returns
-------
str
Humanized string of the time range.
"""
return self.timerange().humanize_duration()
```
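Constructing a TimeRangeEvent follows the class docstring above: a two-element tuple of ms timestamps (or datetimes) for the range plus a data dict. The values below reuse the outage fixture from the test module that follows; the humanized duration string depends on the underlying TimeRange implementation.

```python
from pypond.timerange_event import TimeRangeEvent

outage = TimeRangeEvent(
    (1429673400000, 1429707600000),
    {'title': 'BOOM', 'esnet_ticket': 'ESNET-20080101-001'},
)

print(outage.key())                  # '1429673400000,1429707600000'
print(outage.get('title'))           # 'BOOM'
print(outage.humanize_duration())    # a human-friendly duration string
print(outage.to_json())              # {'timerange': [...], 'data': {...}}
```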
#### File: pypond/tests/clean_test.py
```python
import copy
import datetime
import unittest
import warnings
from pypond.collection import Collection
from pypond.event import Event
from pypond.exceptions import ProcessorException, ProcessorWarning, TimeSeriesException
from pypond.indexed_event import IndexedEvent
from pypond.io.input import Stream
from pypond.io.output import CollectionOut
from pypond.pipeline import Pipeline
from pypond.processor import Filler
from pypond.series import TimeSeries
from pypond.timerange_event import TimeRangeEvent
from pypond.util import aware_utcnow
# global variables for the callbacks to write to.
# they are always reset to None by setUp()
RESULT = None
EVENT_LIST = [
Event(1429673400000, {'in': 1, 'out': 2}),
Event(1429673460000, {'in': 3, 'out': 4}),
Event(1429673520000, {'in': 5, 'out': 6}),
]
TICKET_RANGE = dict(
name="outages",
columns=["timerange", "title", "esnet_ticket"],
points=[
[[1429673400000, 1429707600000], "BOOM", "ESNET-20080101-001"],
[[1429673400000, 1429707600000], "BAM!", "ESNET-20080101-002"],
],
)
AVAILABILITY_DATA = dict(
name="availability",
columns=["index", "uptime"],
points=[
["2014-07", "100%"],
["2014-08", "88%"],
["2014-09", "95%"],
["2014-10", "99%"],
["2014-11", "91%"],
["2014-12", "99%"],
["2015-01", "100%"],
["2015-02", "92%"],
["2015-03", "99%"],
["2015-04", "87%"],
["2015-05", "92%"],
["2015-06", "100%"],
]
)
class CleanBase(unittest.TestCase):
def setUp(self):
"""set up for all tests."""
# canned collection
self._canned_collection = Collection(EVENT_LIST)
# canned series objects
self._canned_event_series = TimeSeries(
dict(name='collection', collection=self._canned_collection))
global RESULTS
RESULTS = None
class TestRenameFill(CleanBase):
"""
A set of tests for the second-generation methods to manipulate timeseries
and events.
"""
def test_rename(self):
"""Test the renamer facility."""
# rename an Event series
ts = copy.deepcopy(self._canned_event_series)
renamed = ts.rename_columns({'in': 'new_in', 'out': 'new_out'})
self.assertEqual(
renamed.at(0).get('new_in'),
self._canned_event_series.at(0).get('in')
)
self.assertEqual(
renamed.at(0).get('new_out'),
self._canned_event_series.at(0).get('out')
)
self.assertEqual(
renamed.at(1).get('new_in'),
self._canned_event_series.at(1).get('in')
)
self.assertEqual(
renamed.at(1).get('new_out'),
self._canned_event_series.at(1).get('out')
)
self.assertEqual(
renamed.at(2).get('new_in'),
self._canned_event_series.at(2).get('in')
)
self.assertEqual(
renamed.at(2).get('new_out'),
self._canned_event_series.at(2).get('out')
)
# rename a TimeRangeEvent series
ts = TimeSeries(TICKET_RANGE)
renamed = ts.rename_columns({'title': 'event', 'esnet_ticket': 'ticket'})
self.assertEqual(renamed.at(0).get('event'), ts.at(0).get('title'))
self.assertEqual(renamed.at(0).get('ticket'), ts.at(0).get('esnet_ticket'))
self.assertEqual(renamed.at(1).get('event'), ts.at(1).get('title'))
self.assertEqual(renamed.at(1).get('ticket'), ts.at(1).get('esnet_ticket'))
self.assertEqual(renamed.at(0).timestamp(), ts.at(0).timestamp())
self.assertEqual(renamed.at(1).timestamp(), ts.at(1).timestamp())
# rename an IndexedEvent series
ts = TimeSeries(AVAILABILITY_DATA)
renamed = ts.rename_columns(dict(uptime='available'))
self.assertEqual(renamed.at(0).get('available'), ts.at(0).get('uptime'))
self.assertEqual(renamed.at(2).get('available'), ts.at(2).get('uptime'))
self.assertEqual(renamed.at(4).get('available'), ts.at(4).get('uptime'))
self.assertEqual(renamed.at(6).get('available'), ts.at(6).get('uptime'))
self.assertEqual(renamed.at(0).timestamp(), ts.at(0).timestamp())
self.assertEqual(renamed.at(1).timestamp(), ts.at(1).timestamp())
self.assertEqual(renamed.at(2).timestamp(), ts.at(2).timestamp())
def test_bad_args(self):
"""Trigger error states for coverage."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None, 'drop': None}],
[1400425948000, {'in': None, 'out': 4, 'drop': None}],
[1400425949000, {'in': None, 'out': None, 'drop': 13}],
[1400425950000, {'in': None, 'out': None, 'drop': 14}],
[1400425960000, {'in': 9, 'out': 8, 'drop': None}],
[1400425970000, {'in': 11, 'out': 10, 'drop': 16}],
]
)
ts = TimeSeries(simple_missing_data)
# bad ctor arg
with self.assertRaises(ProcessorException):
f = Filler(dict())
# invalid method
with self.assertRaises(TimeSeriesException):
ts.fill(method='bogus')
# limit not int
with self.assertRaises(ProcessorException):
ts.fill(fill_limit='z')
# direct access to filler via pipeline needs to take a single path
with self.assertRaises(ProcessorException):
pip = Pipeline()
pip.fill(method='linear', field_spec=['direction.in', 'direction.out'])
# invalid method
with self.assertRaises(ProcessorException):
pip = Pipeline()
pip.fill(method='bogus')
# catch bad path at various points
with warnings.catch_warnings(record=True) as wrn:
ts.fill(field_spec='bad.path')
self.assertEqual(len(wrn), 1)
self.assertTrue(issubclass(wrn[0].category, ProcessorWarning))
with warnings.catch_warnings(record=True) as wrn:
ts.fill(field_spec='bad.path', method='linear')
self.assertEqual(len(wrn), 1)
self.assertTrue(issubclass(wrn[0].category, ProcessorWarning))
with warnings.catch_warnings(record=True) as wrn:
ts.fill(field_spec='direction.bogus')
self.assertEqual(len(wrn), 1)
self.assertTrue(issubclass(wrn[0].category, ProcessorWarning))
# trigger warnings about non-numeric values in linear.
with warnings.catch_warnings(record=True) as wrn:
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None}],
[1400425948000, {'in': 'non_numeric', 'out': 4}],
[1400425949000, {'in': 5, 'out': None}],
]
)
ts = TimeSeries(simple_missing_data)
ts.fill(field_spec='direction.in', method='linear')
self.assertEqual(len(wrn), 1)
self.assertTrue(issubclass(wrn[0].category, ProcessorWarning))
# empty series for coverage - this case previously caught a bug
empty = TimeSeries(dict(
name="Sensor values",
columns=["time", "temperature"],
points=[
]
))
self.assertEqual(empty.fill(field_spec='temperature').size(), 0)
def test_zero_fill(self):
"""test using the filler to fill missing values with zero."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None}],
[1400425948000, {'in': None, 'out': 4}],
[1400425949000, {'in': 5, 'out': None}],
[1400425950000, {'in': None, 'out': 8}],
[1400425960000, {'in': 9, 'out': None}],
[1400425970000, {'in': None, 'out': 12}],
]
)
ts = TimeSeries(simple_missing_data)
# fill all invalid values
new_ts = ts.fill(field_spec=['direction.in', 'direction.out'])
self.assertEqual(new_ts.size(), 6)
self.assertEqual(new_ts.at(0).get('direction.out'), 0)
self.assertEqual(new_ts.at(2).get('direction.out'), 0)
self.assertEqual(new_ts.at(1).get('direction.in'), 0)
# fill one column
new_ts = ts.fill(field_spec='direction.in')
self.assertEqual(new_ts.size(), 6)
self.assertEqual(new_ts.at(1).get('direction.in'), 0)
self.assertEqual(new_ts.at(3).get('direction.in'), 0)
self.assertIsNone(new_ts.at(0).get('direction.out'))
self.assertIsNone(new_ts.at(2).get('direction.out'))
def test_complex_zero_fill(self):
"""make sure more complex nested paths work OK"""
complex_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000,
{'in': {'tcp': 1, 'udp': 3}, 'out': {'tcp': 2, 'udp': 3}}],
[1400425948000,
{'in': {'tcp': 3, 'udp': None}, 'out': {'tcp': 4, 'udp': 3}}],
[1400425949000,
{'in': {'tcp': 5, 'udp': None}, 'out': {'tcp': None, 'udp': 3}}],
[1400425950000,
{'in': {'tcp': 7, 'udp': None}, 'out': {'tcp': None, 'udp': 3}}],
[1400425960000,
{'in': {'tcp': 9, 'udp': 4}, 'out': {'tcp': 6, 'udp': 3}}],
[1400425970000,
{'in': {'tcp': 11, 'udp': 5}, 'out': {'tcp': 8, 'udp': 3}}],
]
)
ts = TimeSeries(complex_missing_data)
# zero fill everything
new_ts = ts.fill(field_spec=['direction.out.tcp', 'direction.in.udp'])
self.assertEqual(new_ts.at(0).get('direction.in.udp'), 3)
self.assertEqual(new_ts.at(1).get('direction.in.udp'), 0) # fill
self.assertEqual(new_ts.at(2).get('direction.in.udp'), 0) # fill
self.assertEqual(new_ts.at(3).get('direction.in.udp'), 0) # fill
self.assertEqual(new_ts.at(4).get('direction.in.udp'), 4)
self.assertEqual(new_ts.at(5).get('direction.in.udp'), 5)
self.assertEqual(new_ts.at(0).get('direction.out.tcp'), 2)
self.assertEqual(new_ts.at(1).get('direction.out.tcp'), 4)
self.assertEqual(new_ts.at(2).get('direction.out.tcp'), 0) # fill
self.assertEqual(new_ts.at(3).get('direction.out.tcp'), 0) # fill
self.assertEqual(new_ts.at(4).get('direction.out.tcp'), 6)
self.assertEqual(new_ts.at(5).get('direction.out.tcp'), 8)
# do it again, but only fill the out.tcp
new_ts = ts.fill(field_spec=['direction.out.tcp'])
self.assertEqual(new_ts.at(0).get('direction.out.tcp'), 2)
self.assertEqual(new_ts.at(1).get('direction.out.tcp'), 4)
self.assertEqual(new_ts.at(2).get('direction.out.tcp'), 0) # fill
self.assertEqual(new_ts.at(3).get('direction.out.tcp'), 0) # fill
self.assertEqual(new_ts.at(4).get('direction.out.tcp'), 6)
self.assertEqual(new_ts.at(5).get('direction.out.tcp'), 8)
self.assertEqual(new_ts.at(0).get('direction.in.udp'), 3)
self.assertEqual(new_ts.at(1).get('direction.in.udp'), None) # no fill
self.assertEqual(new_ts.at(2).get('direction.in.udp'), None) # no fill
self.assertEqual(new_ts.at(3).get('direction.in.udp'), None) # no fill
self.assertEqual(new_ts.at(4).get('direction.in.udp'), 4)
self.assertEqual(new_ts.at(5).get('direction.in.udp'), 5)
def test_linear(self):
"""Test linear interpolation filling returned by to_keyed_collections()."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': 2}],
[1400425948000, {'in': None, 'out': None}],
[1400425949000, {'in': None, 'out': None}],
[1400425950000, {'in': 3, 'out': None}],
[1400425960000, {'in': None, 'out': None}],
[1400425970000, {'in': 5, 'out': 12}],
[1400425980000, {'in': 6, 'out': 13}],
]
)
ts = TimeSeries(simple_missing_data)
new_ts = ts.fill(field_spec=['direction.in', 'direction.out'],
method='linear')
self.assertEqual(new_ts.size(), 7)
self.assertEqual(new_ts.at(0).get('direction.in'), 1)
self.assertEqual(new_ts.at(1).get('direction.in'), 1.6666666666666665) # filled
self.assertEqual(new_ts.at(2).get('direction.in'), 2.333333333333333) # filled
self.assertEqual(new_ts.at(3).get('direction.in'), 3)
self.assertEqual(new_ts.at(4).get('direction.in'), 4.0) # filled
self.assertEqual(new_ts.at(5).get('direction.in'), 5)
self.assertEqual(new_ts.at(0).get('direction.out'), 2)
self.assertEqual(new_ts.at(1).get('direction.out'), 2.4347826086956523) # filled
self.assertEqual(new_ts.at(2).get('direction.out'), 2.8695652173913047) # filled
self.assertEqual(new_ts.at(3).get('direction.out'), 3.304347826086957) # filled
self.assertEqual(new_ts.at(4).get('direction.out'), 7.6521739130434785) # filled
self.assertEqual(new_ts.at(5).get('direction.out'), 12)
def test_linear_list(self):
"""Test linear interpolation returned as an event list."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None}],
[1400425948000, {'in': None, 'out': None}],
[1400425949000, {'in': None, 'out': None}],
[1400425950000, {'in': 3, 'out': 8}],
[1400425960000, {'in': None, 'out': None}],
[1400425970000, {'in': 5, 'out': 12}],
[1400425980000, {'in': 6, 'out': 13}],
]
)
ts = TimeSeries(simple_missing_data)
# also test chaining multiple fillers together. In this series,
# field_spec=['direction.in', 'direction.out'] would not start
# filling until the 4th point so points 2 and 3 of direction.in
# would not be filled. A chain like this ensures both
# columns are fully filled.
elist = (
Pipeline()
.from_source(ts)
.fill(field_spec='direction.in', method='linear')
.fill(field_spec='direction.out', method='linear')
.to_event_list()
)
self.assertEqual(len(elist), len(simple_missing_data.get('points')))
self.assertEqual(elist[0].get('direction.in'), 1)
self.assertEqual(elist[1].get('direction.in'), 1.6666666666666665) # filled
self.assertEqual(elist[2].get('direction.in'), 2.333333333333333) # filled
self.assertEqual(elist[3].get('direction.in'), 3)
self.assertEqual(elist[4].get('direction.in'), 4.0) # filled
self.assertEqual(elist[5].get('direction.in'), 5)
self.assertEqual(elist[0].get('direction.out'), None) # can't fill
self.assertEqual(elist[1].get('direction.out'), None) # can't fill
self.assertEqual(elist[2].get('direction.out'), None) # can't fill
self.assertEqual(elist[3].get('direction.out'), 8)
self.assertEqual(elist[4].get('direction.out'), 10.0) # filled
self.assertEqual(elist[5].get('direction.out'), 12)
def test_asymmetric_linear_fill(self):
"""Test the new chained/asymmetric linear default fill in TimeSeries."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None}],
[1400425948000, {'in': None, 'out': None}],
[1400425949000, {'in': None, 'out': None}],
[1400425950000, {'in': 3, 'out': 8}],
[1400425960000, {'in': None, 'out': None}],
[1400425970000, {'in': 5, 'out': 12}],
[1400425980000, {'in': 6, 'out': 13}],
]
)
ts = TimeSeries(simple_missing_data)
new_ts = ts.fill(method='linear', field_spec=['direction.in', 'direction.out'])
self.assertEqual(new_ts.at(0).get('direction.in'), 1)
self.assertEqual(new_ts.at(1).get('direction.in'), 1.6666666666666665) # filled
self.assertEqual(new_ts.at(2).get('direction.in'), 2.333333333333333) # filled
self.assertEqual(new_ts.at(3).get('direction.in'), 3)
self.assertEqual(new_ts.at(4).get('direction.in'), 4.0) # filled
self.assertEqual(new_ts.at(5).get('direction.in'), 5)
self.assertEqual(new_ts.at(0).get('direction.out'), None) # can't fill
self.assertEqual(new_ts.at(1).get('direction.out'), None) # can't fill
self.assertEqual(new_ts.at(2).get('direction.out'), None) # can't fill
self.assertEqual(new_ts.at(3).get('direction.out'), 8)
self.assertEqual(new_ts.at(4).get('direction.out'), 10.0) # filled
self.assertEqual(new_ts.at(5).get('direction.out'), 12)
def test_linear_stream(self):
"""Test streaming on linear fill"""
def cback(collection, window_key, group_by):
"""the callback"""
global RESULTS # pylint: disable=global-statement
RESULTS = collection
events = [
Event(1400425947000, 1),
Event(1400425948000, 2),
Event(1400425949000, dict(value=None)),
Event(1400425950000, dict(value=None)),
Event(1400425951000, dict(value=None)),
Event(1400425952000, 5),
Event(1400425953000, 6),
Event(1400425954000, 7),
]
stream = Stream()
(
Pipeline()
.from_source(stream)
.fill(method='linear', field_spec='value')
.to(CollectionOut, cback)
)
for i in events:
stream.add_event(i)
self.assertEqual(RESULTS.size(), len(events))
self.assertEqual(RESULTS.at(0).get(), 1)
self.assertEqual(RESULTS.at(1).get(), 2)
self.assertEqual(RESULTS.at(2).get(), 2.75) # filled
self.assertEqual(RESULTS.at(3).get(), 3.5) # filled
self.assertEqual(RESULTS.at(4).get(), 4.25) # filled
self.assertEqual(RESULTS.at(5).get(), 5)
self.assertEqual(RESULTS.at(6).get(), 6)
self.assertEqual(RESULTS.at(7).get(), 7)
def test_linear_stream_limit(self):
"""Test streaming on linear fill with limiter"""
# Sets up a state where we stop seeing good data
# on a linear fill. In this case the Taker is used to
# not only limit the number of results, but also to
# make sure any cached events get emitted.
def cback(collection, window_key, group_by):
"""the callback"""
global RESULTS # pylint: disable=global-statement
RESULTS = collection
events = [
Event(1400425947000, 1),
Event(1400425948000, 2),
Event(1400425949000, dict(value=None)),
Event(1400425950000, 3),
Event(1400425951000, dict(value=None)),
Event(1400425952000, dict(value=None)),
Event(1400425953000, dict(value=None)),
Event(1400425954000, dict(value=None)),
]
# error state first - the last 4 events won't be emitted.
stream = Stream()
(
Pipeline()
.from_source(stream)
.fill(method='linear', field_spec='value')
.to(CollectionOut, cback)
)
for i in events:
stream.add_event(i)
self.assertEqual(RESULTS.size(), 4)
# shut it down and check again.
stream.stop()
# events "stuck" in the cache have been emitted
self.assertEqual(RESULTS.size(), 8)
# now use the Taker to make sure any cached events get
# emitted as well - setting the fill_limit to 3 here
# will make it so on the 7th event (after 3 have been
# cached) those will be emitted, and then the 8th event
# will be emitted because the state has been reset to
# "have not seen a valid value yet" which means that
# invalid events will be emitted and not cached.
stream = Stream()
(
Pipeline()
.from_source(stream)
.fill(method='linear', fill_limit=3, field_spec='value')
.to(CollectionOut, cback)
)
for i in events:
stream.add_event(i)
self.assertEqual(RESULTS.size(), 8)
def test_pad_and_zero_limiting(self):
"""test the limiting on pad and zero options."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None}],
[1400425948000, {'in': None, 'out': None}],
[1400425949000, {'in': None, 'out': None}],
[1400425950000, {'in': 3, 'out': 8}],
[1400425960000, {'in': None, 'out': None}],
[1400425970000, {'in': None, 'out': 12}],
[1400425980000, {'in': None, 'out': 13}],
[1400425990000, {'in': 7, 'out': None}],
[1400426000000, {'in': 8, 'out': None}],
[1400426010000, {'in': 9, 'out': None}],
[1400426020000, {'in': 10, 'out': None}],
]
)
ts = TimeSeries(simple_missing_data)
# verify fill limit for zero fill
zero_ts = ts.fill(method='zero', fill_limit=2,
field_spec=['direction.in', 'direction.out'])
self.assertEqual(zero_ts.at(0).get('direction.in'), 1)
self.assertEqual(zero_ts.at(1).get('direction.in'), 0) # fill
self.assertEqual(zero_ts.at(2).get('direction.in'), 0) # fill
self.assertEqual(zero_ts.at(3).get('direction.in'), 3)
self.assertEqual(zero_ts.at(4).get('direction.in'), 0) # fill
self.assertEqual(zero_ts.at(5).get('direction.in'), 0) # fill
self.assertEqual(zero_ts.at(6).get('direction.in'), None) # over limit skip
self.assertEqual(zero_ts.at(7).get('direction.in'), 7)
self.assertEqual(zero_ts.at(8).get('direction.in'), 8)
self.assertEqual(zero_ts.at(9).get('direction.in'), 9)
self.assertEqual(zero_ts.at(10).get('direction.in'), 10)
self.assertEqual(zero_ts.at(0).get('direction.out'), 0) # fill
self.assertEqual(zero_ts.at(1).get('direction.out'), 0) # fill
self.assertEqual(zero_ts.at(2).get('direction.out'), None) # over limit skip
self.assertEqual(zero_ts.at(3).get('direction.out'), 8)
self.assertEqual(zero_ts.at(4).get('direction.out'), 0) # fill
self.assertEqual(zero_ts.at(5).get('direction.out'), 12)
self.assertEqual(zero_ts.at(6).get('direction.out'), 13)
self.assertEqual(zero_ts.at(7).get('direction.out'), 0) # fill
self.assertEqual(zero_ts.at(8).get('direction.out'), 0) # fill
self.assertEqual(zero_ts.at(9).get('direction.out'), None) # over limit skip
self.assertEqual(zero_ts.at(10).get('direction.out'), None) # over limit skip
# verify fill limit for pad fill
pad_ts = ts.fill(method='pad', fill_limit=2,
field_spec=['direction.in', 'direction.out'])
self.assertEqual(pad_ts.at(0).get('direction.in'), 1)
self.assertEqual(pad_ts.at(1).get('direction.in'), 1) # fill
self.assertEqual(pad_ts.at(2).get('direction.in'), 1) # fill
self.assertEqual(pad_ts.at(3).get('direction.in'), 3)
self.assertEqual(pad_ts.at(4).get('direction.in'), 3) # fill
self.assertEqual(pad_ts.at(5).get('direction.in'), 3) # fill
self.assertEqual(pad_ts.at(6).get('direction.in'), None) # over limit skip
self.assertEqual(pad_ts.at(7).get('direction.in'), 7)
self.assertEqual(pad_ts.at(8).get('direction.in'), 8)
self.assertEqual(pad_ts.at(9).get('direction.in'), 9)
self.assertEqual(pad_ts.at(10).get('direction.in'), 10)
self.assertEqual(pad_ts.at(0).get('direction.out'), None) # no fill start
self.assertEqual(pad_ts.at(1).get('direction.out'), None) # no fill start
self.assertEqual(pad_ts.at(2).get('direction.out'), None) # no fill start
self.assertEqual(pad_ts.at(3).get('direction.out'), 8)
self.assertEqual(pad_ts.at(4).get('direction.out'), 8) # fill
self.assertEqual(pad_ts.at(5).get('direction.out'), 12)
self.assertEqual(pad_ts.at(6).get('direction.out'), 13)
self.assertEqual(pad_ts.at(7).get('direction.out'), 13) # fill
self.assertEqual(pad_ts.at(8).get('direction.out'), 13) # fill
self.assertEqual(pad_ts.at(9).get('direction.out'), None) # over limit skip
self.assertEqual(pad_ts.at(10).get('direction.out'), None) # over limit skip
def test_fill_event_variants(self):
"""fill time range and indexed events."""
range_list = [
TimeRangeEvent(
(aware_utcnow(), aware_utcnow() + datetime.timedelta(minutes=1)),
{'in': 100}
),
TimeRangeEvent(
(aware_utcnow(), aware_utcnow() + datetime.timedelta(minutes=2)),
{'in': None}
),
TimeRangeEvent(
(aware_utcnow(), aware_utcnow() + datetime.timedelta(minutes=3)),
{'in': None}
),
TimeRangeEvent(
(aware_utcnow(), aware_utcnow() + datetime.timedelta(minutes=4)),
{'in': 90}
),
TimeRangeEvent(
(aware_utcnow(), aware_utcnow() + datetime.timedelta(minutes=5)),
{'in': 80}
),
TimeRangeEvent(
(aware_utcnow(), aware_utcnow() + datetime.timedelta(minutes=6)),
{'in': 70}
),
]
coll = Collection(range_list)
# canned series objects
rts = TimeSeries(
dict(name='collection', collection=coll))
new_rts = rts.fill(field_spec='in')
self.assertEqual(new_rts.at(1).get('in'), 0)
self.assertEqual(new_rts.at(2).get('in'), 0)
# indexed events
index_list = [
IndexedEvent('1d-12355', {'value': 42}),
IndexedEvent('1d-12356', {'value': None}),
IndexedEvent('1d-12357', {'value': None}),
IndexedEvent('1d-12358', {'value': 52}),
IndexedEvent('1d-12359', {'value': 55}),
IndexedEvent('1d-12360', {'value': 58}),
]
coll = Collection(index_list)
its = TimeSeries(
dict(name='collection', collection=coll))
new_its = its.fill()
self.assertEqual(new_its.at(1).get(), 0)
self.assertEqual(new_its.at(2).get(), 0)
def test_scan_stop(self):
"""stop seeking good values if there are none - for coverage."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None}],
[1400425948000, {'in': 3, 'out': None}],
[1400425949000, {'in': None, 'out': None}],
[1400425950000, {'in': None, 'out': 8}],
[1400425960000, {'in': None, 'out': None}],
[1400425970000, {'in': None, 'out': 12}],
[1400425980000, {'in': None, 'out': 13}],
]
)
ts = TimeSeries(simple_missing_data)
new_ts = ts.fill(field_spec='direction.out', method='linear')
self.assertEqual(new_ts.at(2).get('direction.in'), None)
self.assertEqual(new_ts.at(3).get('direction.in'), None)
self.assertEqual(new_ts.at(4).get('direction.in'), None)
self.assertEqual(new_ts.at(5).get('direction.in'), None)
self.assertEqual(new_ts.at(6).get('direction.in'), None)
def test_pad(self):
"""Test the pad style fill."""
simple_missing_data = dict(
name="traffic",
columns=["time", "direction"],
points=[
[1400425947000, {'in': 1, 'out': None, 'drop': None}],
[1400425948000, {'in': None, 'out': 4, 'drop': None}],
[1400425949000, {'in': None, 'out': None, 'drop': 13}],
[1400425950000, {'in': None, 'out': None, 'drop': 14}],
[1400425960000, {'in': 9, 'out': 8, 'drop': None}],
[1400425970000, {'in': 11, 'out': 10, 'drop': 16}],
]
)
ts = TimeSeries(simple_missing_data)
new_ts = ts.fill(method='pad',
field_spec=['direction.in', 'direction.out', 'direction.drop'])
self.assertEqual(new_ts.at(0).get('direction.in'), 1)
self.assertEqual(new_ts.at(1).get('direction.in'), 1) # padded
self.assertEqual(new_ts.at(2).get('direction.in'), 1) # padded
self.assertEqual(new_ts.at(3).get('direction.in'), 1) # padded
self.assertEqual(new_ts.at(4).get('direction.in'), 9)
self.assertEqual(new_ts.at(5).get('direction.in'), 11)
self.assertEqual(new_ts.at(0).get('direction.out'), None) # 1st can't pad
self.assertEqual(new_ts.at(1).get('direction.out'), 4)
self.assertEqual(new_ts.at(2).get('direction.out'), 4) # padded
self.assertEqual(new_ts.at(3).get('direction.out'), 4) # padded
self.assertEqual(new_ts.at(4).get('direction.out'), 8)
self.assertEqual(new_ts.at(5).get('direction.out'), 10)
self.assertEqual(new_ts.at(0).get('direction.drop'), None) # 1st can't pad
self.assertEqual(new_ts.at(1).get('direction.drop'), None) # bad prev can't pad
self.assertEqual(new_ts.at(2).get('direction.drop'), 13)
self.assertEqual(new_ts.at(3).get('direction.drop'), 14)
self.assertEqual(new_ts.at(4).get('direction.drop'), 14) # padded
self.assertEqual(new_ts.at(5).get('direction.drop'), 16)
if __name__ == '__main__':
unittest.main()
```
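The fill behaviour exercised in the tests above can also be used directly on a TimeSeries; a minimal sketch, assuming the wire format and the fill() keyword arguments (field_spec, method, fill_limit) shown in those tests. The values are illustrative.
```python
from pypond.series import TimeSeries

ts = TimeSeries(dict(
    name="traffic",
    columns=["time", "direction"],
    points=[
        [1400425947000, {'in': 1, 'out': None}],
        [1400425948000, {'in': None, 'out': 4}],
        [1400425949000, {'in': 5, 'out': None}],
    ],
))

# Default fill method is zero; 'pad' and 'linear' are also available,
# and fill_limit caps how many consecutive gaps get filled.
zeroed = ts.fill(field_spec=['direction.in', 'direction.out'])
padded = ts.fill(method='pad', field_spec='direction.out', fill_limit=2)
linear = ts.fill(method='linear', field_spec='direction.in')

print(zeroed.at(1).get('direction.in'))   # 0   (zero filled)
print(padded.at(2).get('direction.out'))  # 4   (padded forward)
print(linear.at(1).get('direction.in'))   # 3.0 (interpolated between 1 and 5)
```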
#### File: pypond/tests/compare_test.py
```python
import datetime
import unittest
from pypond.collection import Collection
from pypond.event import Event
from pypond.indexed_event import IndexedEvent
from pypond.timerange_event import TimeRangeEvent
from pypond.range import TimeRange
from pypond.series import TimeSeries, TimeSeriesException
from pypond.util import aware_utcnow, ms_from_dt, dt_from_ms
EVENT_LIST = [
Event(1429673400000, {'in': 1, 'out': 2}),
Event(1429673460000, {'in': 3, 'out': 4}),
Event(1429673520000, {'in': 5, 'out': 6}),
]
UNORDERED_EVENT_LIST = [
Event(1429673460000, {'in': 3, 'out': 4}),
Event(1429673400000, {'in': 1, 'out': 2}),
Event(1429673520000, {'in': 5, 'out': 6}),
]
EVENT_LIST_DUP = [
Event(1429673400000, {'in': 1, 'out': 2}),
Event(1429673460000, {'in': 3, 'out': 4}),
Event(1429673460000, {'in': 4, 'out': 5}),
Event(1429673520000, {'in': 5, 'out': 6}),
]
IDX_EVENT_DUP = [
IndexedEvent('1d-12354', {'value': 42}),
IndexedEvent('1d-12355', {'value': 43}),
IndexedEvent('1d-12355', {'value': 44}),
IndexedEvent('1d-12356', {'value': 45}),
]
class BaseTestEvent(unittest.TestCase):
"""Base class for comparison tests."""
def setUp(self):
pass
class TestComparisonUtils(BaseTestEvent):
"""Test methods checking for/handling duplicate events and other accessors
introduced in the 8.0 branch of the JS code."""
def test_at_key_and_dedup(self):
"""test Collection.at_key() and dedup()"""
# events
coll = Collection(EVENT_LIST_DUP)
key_time = dt_from_ms(1429673460000)
find = coll.at_key(key_time)
self.assertEqual(len(find), 2)
self.assertEqual(find[0].get('in'), 3)
self.assertEqual(find[1].get('in'), 4)
ddcoll = coll.dedup()
self.assertEqual(ddcoll.size(), 3)
self.assertEqual(ddcoll.at(1).get('in'), 4) # the second dup event
# indexed events
coll = Collection(IDX_EVENT_DUP)
find = coll.at_key('1d-12355')
self.assertEqual(len(find), 2)
self.assertEqual(find[0].get('value'), 43)
self.assertEqual(find[1].get('value'), 44)
ddcoll = coll.dedup()
self.assertEqual(ddcoll.size(), 3)
self.assertEqual(ddcoll.at(1).get('value'), 44) # the second dup event
# time range events
test_end_ts = aware_utcnow()
test_begin_ts = test_end_ts - datetime.timedelta(hours=12)
test_end_ms = ms_from_dt(test_end_ts)
test_begin_ms = ms_from_dt(test_begin_ts)
dup_tre = [
TimeRangeEvent((test_begin_ms, test_end_ms), 11),
TimeRangeEvent((test_begin_ms + 60000, test_end_ms + 60000), 12),
TimeRangeEvent((test_begin_ms + 60000, test_end_ms + 60000), 13),
TimeRangeEvent((test_begin_ms + 120000, test_end_ms + 120000), 14),
]
coll = Collection(dup_tre)
search = TimeRange(test_begin_ms + 60000, test_end_ms + 60000)
find = coll.at_key(search)
self.assertEqual(len(find), 2)
self.assertEqual(find[0].get('value'), 12)
self.assertEqual(find[1].get('value'), 13)
ddcoll = coll.dedup()
self.assertEqual(ddcoll.size(), 3)
self.assertEqual(ddcoll.at(1).get('value'), 13) # the second dup event
def test_list_as_map(self):
"""test collection.list_as_map()"""
coll = Collection(EVENT_LIST_DUP)
cmap = coll.event_list_as_map()
self.assertEqual(len(cmap), 3)
self.assertEqual(len(cmap.get(1429673400000)), 1)
self.assertEqual(len(cmap.get(1429673460000)), 2) # dups
def test_new_same(self):
"""trigger an error for coverage."""
self.assertFalse(Event.same(EVENT_LIST[0], IDX_EVENT_DUP[0]))
def test_nested_merge(self):
"""trigger merging nested data."""
# pylint: disable=invalid-name
e_ts = aware_utcnow()
e1 = Event(e_ts, dict(payload=dict(a=1)))
e2 = Event(e_ts, dict(payload=dict(b=2)))
emerge = Event.merge([e1, e2])
self.assertEqual(emerge[0].get('payload.a'), 1)
self.assertEqual(emerge[0].get('payload.b'), 2)
def test_bad_args(self):
"""bad args for new TimeSeries functions/coverage."""
# def timeseries_list_reduce(data, series_list, reducer, field_spec=None):
def test_func():
"""test function."""
i = 1
return i
with self.assertRaises(TimeSeriesException):
TimeSeries.timeseries_list_reduce({}, {}, test_func)
with self.assertRaises(TimeSeriesException):
TimeSeries.timeseries_list_reduce({}, [], {})
def test_is_duplicate(self):
"""Test Event.is_duplicate()"""
# events
# pylint: disable=invalid-name
e_ts = aware_utcnow()
e1 = Event(e_ts, 23)
e2 = Event(e_ts, 23)
self.assertTrue(Event.is_duplicate(e1, e2))
self.assertTrue(Event.is_duplicate(e1, e2, ignore_values=False))
e3 = Event(e_ts, 25)
self.assertTrue(Event.is_duplicate(e1, e3))
self.assertFalse(Event.is_duplicate(e1, e3, ignore_values=False))
# indexed events
ie1 = IndexedEvent('1d-12355', {'value': 42})
ie2 = IndexedEvent('1d-12355', {'value': 42})
self.assertTrue(Event.is_duplicate(ie1, ie2))
self.assertTrue(Event.is_duplicate(ie1, ie2, ignore_values=False))
ie3 = IndexedEvent('1d-12355', {'value': 44})
self.assertTrue(Event.is_duplicate(ie1, ie3))
self.assertFalse(Event.is_duplicate(ie1, ie3, ignore_values=False))
# time range events
test_end_ts = aware_utcnow()
test_begin_ts = test_end_ts - datetime.timedelta(hours=12)
test_end_ms = ms_from_dt(test_end_ts)
test_begin_ms = ms_from_dt(test_begin_ts)
tre1 = TimeRangeEvent((test_begin_ms, test_end_ms), 11)
tre2 = TimeRangeEvent((test_begin_ms, test_end_ms), 11)
self.assertTrue(Event.is_duplicate(tre1, tre2))
self.assertTrue(Event.is_duplicate(tre1, tre2, ignore_values=False))
tre3 = TimeRangeEvent((test_begin_ms, test_end_ms), 22)
self.assertTrue(Event.is_duplicate(tre1, tre3))
self.assertFalse(Event.is_duplicate(tre1, tre3, ignore_values=False))
if __name__ == '__main__':
unittest.main()
``` |
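For reference, the duplicate-handling helpers covered by compare_test.py can be combined as below; a minimal sketch, assuming the Collection and Event APIs used in the tests (at_key() keyed by a datetime for plain Events, dedup() keeping the later duplicate). The commented values are illustrative.
```python
from pypond.collection import Collection
from pypond.event import Event
from pypond.util import dt_from_ms

events = [
    Event(1429673400000, {'in': 1}),
    Event(1429673460000, {'in': 3}),
    Event(1429673460000, {'in': 4}),   # duplicate key
]
coll = Collection(events)

# All events sharing a key (a datetime for plain Events).
dups = coll.at_key(dt_from_ms(1429673460000))
print(len(dups))                                                  # 2

# is_duplicate() compares keys only, unless ignore_values=False.
print(Event.is_duplicate(dups[0], dups[1]))                       # True
print(Event.is_duplicate(dups[0], dups[1], ignore_values=False))  # False

# dedup() keeps one event per key (the later duplicate, as in the tests).
deduped = coll.dedup()
print(deduped.size(), deduped.at(1).get('in'))                    # 2 4
```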