{
"source": "jfjlaros/online-array",
"score": 3
}
#### File: online-array/online_array/__init__.py
```python
import argparse
import os
from .online_array import OnlineArray, online_array, unbounded_online_array
from .utils import fill_array
__version_info__ = ('0', '0', '1')
__version__ = '.'.join(__version_info__)
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
__homepage__ = 'https://github.com/jfjlaros/online-array.git'
usage = __doc__.split('\n\n\n')
class ProtectedFileType(argparse.FileType):
def __call__(self, string):
if 'w' in self._mode and os.path.exists(string):
raise IOError('failed to create "{}": file exists.'.format(string))
return super(ProtectedFileType, self).__call__(string)
def doc_split(func):
return func.__doc__.split('\n\n')[0]
def version(name):
return '{} version {}\n\nAuthor : {} <{}>\nHomepage : {}'.format(
name, __version__, __author__, __contact__, __homepage__)
```
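For context, a minimal sketch of how `ProtectedFileType`, `doc_split` and `version` might be wired into an `argparse` front end. The `main` function and the argument names are illustrative only, not part of the package:

```python
import argparse

from online_array import ProtectedFileType, doc_split, version


def main():
    """Example command line tool.

    Everything after the first blank line is stripped by doc_split().
    """
    # Hypothetical CLI wiring; the argument names are made up for illustration.
    parser = argparse.ArgumentParser(description=doc_split(main))
    parser.add_argument('output', type=ProtectedFileType('w'),
                        help='output file (refuses to overwrite an existing file)')
    parser.add_argument('-v', action='version', version=version('example'))
    args = parser.parse_args()
    args.output.write('example\n')


if __name__ == '__main__':
    main()
```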
{
"source": "jfjlaros/ordered-map",
"score": 3
}
#### File: ordered-map/tests/test_library.py
```python
from ordered_map.ordered_map import (
_deserialise, _from_list, _merge, _serialise, _to_list)
from ordered_map import read, write
def test_deserialise() -> None:
assert _deserialise('a.b=c\n') == {'a': {'b': 'c'}}
assert _deserialise('a.b=c') == {'a': {'b': 'c'}}
assert _deserialise('a.b = c') == {'a': {'b': 'c'}}
assert _deserialise('a.b.c=d') == {'a': {'b': {'c': 'd'}}}
def test_serialise() -> None:
assert _serialise({'a': {'b': 'c'}}) == 'a.b=c\n'
assert _serialise({'a': {'b': {'c': 'd'}}}) == 'a.b.c=d\n'
def test_merge() -> None:
assert _merge({'a': 'b'}, {'x': 'y'}) == {'a': 'b', 'x': 'y'}
assert _merge({'a': 'b'}, {'a': 'y'}) == {'a': {'b', 'y'}}
assert _merge({'a': {'b': 'c'}}, {'a': {'y': 'z'}}) == {
'a': {'b': 'c', 'y': 'z'}}
assert _merge({'a': {'b': 'c'}}, {'a': {'b': 'z'}}) == {
'a': {'b': {'c', 'z'}}}
def test_to_list() -> None:
assert _to_list({'0': 'a'}) == ['a']
assert _to_list({'1': 'a'}) == {'1': 'a'}
assert _to_list({'0': 'a', '1': 'b'}) == ['a', 'b']
assert _to_list({'1': 'a', '0': 'b'}) == ['b', 'a']
assert _to_list({'0': 'a', '2': 'b'}) == {'0': 'a', '2': 'b'}
assert _to_list({'a': {'0', '1'}}) == {'a': {'0', '1'}}
assert _to_list({'a': {'0': 'x', '1': 'y'}}) == {'a': ['x', 'y']}
assert _to_list({'0': {'a': 'b'}, '1': {'x': 'y'}}) == [
{'a': 'b'}, {'x': 'y'}]
def test_from_list() -> None:
assert _from_list(['a']) == {'0': 'a'}
assert _from_list({'1': 'a'}) == {'1': 'a'}
assert _from_list(['a', 'b']) == {'0': 'a', '1': 'b'}
assert _from_list(['b', 'a']) == {'1': 'a', '0': 'b'}
assert _from_list({'0': 'a', '2': 'b'}) == {'0': 'a', '2': 'b'}
assert _from_list({'a': {'0', '1'}}) == {'a': {'0', '1'}}
assert _from_list({'a': ['x', 'y']}) == {'a': {'0': 'x', '1': 'y'}}
assert _from_list([{'a': 'b'}, {'x': 'y'}]) == {
'0': {'a': 'b'}, '1': {'x': 'y'}}
def test_read_skip() -> None:
assert read('') == []
assert read('\n') == []
assert read('\r') == []
assert read('# Comment.') == []
assert read(' a=b') == []
assert read('\ta=b') == []
def test_read_single() -> None:
assert read('a=b') == {'a': 'b'}
def test_read_multi() -> None:
assert read('a=b\nx=y') == {'a': 'b', 'x': 'y'}
def test_write_skip() -> None:
assert write({}) == ''
assert write([]) == ''
def test_write_single() -> None:
assert write({'a': 'b'}) == '{}\na=b\n'.format(62 * '#')
def test_write_multi() -> None:
assert write({'a': 'b', 'x': 'y'}) == '{}\na=b\n{}\nx=y\n'.format(
62 * '#', 62 * '#')
```
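Outside the test suite, the same behaviour looks like this; a minimal sketch based only on what the assertions above pin down:

```python
from ordered_map import read, write

# read() turns "key=value" lines into a dict; blank lines, comments and
# indented lines are skipped (see test_read_skip above).
config = read('a=b\nx=y')
assert config == {'a': 'b', 'x': 'y'}

# write() emits one "key=value" line per entry, each preceded by a
# 62-character '#' separator line (see test_write_multi above).
print(write(config), end='')
```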
{
"source": "jfjlaros/snvmixtools",
"score": 3
}
#### File: snvmixtools/snvmixtools/snvmix_parse.py
```python
class SNVMixRecord(object):
def __init__(self, line):
"""
:param line: a line of SNVMix2 output
:type line: str
"""
data = line.strip().split()
location = data[0].split(':')
self.chromosome = location[0]
self.position = int(location[1])
self.reference = data[1]
self.alternative = data[2]
details = data[3].split(',')
self.reference_count = int(details[0].split(':')[1])
self.alternative_count = int(details[1].split(':')[1])
        self.genotype_likelihood = list(map(float, details[2:5]))
self.genotype = int(details[5])
#__init__
def __str__(self):
return "{}:{} {} {} {}:{},{}:{},{},{}\n".format(
self.chromosome, self.position, self.reference, self.alternative,
self.reference, self.reference_count, self.alternative,
self.alternative_count,
",".join(map("{:.10f}".format, self.genotype_likelihood)),
self.genotype)
#SNVMixRecord
def walker(handle):
    """Yield an SNVMixRecord for every line of an open SNVMix2 file.

    :param handle: open readable handle to SNVMix2 output
    """
    for line in handle:
        yield SNVMixRecord(line)
#walker
```
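A short usage sketch of `walker`; the file name is a placeholder:

```python
# Hypothetical usage; 'calls.snvmix' is a placeholder file name.
with open('calls.snvmix') as handle:
    for record in walker(handle):
        print(record.chromosome, record.position,
              record.reference_count, record.alternative_count)
```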
{
"source": "jfjlaros/spreadscript",
"score": 2
}
#### File: spreadscript/spreadscript/spreadscript.py
```python
import os
import subprocess
import uno
import unohelper
from com.sun.star.connection import NoConnectException
from com.sun.star.lang import IllegalArgumentException
class SpreadScript(object):
def __init__(self, file_name=None):
"""Initialise the class.
:arg str file_name: File name.
"""
self._desktop = None
self._start_soffice()
self._connect_soffice()
if file_name:
self.open(file_name)
def __del__(self):
"""Close the soffice instance."""
if self._desktop:
self.close()
def _start_soffice(self):
"""Start soffice in the background."""
process_id = os.fork()
if not process_id:
subprocess.call(
'soffice --accept="socket,host=localhost,port=2002;urp;" ' +
'--norestore --nologo --nodefault --headless', shell=True)
exit()
def _connect_soffice(self):
"""Connect to a running soffice instance."""
context = uno.getComponentContext()
resolver = context.ServiceManager.createInstanceWithContext(
'com.sun.star.bridge.UnoUrlResolver', context)
while True:
try:
context = resolver.resolve(
'uno:socket,host=localhost,port=2002;urp;' +
'StarOffice.ComponentContext')
except NoConnectException:
pass
else:
break
self._desktop = context.ServiceManager.createInstanceWithContext(
'com.sun.star.frame.Desktop', context)
def _get_cell_text(self, column, row):
return self._interface.getCellByPosition(column, row).getString()
def _get_cell_value(self, column, row):
return self._interface.getCellByPosition(column, row).getValue()
def _get_cell_formula(self, column, row):
return self._interface.getCellByPosition(column, row).getFormula()
def _get_link(self, column, row):
return self._get_cell_formula(column, row)[1:].replace(
'$', '').split('.')
def _set_cell_value(self, sheet, cell, value):
self._sheets.getByName(sheet).getCellRangeByName(cell).setValue(value)
def _read_table(self, column):
"""Read names and values from a table.
:arg int column: Upper-left coordinate of the table content.
:returns dict: Table content.
"""
inputs = {}
row = 3
while True:
name = self._get_cell_text(column, row)
value = self._get_cell_value(column + 1, row)
if not name:
break
inputs[name] = value
row += 1
return inputs
def _write_table(self, column, data):
"""Write values to a table.
:arg int column: Upper-left coordinate of the table content.
:arg dict data: Data to be written.
"""
row = 3
while True:
name = self._get_cell_text(column, row)
if not name:
break
if name in data:
sheet, cell = self._get_link(column + 1, row)
self._set_cell_value(sheet, cell, data[name])
row += 1
def open(self, file_name):
"""Open a spreadsheet.
:arg str file_name: File name.
"""
doc_url = unohelper.systemPathToFileUrl(os.path.abspath(file_name))
try:
self._desktop.loadComponentFromURL(doc_url, '_blank', 0, ())
except IllegalArgumentException as error:
raise ValueError('no such file or format not supported')
self._sheets = self._desktop.getCurrentComponent().Sheets
if 'Interface' not in self._sheets:
raise ValueError('no sheet named "Interface" found')
self._interface = self._sheets.Interface
def close(self):
"""Close the soffice instance."""
self._desktop.terminate()
def read_input(self):
return self._read_table(1)
def write_input(self, data):
self._write_table(1, data)
def read_output(self):
return self._read_table(4)
```
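A hedged sketch of the intended workflow, assuming a spreadsheet that contains an "Interface" sheet laid out the way the table readers above expect; the file name and input name are placeholders:

```python
# Hypothetical usage; 'model.ods' and 'radius' are placeholders.
ss = SpreadScript('model.ods')

print(ss.read_input())           # names and values from the input table (column 1)
ss.write_input({'radius': 3.0})  # update an input cell through its cell link
print(ss.read_output())          # values from the output table (column 4)

ss.close()
```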
{
"source": "jf---/joplin-api",
"score": 2
}
#### File: joplin-api/tests/test_resources.py
```python
import pytest
import json
from joplin_api import JoplinApi
@pytest.mark.asyncio
async def test_get_resources(get_token):
joplin = JoplinApi(token=get_token)
res = await joplin.get_resources()
assert res.status_code == 200
@pytest.mark.asyncio
async def test_create_get_update_delete_download_resource(get_token):
joplin = JoplinApi(token=get_token)
properties = {'title': 'test resource'}
assert 'title' in properties
file_name = 'tests/cactus.png'
res = await joplin.create_resource(file_name, **properties)
resource_id = json.loads(res.text)['id']
assert res.status_code == 200
res = await joplin.get_resource(resource_id)
assert res.status_code == 200
properties = {'title': 'test update resource'}
file_name = 'tests/update_cactus.png'
res = await joplin.update_resources(resource_id, **properties)
assert res.status_code == 200
res = await joplin.download_resources(resource_id)
assert res.status_code == 200
res = await joplin.delete_resources(resource_id)
assert res.status_code == 200
```
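Outside pytest, the same API can be exercised directly; a minimal sketch where the token value is a placeholder:

```python
import asyncio

from joplin_api import JoplinApi


async def main():
    # 'YOUR_TOKEN' is a placeholder for the Joplin Web Clipper authorisation token.
    joplin = JoplinApi(token='YOUR_TOKEN')
    res = await joplin.get_resources()
    print(res.status_code, res.text)


asyncio.run(main())
```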
{
"source": "jfk1408/msbase.py",
"score": 2
}
#### File: msbase.py/msbase/subprocess_.py
```python
import subprocess
import os
import sys
import traceback
import glob
from os.path import join
from multiprocessing import Pool, Value
import multiprocessing
import time
from termcolor import cprint
from threading import Thread
from queue import Queue, Empty
from msbase.logging import logger
def timed(func):
def function_wrapper(*args, **kwargs):
now = time.time()
ret = func(*args, **kwargs)
logger.info("%s(%s, %s) spent %.2fs" %
(func.__qualname__, args, kwargs, time.time() - now))
return ret
return function_wrapper
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def call_std(args, cwd=None, env={}, output=True, timeout_s=None):
if output:
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=1,
close_fds=ON_POSIX, cwd=cwd, env=dict(os.environ, **env))
start_time = time.time()
stdout = ""
q_stdout = Queue()
t_stdout = Thread(target=enqueue_output, args=(p.stdout, q_stdout))
t_stdout.daemon = True
t_stdout.start()
stderr = ""
q_stderr = Queue()
t_stderr = Thread(target=enqueue_output, args=(p.stderr, q_stderr))
t_stderr.daemon = True
t_stderr.start()
while True:
return_code = p.poll()
if return_code is not None:
break
try:
stdout_line = str(q_stdout.get_nowait(), "utf-8")
except Empty:
stdout_line = ''
try:
stderr_line = str(q_stderr.get_nowait(), "utf-8")
except Empty:
stderr_line = ''
if stdout_line:
stdout += stdout_line
logger.info(stdout_line.rstrip())
if stderr_line:
stderr += stderr_line
logger.warning(stderr_line.rstrip())
if timeout_s is not None and time.time() - start_time > timeout_s:
p.kill()
return (-1, "", "TIMEOUT!")
while True:
try:
stdout_line = str(q_stdout.get(timeout=.1), "utf-8")
except Empty:
break
stdout += stdout_line
logger.info(stdout_line.rstrip())
if timeout_s is not None and time.time() - start_time > timeout_s:
p.kill()
return (-1, "", "TIMEOUT!")
while True:
try:
stderr_line = str(q_stderr.get(timeout=.1), "utf-8")
except Empty:
break
stderr += stderr_line
logger.warning(stderr_line.rstrip())
if timeout_s is not None and time.time() - start_time > timeout_s:
p.kill()
return (-1, "", "TIMEOUT!")
return (return_code, stdout, stderr)
else:
code = subprocess.call(args, cwd=cwd, env=dict(os.environ, **env), timeout=timeout_s)
return (code, None, None)
@timed
def try_call_std(args, cwd=None, env={}, verbose=True,
output=True, noexception=False, timeout_s=None):
    '''An asynchronously logged process executor that returns the
    essential information: stdout, stderr and the return code.
    '''
if verbose:
cprint("+ " + " ".join(args), "blue")
code, stdout, stderr = call_std(args, cwd, env, output, timeout_s=timeout_s)
if not noexception and code != 0:
if verbose:
print("STDOUT: ")
print(stdout)
print("STDERR: ")
cprint(stderr, "red")
raise Exception(str(code) + ": calling " + " ".join(args) + " failed")
else:
return stdout, stderr, code
def multiprocess(task, inputs, n: int, verbose=True, return_dict=True, throws=False, debug_mode=False):
    '''How to use this effectively:
    1. Use debug_mode=True to switch to a tracked, sequential for-loop for debugging.
    '''
if debug_mode:
results = []
for arg in inputs:
start_time = time.time()
logger.info("Working on %s" % arg)
results.append(task(arg))
logger.info("Time spent: %.2f" % (time.time() - start_time))
return results
counter = Value('i', 0)
total = float(len(inputs))
start_time = time.time()
global run
def run(input):
with counter.get_lock():
if verbose:
logger.info("%fs - progress: %f" % (time.time() - start_time, counter.value / total))
counter.value += 1
try:
return (True, task(input))
except Exception as e:
return (False, "%s\n%s" % (e, traceback.format_exc()))
with Pool(n) as p:
results = p.map(run, inputs)
if verbose:
logger.info("total spent time: %f" % (time.time() - start_time))
if throws:
ret = []
for ok, r in results:
if not ok:
raise Exception(str(r))
ret.append(r)
return ret
if return_dict:
return dict(zip(inputs, results))
else:
return results
```
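A brief sketch of how `try_call_std` and `multiprocess` might be used together; the shell command and the `square` task are illustrative only:

```python
from msbase.subprocess_ import multiprocess, try_call_std

# Run a command, streaming its output to the logger as it arrives.
stdout, stderr, code = try_call_std(['echo', 'hello'])


def square(x):
    return x * x


# Map a task over inputs with 4 worker processes. With the default
# return_dict=True each input maps to an (ok, result) tuple.
results = multiprocess(square, [1, 2, 3, 4], n=4, verbose=False)
print(results[3])  # (True, 9)
```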
{
"source": "JFK24/seqQscorer",
"score": 3
}
#### File: example_dataset/reads_annotation/utils.py
```python
import locale
import subprocess
import sys
def getSystemCall(call):
process = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
out = out.decode(locale.getdefaultlocale()[1])
err = err.decode(locale.getdefaultlocale()[1])
if process.returncode:
print("call failed, call was: %s" % ' '.join(call), file=sys.stderr)
print("Message was: %s" % str(out), file=sys.stderr)
print("Error code was %s, stderr: %s" % (process.returncode, err), file=sys.stderr, end='')
raise Exception('runSystemCall Exception')
return out, err
```
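A quick, hypothetical invocation of the helper above; the command is a placeholder and assumes the tool is on `PATH`:

```python
# Hypothetical call; 'fastqc --version' is only an illustrative command.
out, err = getSystemCall(['fastqc', '--version'])
print(out.strip())
```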
#### File: seqQscorer/utils/parser.py
```python
import os
import numpy as np
import pandas as pd
global FastQC_value_map
FastQC_value_map = {'FAIL': 0, 'WARN': 1, 'PASS': 2}
def get_FastQC_features(feature_file_path):
features = {}
with open(feature_file_path, 'r') as feature_file:
for line in feature_file:
line = line.strip().split('\t')
feature_name = line[1].replace(' ', '_')
value = FastQC_value_map.get(line[0], np.nan)
features['FastQC_'+feature_name] = value
return features
def parse_BowtieSE(lines):
lines = lines.split('\n')
features = {}
features['BowtieSE_no_mapping'] = float(lines[2].split('(')[1].split('%')[0])
features['BowtieSE_uniquely'] = float(lines[3].split('(')[1].split('%')[0])
features['BowtieSE_multiple'] = float(lines[4].split('(')[1].split('%')[0])
features['BowtieSE_overall'] = float(lines[5].split('%')[0])
# for mixed Bowtie
features['BowtieMI_no_mapping'] = features['BowtieSE_no_mapping']
features['BowtieMI_uniquely'] = features['BowtieSE_uniquely']
features['BowtieMI_multiple'] = features['BowtieSE_multiple']
features['BowtieMI_overall'] = features['BowtieSE_overall']
return features
def parse_BowtiePE(lines):
lines = lines.split('\n')
features = {}
features['BowtiePE_con_no_mapping'] = float(lines[2].split('(')[1].split('%')[0])
features['BowtiePE_con_uniquely'] = float(lines[3].split('(')[1].split('%')[0])
features['BowtiePE_con_multiple'] = float(lines[4].split('(')[1].split('%')[0])
features['BowtiePE_dis_uniquely'] = float(lines[7].split('(')[1].split('%')[0])
features['BowtiePE_cod_no_mapping'] = float(lines[11].split('(')[1].split('%')[0])
features['BowtiePE_cod_uniquely'] = float(lines[12].split('(')[1].split('%')[0])
features['BowtiePE_cod_multiple'] = float(lines[13].split('(')[1].split('%')[0])
features['BowtiePE_overall'] = float(lines[14].split('%')[0])
# for mixed Bowtie
features['BowtieMI_no_mapping'] = features['BowtiePE_con_no_mapping']
features['BowtieMI_uniquely'] = features['BowtiePE_con_uniquely']
features['BowtieMI_multiple'] = features['BowtiePE_con_multiple']
features['BowtieMI_overall'] = features['BowtiePE_overall']
# for SE Bowtie
features['BowtieSE_no_mapping'] = features['BowtiePE_con_no_mapping']
features['BowtieSE_uniquely'] = features['BowtiePE_con_uniquely']
features['BowtieSE_multiple'] = features['BowtiePE_con_multiple']
features['BowtieSE_overall'] = features['BowtiePE_overall']
return features
def get_Bowtie_features(feature_file_path):
    with open(feature_file_path, 'r') as feature_file:
        lines = feature_file.read()
if 'concordantly' in lines and 'discordantly' in lines:
return parse_BowtiePE(lines)
else:
return parse_BowtieSE(lines)
def get_readsAnno_features(feature_file_path):
features = {}
with open(feature_file_path, 'r') as f:
f.readline()
for line in f:
line = line.strip().split('\t')
feature_name = line[1]
feature_name = feature_name.replace('"', '')
feature_name = feature_name.replace("'", '')
feature_name = feature_name.replace(' (<=300)', '')
feature_name = feature_name.replace(' ', '_')
feature_name = 'readsAnno_'+feature_name
features[feature_name] = float(line[2])
return features
def get_TSS_features(feature_file_path):
tss = pd.read_csv(feature_file_path, sep='\t')
tss_dist = list(map(str,tss['tss_dist']))
feature_names = ['TSS_'+name if name[0] == '-' else 'TSS_+'+name for name in tss_dist]
feature_values = list(tss['perc'])
return dict(zip(feature_names, feature_values))
```
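A hedged sketch of combining these parsers into a single feature dictionary; the report file paths are placeholders:

```python
# Hypothetical usage; the report file paths are placeholders.
features = {}
features.update(get_FastQC_features('reports/fastqc_summary.txt'))
features.update(get_Bowtie_features('reports/bowtie.log'))
features.update(get_readsAnno_features('reports/reads_annotation.tsv'))
features.update(get_TSS_features('reports/tss.tsv'))
print(len(features), 'features parsed')
```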
{
"source": "jfkcooper/SScanSS-2",
"score": 4
}
#### File: core/geometry/colour.py
```python
from ..math.vector import Vector4
from ..math.misc import clamp
class Colour:
"""Creates Colour object that represents a normalized [0, 1] RGBA colour.
:param red: Red channel value between 0 and 1
:type red: float
:param green: Green channel value between 0 and 1
:type green: float
:param blue: Blue channel value between 0 and 1
:type blue: float
:param alpha: Alpha channel value between 0 and 1.
:type alpha: float
"""
def __init__(self, red, green, blue, alpha=1.0):
self.__colour = Vector4()
self.r = red
self.g = green
self.b = blue
self.a = alpha
@property
def r(self):
"""Gets and sets value of red channel
:return: red value
:rtype: float
"""
return self.__colour.x
@r.setter
def r(self, value):
self.__colour.x = clamp(value)
@property
def g(self):
"""Gets and sets value of green channel
:return: green value
:rtype: float
"""
return self.__colour.y
@g.setter
def g(self, value):
self.__colour.y = clamp(value)
@property
def b(self):
"""Gets and sets value of blue channel
:return: blue value
:rtype: float
"""
return self.__colour.z
@b.setter
def b(self, value):
self.__colour.z = clamp(value)
@property
def a(self):
"""Gets and sets value of alpha channel
:return: alpha value
:rtype: float
"""
return self.__colour.w
@a.setter
def a(self, value):
self.__colour.w = clamp(value)
def invert(self):
"""Inverts the RGB channels i.e (1-r, 1-g, 1-b, a) of colour
:return: inverse of colour
:rtype: Colour
"""
return Colour(1 - self.r, 1 - self.g, 1 - self.b, self.a)
@property
def rgba(self):
"""Gets un-normalized colour values
:return: un-normalized RGBA colour [0-255]
:rtype: numpy.ndarray
"""
return (self.__colour[:] * 255).astype(int)
@property
def rgbaf(self):
"""Gets normalized colour values
:return: normalized RGBA colour [0-1]
:rtype: numpy.ndarray
"""
return self.__colour[:]
@staticmethod
def normalize(red=0, green=0, blue=0, alpha=255):
"""Create Colour object by converting to normalized RGBA from
un-normalized values
:param red: Red channel value between 0 and 255
:type red: int
:param green: Green channel value between 0 and 255
:type green: int
:param blue: Blue channel value between 0 and 255
:type blue: int
:param alpha: Alpha channel value between 0 and 255.
:type alpha: int
:return: normalized RGBA colour
:rtype: Colour
"""
return Colour(red / 255, green / 255, blue / 255, alpha / 255)
@staticmethod
def white():
"""Creates white colour
:return: white colour
:rtype: Colour
"""
return Colour(1.0, 1.0, 1.0)
@staticmethod
def black():
"""Creates black colour
:return: black colour
:rtype: Colour
"""
return Colour(0.0, 0.0, 0.0)
def __getitem__(self, index):
return self.__colour[index]
def __str__(self):
return f'rgba({self.r}, {self.g}, {self.b}, {self.a})'
def __repr__(self):
return f'Colour({self.r}, {self.g}, {self.b}, {self.a})'
```
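A brief example of using the `Colour` class defined above (assuming the class and its `Vector4` dependency are importable):

```python
# Example usage of the Colour class above.
c = Colour.normalize(255, 128, 0)  # from 8-bit channel values
print(c)            # rgba(1.0, 0.50196..., 0.0, 1.0)
print(c.rgba)       # [255 128   0 255]
print(c.invert())   # rgba(0.0, 0.49803..., 1.0, 1.0)
print(Colour.white()[0])  # 1.0 via __getitem__
```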
#### File: core/geometry/volume.py
```python
from enum import Enum, unique
import numpy as np
from scipy.interpolate import CubicSpline, interp1d
from ..math.matrix import Matrix44
class Curve:
"""Creates a Curve object used to generate transfer function for volumes
:param inputs: input volume intensities
:type inputs: numpy.ndarray
:param outputs: output colour alpha
:type outputs: numpy.ndarray
:param bounds: minimum and maximum intensity in volume
:type bounds: Tuple[float, float]
    :param curve_type: Type of fit for the curve
:type curve_type: Curve.Type
"""
@unique
class Type(Enum):
"""Type of curve"""
Cubic = 'Cubic'
Linear = 'Linear'
def __init__(self, inputs, outputs, bounds, curve_type):
self.inputs = inputs
self.outputs = outputs
self.bounds = bounds
self.type = curve_type
self.f = None
self.transfer_function = np.tile(np.linspace(0.0, 1.0, num=256, dtype=np.float32)[:, None], (1, 4))
if len(inputs) > 1:
if curve_type == self.Type.Cubic:
self.f = CubicSpline(inputs, outputs)
else:
self.f = interp1d(inputs, outputs, kind='linear', bounds_error=False, assume_sorted=True)
value = self.evaluate(np.linspace(bounds[0], bounds[-1], num=256))
self.transfer_function[:, 3] = value
self.transfer_function = self.transfer_function.flatten()
def evaluate(self, inputs):
"""Computes the outputs alpha values for the input intensity
:param inputs: input volume intensities
:type inputs: numpy.ndarray
:return: output colour alpha
:rtype: numpy.ndarray
"""
if self.f is None:
outputs = np.clip(np.full(len(inputs), self.outputs[0]), 0.0, 1.0)
else:
outputs = np.clip(self.f(inputs), 0.0, 1.0)
outputs[inputs < self.inputs[0]] = self.outputs[0]
outputs[inputs > self.inputs[-1]] = self.outputs[-1]
return outputs
class Volume:
"""Creates a Volume object. This is the result of loading in a tomography scan, either from a nexus file,
or a set of TIFF files. It is the equivalent of the mesh object but for tomography data
:param data: N x M x L array of intensities, created by stacking L TIFF images, each of dimension N x M
:type data: numpy.ndarray
:param x: N array of pixel coordinates
:type x: numpy.ndarray
:param y: M array of pixel coordinates
:type y: numpy.ndarray
:param z: L array of pixel coordinates
:type z: numpy.ndarray
"""
def __init__(self, data, x, y, z):
self.data = data
self.x = x
self.y = y
self.z = z
self.histogram = np.histogram(data, bins=256)
inputs = np.array([self.histogram[1][0], self.histogram[1][-1]])
outputs = np.array([0.0, 1.0])
if inputs[0] == inputs[1]:
inputs = inputs[1:]
outputs = outputs[1:]
self.curve = Curve(inputs, outputs, inputs, Curve.Type.Cubic)
x_spacing = (x[-1] - x[0]) / (len(x) - 1)
y_spacing = (y[-1] - y[0]) / (len(y) - 1)
z_spacing = (z[-1] - z[0]) / (len(z) - 1)
x_origin = x[0] + (x[-1] - x[0]) / 2
y_origin = y[0] + (y[-1] - y[0]) / 2
z_origin = z[0] + (z[-1] - z[0]) / 2
self.voxel_size = np.array([x_spacing, y_spacing, z_spacing], np.float32)
self.transform = Matrix44.fromTranslation([x_origin, y_origin, z_origin])
@property
def shape(self):
"""Returns shape of volume i.e. width, height, depth
:return: shape of volume
:rtype: Tuple[int, int, int]
"""
return self.data.shape
@property
def extent(self):
"""Returns extent or diagonal of volume
:return: extent of volume
:rtype: numpy.ndarray[float]
"""
return self.voxel_size * self.shape
```
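An illustrative construction of a `Volume` from synthetic data, assuming the class above (and its sscanss dependencies) are importable; the array sizes and spacing are arbitrary choices for the sketch:

```python
import numpy as np

# Synthetic 32 x 32 x 16 volume on a 1 mm grid (illustrative values only).
data = np.random.randint(0, 256, size=(32, 32, 16)).astype(np.uint8)
x = np.linspace(-15.5, 15.5, 32)
y = np.linspace(-15.5, 15.5, 32)
z = np.linspace(-7.5, 7.5, 16)

volume = Volume(data, x, y, z)
print(volume.shape)       # (32, 32, 16)
print(volume.voxel_size)  # [1. 1. 1.]
print(volume.extent)      # voxel_size * shape = [32. 32. 16.]
```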
#### File: sscanss/editor/dialogs.py
```python
import json
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from sscanss.core.instrument import Link, circle_point_analysis, generate_description
from sscanss.core.math import clamp
from sscanss.core.util import create_scroll_area
from .widgets import ScriptWidget, JawsWidget, PositionerWidget, DetectorWidget
class Controls(QtWidgets.QDialog):
"""Creates a widget that creates and manages the instrument control widgets.
The widget creates instrument controls if the instrument description file is correct,
otherwise the widget will be blank.
:param parent: main window instance
:type parent: MainWindow
"""
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.setWindowTitle('Instrument Control')
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.tabs = QtWidgets.QTabWidget()
self.tabs.setMinimumWidth(600)
self.tabs.setMinimumHeight(600)
self.tabs.tabBarClicked.connect(self.updateTabs)
layout.addWidget(self.tabs)
self.last_tab_index = 0
self.last_stack_name = ''
self.last_collimator_name = {}
def createWidgets(self):
"""Creates widgets for positioner, jaws, detector, and script"""
self.tabs.clear()
positioner_widget = PositionerWidget(self.parent)
if self.last_stack_name and self.last_stack_name in self.parent.instrument.positioning_stacks.keys():
positioner_widget.changeStack(self.last_stack_name)
positioner_widget.stack_combobox.activated[str].connect(self.setStack)
self.tabs.addTab(create_scroll_area(positioner_widget), 'Positioner')
self.tabs.addTab(create_scroll_area(JawsWidget(self.parent)), 'Jaws')
collimator_names = {}
for name in self.parent.instrument.detectors:
pretty_name = name if name.lower() == 'detector' else f'{name} Detector'
detector_widget = DetectorWidget(self.parent, name)
self.tabs.addTab(create_scroll_area(detector_widget), pretty_name)
collimator_name = self.last_collimator_name.get(name, '')
if collimator_name:
collimator_names[name] = collimator_name
detector_widget.combobox.setCurrentText(collimator_name)
detector_widget.changeCollimator()
detector_widget.collimator_changed.connect(self.setCollimator)
self.last_collimator_name = collimator_names
self.script_widget = ScriptWidget(self.parent)
self.tabs.addTab(create_scroll_area(self.script_widget), 'Script')
self.tabs.setCurrentIndex(clamp(self.last_tab_index, 0, self.tabs.count()))
def reset(self):
"""Resets stored states"""
self.last_tab_index = 0
self.last_stack_name = ''
self.last_collimator_name = {}
def setStack(self, stack_name):
"""Stores the last loaded positioning stack. This preserves the active stack
between description file modifications
:param stack_name: name of active positioning stack
:type stack_name: str
"""
self.last_stack_name = stack_name
def setCollimator(self, detector, collimator_name):
"""Stores the last loaded collimator on a detector. This preserves the active
collimator between description file modifications
:param detector: name of detector
:type detector: str
:param collimator_name: name of active collimator
:type collimator_name: str
"""
self.last_collimator_name[detector] = collimator_name
def updateTabs(self, index):
"""Stores the last open tab.
:param index: tab index
:type index: int
"""
self.last_tab_index = index
if self.tabs.tabText(index) == 'Script':
self.script_widget.updateScript()
class CalibrationWidget(QtWidgets.QDialog):
"""Creates a widget for performing kinematic calibration and displaying the residual errors.
:param parent: main window instance
:type parent: MainWindow
:param points: measured 3D points for each joint
:type points: List[numpy.ndarray]
:param joint_types: types of each joint
:type joint_types: List[Link.Type]
:param joint_offsets: measured offsets for each measurement
:type joint_offsets: List[numpy.ndarray]
:param joint_homes: home position for each measurement
:type joint_homes: List[float]
"""
def __init__(self, parent, points, joint_types, joint_offsets, joint_homes):
super().__init__(parent)
self.points = points
self.offsets = joint_offsets
self.robot_name = 'Positioning System'
self.order = list(range(len(points)))
self.names = [f'Joint {i + 1}' for i in range(len(points))]
self.homes = joint_homes
self.types = joint_types
main_layout = QtWidgets.QVBoxLayout()
self.setLayout(main_layout)
# create stacked layout
self.stack = QtWidgets.QStackedLayout()
main_layout.addLayout(self.stack)
self.stack1 = QtWidgets.QWidget()
self.stack2 = QtWidgets.QWidget()
self.stack.addWidget(self.stack1)
self.stack.addWidget(self.stack2)
self.createCalibrationForm()
self.createResultTable()
self.setLayout(main_layout)
self.setMinimumSize(800, 720)
self.setWindowTitle('Kinematic Calibration')
def calibrate(self):
"""Creates kinematic model of a robot from measurement and displays result"""
self.results = circle_point_analysis(self.points, self.types, self.offsets, self.homes)
self.json = generate_description(self.robot_name, self.results.base, self.results.tool, self.order, self.names,
self.types, self.results.joint_axes, self.results.joint_origins, self.homes,
self.offsets)
self.displayResiduals()
self.stack.setCurrentIndex(1)
def changeRobotName(self, value):
"""Changes name of the robot
:param value: name of robot
:type value: str
"""
self.robot_name = value
self.validateForm()
def changeOrder(self, values):
"""Changes the display order of joints
:param values: joint indices arranged in the display order
:type values: List[int]
"""
size = len(self.points)
try:
order = [int(value) - 1 for value in values.split(',')]
if len(set(order)) != size:
raise ValueError
if min(order) != 0 or max(order) != size - 1:
raise ValueError
self.order = order
except ValueError:
self.order = []
self.validateForm()
def changeJointNames(self, index, value):
"""Changes the name of the joint at given index
:param index: joint index
:type index: int
:param value: joint name
:type value: str
"""
self.names[index] = value
self.validateForm()
def changeHome(self, index, value):
"""Changes the home position of the joint at given index
:param index: joint index
:type index: int
:param value: joint home position
:type value: str
"""
self.homes[index] = value
def changeType(self, index, value):
"""Changes the type of the joint at given index
:param index: joint index
:type index: int
:param value: joint type
:type value: str
"""
self.types[index] = Link.Type(value.lower())
def createCalibrationForm(self):
"""Creates inputs for the calibration arguments"""
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(5)
layout.setContentsMargins(0, 0, 0, 0)
row_layout = QtWidgets.QHBoxLayout()
self.error_text = QtWidgets.QLabel()
self.error_text.setWordWrap(True)
self.error_text.setStyleSheet('color: red;')
self.calibrate_button = QtWidgets.QPushButton('Generate Model')
self.calibrate_button.clicked.connect(self.calibrate)
row_layout.addStretch(1)
row_layout.addWidget(self.error_text, 4)
row_layout.addStretch(1)
row_layout.addWidget(self.calibrate_button)
layout.addLayout(row_layout)
layout.addSpacing(10)
row_layout = QtWidgets.QHBoxLayout()
row_layout.addWidget(QtWidgets.QLabel('Name of Positioner:\t'))
name_line_edit = QtWidgets.QLineEdit(self.robot_name)
name_line_edit.textChanged.connect(self.changeRobotName)
row_layout.addWidget(name_line_edit, 2)
row_layout.addStretch(1)
layout.addLayout(row_layout)
row_layout = QtWidgets.QHBoxLayout()
order_line_edit = QtWidgets.QLineEdit(','.join(str(x + 1) for x in self.order))
order_line_edit.textChanged.connect(self.changeOrder)
row_layout.addWidget(QtWidgets.QLabel('Custom Order:\t'))
row_layout.addWidget(order_line_edit, 2)
row_layout.addStretch(1)
layout.addLayout(row_layout)
divider = QtWidgets.QFrame()
divider.setFrameShape(QtWidgets.QFrame.HLine)
divider.setFrameShadow(QtWidgets.QFrame.Sunken)
layout.addSpacing(10)
layout.addWidget(QtWidgets.QLabel('<p style="font-size:14px">Joint Information</p>'))
layout.addWidget(divider)
layout.addSpacing(5)
# Define Scroll Area
scroll_area = QtWidgets.QScrollArea()
scroll_area.setWidgetResizable(True)
scroll_area.setFrameShape(QtWidgets.QFrame.NoFrame)
widget = QtWidgets.QWidget()
sub_layout = QtWidgets.QVBoxLayout()
sub_layout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
widget.setLayout(sub_layout)
sub_layout.setSpacing(5)
sub_layout.setContentsMargins(10, 0, 10, 0)
scroll_area.setWidget(widget)
layout.addWidget(scroll_area)
for i in range(len(self.points)):
name_line_edit = QtWidgets.QLineEdit(self.names[i])
name_line_edit.textChanged.connect(lambda value, index=i: self.changeJointNames(index, value))
joint_type_combobox = QtWidgets.QComboBox()
joint_type_combobox.setView(QtWidgets.QListView())
joint_type_combobox.addItems([t.value.title() for t in Link.Type])
joint_type_combobox.setCurrentText(self.types[i].value.title())
joint_type_combobox.currentTextChanged.connect(lambda value, index=i: self.changeType(index, value))
joint_home_spinbox = QtWidgets.QDoubleSpinBox()
joint_home_spinbox.setDecimals(3)
joint_home_spinbox.setRange(-10000, 10000)
joint_home_spinbox.setValue(self.homes[i])
joint_home_spinbox.valueChanged.connect(lambda value, index=i: self.changeHome(index, value))
row_layout = QtWidgets.QHBoxLayout()
column_layout = QtWidgets.QVBoxLayout()
column_layout.addWidget(QtWidgets.QLabel(f'Name of Joint {i + 1}:\t'))
column_layout.addWidget(name_line_edit)
column_layout.addStretch(1)
row_layout.addLayout(column_layout, 4)
row_layout.addStretch(1)
sub_layout.addLayout(row_layout)
row_layout = QtWidgets.QHBoxLayout()
column_layout = QtWidgets.QVBoxLayout()
column_layout.addWidget(QtWidgets.QLabel(f'Type of Joint {i + 1}:\t'))
column_layout.addWidget(joint_type_combobox)
column_layout.addStretch(1)
row_layout.addLayout(column_layout, 2)
column_layout = QtWidgets.QVBoxLayout()
column_layout.addWidget(QtWidgets.QLabel(f'Home for Joint {i + 1}:\t'))
column_layout.addWidget(joint_home_spinbox)
column_layout.addStretch(1)
row_layout.addLayout(column_layout, 2)
row_layout.addStretch(1)
sub_layout.addLayout(row_layout)
divider = QtWidgets.QFrame()
divider.setFrameShape(QtWidgets.QFrame.HLine)
divider.setFrameShadow(QtWidgets.QFrame.Sunken)
sub_layout.addWidget(divider)
sub_layout.addStretch(1)
self.stack1.setLayout(layout)
def copyModel(self):
"""Copies json description of robot to the clipboard"""
QtWidgets.QApplication.clipboard().setText(json.dumps(self.json, indent=2))
def saveModel(self):
"""Saves json description of the robot to file"""
filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Choose a file name", '.', "JSON File (*.json)")
if not filename:
return
with open(filename, 'w') as json_file:
json.dump(self.json, json_file, indent=2)
def createResultTable(self):
"""Creates widget to show calibration errors"""
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
sub_layout = QtWidgets.QHBoxLayout()
self.filter_combobox = QtWidgets.QComboBox()
self.filter_combobox.setView(QtWidgets.QListView())
self.filter_combobox.addItems(['All', *[f'{i + 1}' for i in range(len(self.points))]])
self.filter_combobox.currentIndexChanged.connect(self.displayResiduals)
self.copy_model_button = QtWidgets.QPushButton('Copy Model')
self.copy_model_button.clicked.connect(self.copyModel)
self.save_model_button = QtWidgets.QPushButton('Save Model')
self.save_model_button.clicked.connect(self.saveModel)
sub_layout.addWidget(QtWidgets.QLabel('Show Joint: '))
sub_layout.addWidget(self.filter_combobox)
sub_layout.addStretch(1)
sub_layout.addWidget(self.copy_model_button)
sub_layout.addWidget(self.save_model_button)
layout.addLayout(sub_layout)
layout.addSpacing(10)
self.result_label = QtWidgets.QLabel()
self.tabs = QtWidgets.QTabWidget()
self.tabs.setTabPosition(QtWidgets.QTabWidget.West)
self.model_error_table = QtWidgets.QTableWidget()
self.model_error_table.setColumnCount(4)
self.model_error_table.setHorizontalHeaderLabels(['X', 'Y', 'Z', 'Norm'])
self.model_error_table.setAlternatingRowColors(True)
self.model_error_table.setMinimumHeight(300)
self.model_error_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.model_error_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.model_error_table.horizontalHeader().setMinimumSectionSize(40)
self.model_error_table.horizontalHeader().setDefaultSectionSize(40)
self.tabs.addTab(self.model_error_table, 'Model Error')
self.fit_error_table = QtWidgets.QTableWidget()
self.fit_error_table.setColumnCount(4)
self.fit_error_table.setHorizontalHeaderLabels(['X', 'Y', 'Z', 'Norm'])
self.fit_error_table.setAlternatingRowColors(True)
self.fit_error_table.setMinimumHeight(300)
self.fit_error_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.fit_error_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.fit_error_table.horizontalHeader().setMinimumSectionSize(40)
self.fit_error_table.horizontalHeader().setDefaultSectionSize(40)
self.tabs.addTab(self.fit_error_table, 'Fitting Error')
self.tabs.currentChanged.connect(self.displayResiduals)
layout.addWidget(self.result_label)
layout.addWidget(self.tabs)
self.stack2.setLayout(layout)
def displayResiduals(self):
"""Populates table widgets with residual error"""
tol = 0.1
active_tab = self.tabs.currentIndex()
joint_to_show = self.filter_combobox.currentIndex() - 1
if active_tab == 0:
table = self.model_error_table
residuals = self.results.model_errors
else:
table = self.fit_error_table
residuals = self.results.fit_errors
residuals = np.vstack(residuals) if joint_to_show == -1 else residuals[joint_to_show]
norm = np.linalg.norm(residuals, axis=1)
result_text = '<p style="font-size:14px">The Average {} is ' \
'<span style="color:{};font-weight:500;">{:.3f}</span> mm</p>'
mean = np.mean(norm)
colour = 'Tomato' if mean > tol else 'SeaGreen'
self.result_label.setText(result_text.format(self.tabs.tabText(active_tab), colour, mean))
table.setRowCount(residuals.shape[0])
for row, vector in enumerate(residuals):
x = QtWidgets.QTableWidgetItem(f'{vector[0]:.3f}')
x.setTextAlignment(QtCore.Qt.AlignCenter)
y = QtWidgets.QTableWidgetItem(f'{vector[1]:.3f}')
y.setTextAlignment(QtCore.Qt.AlignCenter)
z = QtWidgets.QTableWidgetItem(f'{vector[2]:.3f}')
z.setTextAlignment(QtCore.Qt.AlignCenter)
n = QtWidgets.QTableWidgetItem(f'{norm[row]:.3f}')
n.setTextAlignment(QtCore.Qt.AlignCenter)
tomato = QtGui.QBrush(QtGui.QColor('Tomato'))
if abs(vector[0]) > tol:
x.setData(QtCore.Qt.BackgroundRole, tomato)
if abs(vector[1]) > tol:
y.setData(QtCore.Qt.BackgroundRole, tomato)
if abs(vector[2]) > tol:
z.setData(QtCore.Qt.BackgroundRole, tomato)
if norm[row] > tol:
n.setData(QtCore.Qt.BackgroundRole, tomato)
table.setItem(row, 0, x)
table.setItem(row, 1, y)
table.setItem(row, 2, z)
table.setItem(row, 3, n)
def validateForm(self):
"""Validates calibration inputs"""
error = []
size = len(self.names)
if not self.robot_name:
error.append('"Name of Positioner" cannot be blank')
if not self.order:
error.append(f'"Custom order" should contain comma separated indices for joints 1 to {size}.')
for index, name in enumerate(self.names):
if not name:
error.append(f'"Name of Joint {index + 1}" cannot be blank')
if len(set(self.names)) != len(self.names):
error.append('Joint names must be unique')
self.error_text.setText('\n'.join(error))
self.calibrate_button.setDisabled(len(error) != 0)
class FindWidget(QtWidgets.QDialog):
"""Creates a widget that searches the Instrument file text and highlights the next occurrence.
    Can choose to match case or require the search to match whole words only.
:param parent: main window instance
:type parent: MainWindow
"""
def __init__(self, parent):
super().__init__(parent)
self.setWindowTitle('Find')
self.search_box = QtWidgets.QLineEdit()
self.search_box.setPlaceholderText("Search..")
self.match_case = QtWidgets.QCheckBox()
self.match_case.setText("Match Case")
self.whole_word = QtWidgets.QCheckBox()
self.whole_word.setText("Match whole word")
self.find_button = QtWidgets.QPushButton("Find")
self.search_box.textChanged.connect(self.resetSearch)
self.match_case.stateChanged.connect(self.resetSearch)
self.whole_word.stateChanged.connect(self.resetSearch)
self.find_button.clicked.connect(self.search)
self.status_box = QtWidgets.QLabel()
layout = QtWidgets.QGridLayout()
layout.addWidget(self.search_box, 0, 0)
layout.addWidget(self.match_case, 1, 0)
layout.addWidget(self.whole_word, 2, 0)
layout.addWidget(self.find_button, 2, 1)
layout.addWidget(self.status_box, 3, 0)
        self.setLayout(layout)
        self.fist_search_flag = True  # ensure the flag exists before the first search
def search(self):
"""Performs a search for the input_text in the editor window"""
input_text = self.search_box.text()
case = self.match_case.isChecked()
whole_word = self.whole_word.isChecked()
if self.fist_search_flag:
findable = self.parent().editor.findFirst(input_text, False, case, whole_word, False, True, 0, 0)
self.fist_search_flag = False
else:
findable = self.parent().editor.findFirst(input_text, False, case, whole_word, False)
if not findable:
self.status_box.setText("No more entries found.")
else:
self.status_box.clear()
def resetSearch(self):
"""Resets the FindWidget window"""
self.fist_search_flag = True
self.status_box.setText("")
```
#### File: SScanSS-2/tests/test_calibration.py
```python
import json
import unittest
import numpy as np
from sscanss.core.instrument import circle_point_analysis, generate_description, Link
from sscanss.core.instrument.create import extract_positioner
from sscanss.core.instrument.calibration import correct_line, correct_circle_axis, robot_world_calibration
class TestCalibration(unittest.TestCase):
def testGeometryCorrection(self):
axis = np.array([0.0, 0.0, -1.0])
center = np.zeros(3)
offsets = np.array([180, 0, 90, 70])
points = np.array([[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]])
new_axis = correct_circle_axis(axis, center, points, offsets)
np.testing.assert_array_almost_equal(new_axis, [0, 0, 1], decimal=5)
np.testing.assert_array_almost_equal(correct_circle_axis(new_axis, center, points, offsets),
new_axis,
decimal=5)
axis = np.array([-1.0, 0.0, 0.0])
center = np.zeros(3)
offsets = np.array([100, 0, 50])
points = np.array([[50.0, 0.0, 0.0], [-50.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
new_center, new_axis = correct_line(axis, center, points, offsets)
np.testing.assert_array_almost_equal(new_axis, [1, 0, 0], decimal=5)
np.testing.assert_array_almost_equal(new_center, [-50, 0, 0], decimal=5)
new_center, new_axis = correct_line(new_axis, center, points, offsets)
np.testing.assert_array_almost_equal(new_axis, [1, 0, 0], decimal=5)
np.testing.assert_array_almost_equal(new_center, [-50, 0, 0], decimal=5)
def testCPA(self):
points = [
np.array([
[12.0, 0.0, 1.5],
[11.41421356, 1.41421356, 1.5],
[10.0, 2.0, 1.5],
[8.58578644, 1.41421356, 1.5],
[8.0, 0.0, 1.5],
]),
np.array([
[10.0, 0.0, 1.5],
[10.29289322, -0.70710678, 1.5],
[11.0, -1.0, 1.5],
[11.70710678, -0.70710678, 1.5],
[12.0, 0.0, 1.5],
]),
]
offsets = [np.array([0.0, 45.0, 90.0, 135.0, 180.0]), np.array([-180.0, -135.0, -90.0, -45.0, 0.0])]
types = [Link.Type.Revolute, Link.Type.Revolute]
homes = [0.0, 0.0]
result = circle_point_analysis(points, types, offsets, homes)
np.testing.assert_array_almost_equal(result.joint_axes[0], [0.0, 0.0, 1.0], decimal=5)
np.testing.assert_array_almost_equal(result.joint_axes[1], [0.0, 0.0, 1.0], decimal=5)
np.testing.assert_array_almost_equal(result.joint_origins[0], [0.0, 0.0, 0.0], decimal=5)
np.testing.assert_array_almost_equal(result.joint_origins[1], [1.0, 0.0, 0.0], decimal=5)
base, tool = np.identity(4), np.identity(4)
base[:3, 3] = [10.0, 0.0, 1.5]
tool[:3, 3] = [1.0, 0.0, 0.0]
np.testing.assert_array_almost_equal(result.base, base, decimal=5)
np.testing.assert_array_almost_equal(result.tool, tool, decimal=5)
np.testing.assert_array_almost_equal(np.vstack(result.fit_errors), np.zeros((10, 3)), decimal=5)
np.testing.assert_array_almost_equal(np.vstack(result.model_errors), np.zeros((10, 3)), decimal=5)
offsets = [
np.array([0.0, 100.0, 200.0, 300.0, 400, 500.0]),
np.array([-180.0, -108.0, -36.0, 36.0, 108.0, 180.0]),
np.array([-200.0, -120.0, -40.0, 40.0, 120, 200.0]),
np.array([-200.0, -120.0, -40.0, 40.0, 120, 200.0]),
]
points = [
np.array([
[0, 0, 0],
[0.004324125, 0.007919232, 100.0577353],
[0.00519, 0.009611275, 200.0346462],
[0.00936, 0.0229328, 299.9897745],
[0.016288942, -0.00449079, 399.9475168],
[-0.019166718, 0.01355, 499.934],
]),
np.array([
[-37.702407, -100.3246853, 0.060174943],
[-72.4308, -47.72367282, 0.02528772],
[-33.11670555, 1.571, -0.002],
[25.89742938, -20.58587784, -0.0142],
[23.05826241, -83.51255368, 0.032545921],
[-37.6133, -100.3567116, 0.048915878],
]),
np.array([
[-0.008, -199.7817, 0.173440527],
[-0.03388, -119.9040682, 0.132],
[-0.012866725, -40.03464456, 0.0608],
[0.02147, 40.09068065, -0.003563545],
[-0.001905469, 120.1877634, -0.077537662],
[0.0085, 200.1388357, -0.126678],
]),
np.array([
[-200.1472381, 0.04181174, 0.048689129],
[-120.0620691, 0.035838747, 0.044916269],
[-40.039, 0.029215491, 0.015246372],
[40.04469207, 0.020326262, -0.001128861],
[120.0471608, 0.030719316, -0.00639],
[200.0948445, 0.045893343, -0.055839322],
]),
]
types = [Link.Type.Prismatic, Link.Type.Revolute, Link.Type.Prismatic, Link.Type.Prismatic]
homes = [0.0, 0.0, 0.0, 0.0]
result = circle_point_analysis(points, types, offsets, homes)
np.testing.assert_array_almost_equal(result.joint_axes[0], [-1.59357e-05, 1.25323e-05, 1.0], decimal=5)
np.testing.assert_array_almost_equal(result.joint_axes[1], [-2.27729e-04, -6.01415e-04, -1.0], decimal=5)
np.testing.assert_array_almost_equal(result.joint_axes[2], [7.59974e-05, 1.0, -7.83437e-04], decimal=5)
np.testing.assert_array_almost_equal(result.joint_axes[3], [1.0, -1.37606e-06, -2.47331e-04], decimal=5)
np.testing.assert_array_almost_equal(result.joint_origins[0], [0.0, 0.0, 0.0], decimal=5)
np.testing.assert_array_almost_equal(result.joint_origins[1], [-18.87706, -50.12061, 0.0254302], decimal=5)
np.testing.assert_array_almost_equal(result.joint_origins[2], [-0.0111, 0.11102, 0.03246], decimal=5)
np.testing.assert_array_almost_equal(result.joint_origins[3], [-0.01692, 0.02885, 0.01364], decimal=5)
base = np.identity(4)
base[:3, 3] = [0.00665, 0.00512, -0.00605]
np.testing.assert_array_almost_equal(result.base, base, decimal=5)
np.testing.assert_array_almost_equal(result.tool, np.identity(4), decimal=5)
self.assertAlmostEqual(np.vstack(result.fit_errors).max(), 0.0203117, 5)
self.assertAlmostEqual(np.vstack(result.model_errors).max(), 0.18427, 5)
def testDescriptionGeneration(self):
robot_name = "Two Link"
joint_names = ["a", "b"]
types = [Link.Type.Revolute, Link.Type.Prismatic]
homes = [0, 50]
order = [1, 0]
base = np.array([[0, -1, 0, 10.0], [0, 0, -1, 0], [1, 0, 0, 1.5], [0, 0, 0, 1]])
tool = np.array([[0, 0, -1, 1.0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
axes = [np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0])]
origins = [np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0])]
offsets = [np.array([-180, 0.0, 180]), np.array([100.0, 50.0, 0.0])]
lower_limits = [-np.pi, 0]
upper_limits = [np.pi, 100]
desc = generate_description(robot_name, base, tool, order, joint_names, types, axes, origins, homes, offsets)
robot = extract_positioner(desc)
self.assertEqual(robot.name, robot_name)
self.assertListEqual(robot.order, order)
np.testing.assert_array_almost_equal(robot.base, base, decimal=5)
np.testing.assert_array_almost_equal(robot.tool, tool, decimal=5)
for index, link in enumerate(robot.links):
self.assertEqual(link.name, joint_names[index])
self.assertEqual(link.type, types[index])
self.assertEqual(link.offset, homes[index])
self.assertEqual(link.lower_limit, lower_limits[index])
self.assertEqual(link.upper_limit, upper_limits[index])
np.testing.assert_array_almost_equal(link.joint_axis, axes[index], decimal=5)
np.testing.assert_array_almost_equal(link.home, origins[::-1][index], decimal=5)
self.assertNotEqual(json.dumps(desc), "")
def testRobotWorldCalibration(self):
base_to_end = [
np.array([
[-0.53170025, -0.20820506, 0.82094187, 67.585304],
[0.64494753, -0.72779989, 0.23313124, 89.949501],
[0.54894227, 0.65342039, 0.52125275, 11.512821],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
np.array([
[-0.16817957, -0.93374872, 0.31595743, 161.25485],
[0.97287369, -0.10557473, 0.20584196, 50.856190],
[-0.15884751, 0.34200510, 0.92617506, -4.0582910],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
np.array([
[-0.65820038, 0.25170165, 0.70951992, 24.240074],
[0.75066280, 0.14775321, 0.64395213, 7.0124917],
[0.057249967, 0.95645982, -0.28619426, -16.307690],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
np.array([
[0.39836720, 0.42523378, -0.81269914, 97.336334],
[-0.090480983, -0.86350000, -0.49616647, 87.571709],
[-0.91275239, 0.27119026, -0.30551448, 0.27887172],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
]
sensor_to_tool = [
np.array([
[-0.99998355, 0.0043395865, 0.0037561110, 2.8639844e-005],
[-0.0043548788, -0.99998224, -0.0040727942, 2.1180047e-005],
[0.0037383700, -0.0040890849, 0.99998468, -5.5399629e-005],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
np.array([
[-0.71019822, 0.50131381, -0.49427021, 53.210167],
[-0.00058504642, -0.70250660, -0.71167701, 75.527397],
[-0.70400161, -0.50514257, 0.49921221, 53.496731],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
np.array([
[-0.70914352, -0.49534339, -0.50174731, 53.955917],
[9.0223330e-005, -0.71169728, 0.70248622, -75.499924],
[-0.70506412, 0.49811831, 0.50474024, 52.796341],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
np.array([
[0.99834156, 0.057526905, 0.0021761090, 0.17139678],
[-0.057260111, 0.99619621, -0.065683857, 6.5794134],
[-0.0059464201, 0.065450318, 0.99783814, 0.23322107],
[0.00000000, 0.00000000, 0.00000000, 1.0000000],
]),
]
tool, base = robot_world_calibration(base_to_end, sensor_to_tool)
expected_tool = [
[0.91168422, 0.2246568, -0.34403665, -8.85228243],
[0.27665157, 0.28345605, 0.9182138, -10.1062245],
[0.30380224, -0.9322993, 0.19627075, 5.39666712],
[0.0, 0.0, 0.0, 1.0],
]
expected_base = [
[0.28941257, -0.4618411, -0.83841705, 22.47052877],
[0.94446686, 0.2801974, 0.1716734, -100.95790596],
[0.15563645, -0.84154157, 0.51728627, 64.39097877],
[0.0, 0.0, 0.0, 1.0],
]
np.testing.assert_array_almost_equal(expected_tool, tool, decimal=5)
np.testing.assert_array_almost_equal(expected_base, base, decimal=5)
```
{
"source": "JFKenso/fitbit_insights",
"score": 2
}
#### File: main/lambda/FitBitIngestion.py
```python
from __future__ import print_function
import base64
import urllib2
import urllib
import sys
import json
import os
import boto3
from datetime import date, timedelta
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
ddb = boto3.client('dynamodb')
ddbService = boto3.resource('dynamodb')
ml = boto3.client('machinelearning')
sns = boto3.client('sns')
#Use this URL to refresh the access token
TokenURL = "https://api.fitbit.com/oauth2/token"
#Some constants defining API error handling responses
TokenRefreshedOK = "Token refreshed OK"
ErrorInAPI = "Error when making API call that I couldn't handle"
#Get the config from the config file. This is the access and refresh tokens
def GetConfig():
tableName = "Fitbit_Authorization_Table"
table = ddbService.Table(tableName)
configuredUsers = table.scan(
FilterExpression=Attr('Status').eq("A")
)
return configuredUsers['Items']
def GetUserTokens(FitbitUserID):
tableName = "Fitbit_Authorization_Table"
table = ddbService.Table(tableName)
userData = table.scan(
FilterExpression=Attr('Status').eq("A") & Key('FitbitUserID').eq(FitbitUserID)
)
at = userData['Items'][0]['Access_Token']
rt = userData['Items'][0]['Refresh_Token']
return at, rt
def WriteConfig(AccToken,RefToken, userCfg):
table = "Fitbit_Authorization_Table"
newItem = {
"FitbitUserID": {"S": userCfg['FitbitUserID']},
"Access_Token": {"S": AccToken},
"Refresh_Token": {"S": RefToken},
"Mobile": {"S": userCfg['Mobile']},
"Token_Type": {"S": "na"},
"Status": {"S": "A"},
"ClientID": {"S": userCfg['ClientID']},
"ClientSecret": {"S": userCfg['ClientSecret']}
}
response = ddb.put_item(TableName = table, Item=newItem)
#Make an HTTP POST to get a new access token
def GetNewAccessToken(userCfg):
RefToken = userCfg['Refresh_Token']
OAuthTwoClientID = userCfg['ClientID']
ClientOrConsumerSecret = userCfg['ClientSecret']
#Form the data payload
BodyText = {'grant_type' : 'refresh_token', 'refresh_token' : RefToken}
#URL Encode it
BodyURLEncoded = urllib.urlencode(BodyText)
print("Using this as the body when getting access token >>" + BodyURLEncoded )
#Start the request
tokenreq = urllib2.Request(TokenURL,BodyURLEncoded)
    #Add the headers. First we base64 encode the client id and client secret with a ':' in between and create the authorisation header
tokenreq.add_header('Authorization', 'Basic ' + base64.b64encode(OAuthTwoClientID + ":" + ClientOrConsumerSecret))
tokenreq.add_header('Content-Type', 'application/x-www-form-urlencoded')
#Fire off the request
try:
tokenresponse = urllib2.urlopen(tokenreq)
#See what we got back. If it's this part of the code it was OK
FullResponse = tokenresponse.read()
        #Need to pick out the access token and write it to the config table. Use a JSON manipulation module
ResponseJSON = json.loads(FullResponse)
#Read the access token as a string
NewAccessToken = str(ResponseJSON['access_token'])
NewRefreshToken = str(ResponseJSON['refresh_token'])
        #Write the access token back to the DynamoDB config table
WriteConfig(NewAccessToken,NewRefreshToken, userCfg)
print("New access token output >>> " + FullResponse)
except urllib2.URLError as e:
        #Getting to this part of the code means we got an error
print ("An error was raised when getting the access token. Need to stop here")
print (e.code)
print (e.read())
#This makes an API call. It also catches errors and tries to deal with them
def MakeAPICall(InURL, userCfg):
#Start the request
req = urllib2.Request(InURL)
#Add the access token in the header
req.add_header('Authorization', 'Bearer ' + userCfg['Access_Token'])
print("Calling URI: " + InURL)
print ("I used this access token " + userCfg['Access_Token'])
#Fire off the request
try:
#Do the request
response = urllib2.urlopen(req)
#Read the response
FullResponse = response.read()
#Return values
return True, FullResponse
#Catch errors, e.g. A 401 error that signifies the need for a new access token
except urllib2.URLError as e:
print ("Got this HTTP error: " + str(e.code))
HTTPErrorMessage = e.read()
print ("This was in the HTTP error message: " + HTTPErrorMessage)
#See what the error was
if (e.code == 401) and (HTTPErrorMessage.find("Access token expired") > 0):
print("Trying to refresh access token")
GetNewAccessToken(userCfg)
return False, TokenRefreshedOK
#Return that this didn't work, allowing the calling function to handle it
return False, ErrorInAPI
#Main part of the code
def lambda_handler(event, context):
today = date.today().strftime('%Y-%m-%d')
yesterday = date.today() - timedelta(1)
yesterday_date = yesterday.strftime('%Y-%m-%d')
tomorrow = date.today() + timedelta(1)
tomorrow_date = tomorrow.strftime('%Y-%m-%d')
#This is the Fitbit URL to use for the API call
FitbitProfileURL = "https://api.fitbit.com/1/user/-/profile.json"
FitbitHeartrateURL = "https://api.fitbit.com/1/user/-/activities/heart/date/"+today+"/1d/1min/time/00:00/23:59.json"
FitbitStepsURL = "https://api.fitbit.com/1/user/-/activities/steps/date/"+today+"/1d/15min.json"
FitbitSleepURL = "https://api.fitbit.com/1.2/user/-/sleep/date/"+tomorrow_date+".json"
FitbitActivityURL = "https://api.fitbit.com/1/user/-/activities/list.json?beforeDate=today&sort=desc&offset=0&limit=5"
#Get the config
userConfigs = GetConfig()
for userCfg in userConfigs:
try:
FitbitUserID = ""
Access_Token = ""
Refresh_Token = ""
#Make the Profile API call
APICallOK, ProfileData = MakeAPICall(FitbitProfileURL, userCfg)
table = "ProfileData"
if APICallOK:
parsed_profile = json.loads(ProfileData)
weight = str(parsed_profile['user']['weight'])
FitbitUserID = str(parsed_profile['user']['encodedId'])
age = str(parsed_profile['user']['age'])
averageDailySteps = str(parsed_profile['user']['averageDailySteps'])
height = str(parsed_profile['user']['height'])
localCity = str(parsed_profile['user']['timezone'])
userCfg['firstName'] = str(parsed_profile['user']['firstName'])
#print ("weight: " + weight)
#print ProfileData
#print weight
#print FitbitUserID
item = {
"FitbitUserID": {"S": FitbitUserID},
"RecordDate": {"S": yesterday_date},
"weight": {"N": weight},
"averageDailySteps": {"N": averageDailySteps},
"age": {"N": age},
"height": {"N": height},
"localCity": {"S": localCity}
}
response = ddb.put_item(TableName = table, Item = item);
else:
if (ProfileData == TokenRefreshedOK):
print ("Refreshed the access token. Can go again")
Access_Token, Refresh_Token = GetUserTokens(userCfg['FitbitUserID'])
if (FitbitUserID == ""):
FitbitUserID = userCfg['FitbitUserID']
userCfg['Access_Token'] = Access_Token
userCfg['Refresh_Token'] = Refresh_Token
else:
print( ErrorInAPI )
sns_message = "Running for " + FitbitUserID + " userCfg: " + userCfg['FitbitUserID']
sns.publish(TopicArn='arn:aws:sns:us-east-1:445802302022:JFeldmanSMSAlert', Message=sns_message)
#Make the Heartrate API call
APICallOK, HeartRateData = MakeAPICall(FitbitHeartrateURL, userCfg)
table = "HeartRateData"
if APICallOK:
parsed_hr = json.loads(HeartRateData)
intradayObject = parsed_hr['activities-heart-intraday']['dataset']
for struct in intradayObject:
recordDateTime = str(yesterday_date) + " " + str(struct['time'])
item = {
"FitbitUserID": {"S": FitbitUserID},
"RecordDate": {"S": recordDateTime},
"heartrate": {"S": str(struct['value'])}
}
response = ddb.put_item(TableName = table, Item = item);
#print("put response: " + str(response))
pass
#print (HeartRateData)
else:
if (HeartRateData == TokenRefreshedOK):
print ("Refreshed the access token. Can go again")
Access_Token, Refresh_Token = GetUserTokens(FitbitUserID)
userCfg['Access_Token'] = Access_Token
userCfg['Refresh_Token'] = Refresh_Token
else:
print( ErrorInAPI )
            #Make the Steps API call
APICallOK, StepsData = MakeAPICall(FitbitStepsURL, userCfg)
table = "DailyStepsData"
if APICallOK:
# First record daily steps
parsed_steps = json.loads(StepsData)
steps = str(parsed_steps['activities-steps'][0]['value'])
print("steps: " + steps)
item = {
"FitbitUserID": {"S": FitbitUserID},
"RecordDate": {"S": str(yesterday_date)},
"steps": {"S": steps}
}
response = ddb.put_item(TableName = table, Item = item);
# Then iterate through in 15 minute increments
table = "GranularStepsData"
intradayObject = parsed_steps['activities-steps-intraday']['dataset']
for struct in intradayObject:
recordDateTime = str(yesterday_date) + " " + str(struct['time'])
item = {
"FitbitUserID": {"S": FitbitUserID},
"RecordDate": {"S": recordDateTime},
"steps": {"S": str(struct['value'])}
}
response = ddb.put_item(TableName = table, Item = item);
pass
else:
if (StepsData == TokenRefreshedOK):
print ("Refreshed the access token. Can go again")
Access_Token, Refresh_Token = GetUserTokens(FitbitUserID)
userCfg['Access_Token'] = Access_Token
userCfg['Refresh_Token'] = Refresh_Token
else:
print( ErrorInAPI )
#Make the Sleep API call
APICallOK, SleepData = MakeAPICall(FitbitSleepURL, userCfg)
table = "DailySleepData"
if APICallOK:
                # First record the daily sleep summary
parsed_sleep = json.loads(SleepData)
totalDeep = parsed_sleep['summary']['stages']['deep']
totalLight = parsed_sleep['summary']['stages']['light']
totalRem = parsed_sleep['summary']['stages']['rem']
totalWake = parsed_sleep['summary']['stages']['wake']
totalMinsAsleep = parsed_sleep['summary']['totalMinutesAsleep']
totalTimeInBed = parsed_sleep['summary']['totalTimeInBed']
for sleepObj in parsed_sleep['sleep']:
dateOfSleep = str(sleepObj['dateOfSleep'])
sleepEfficiency = sleepObj['efficiency']
wakeup = str(sleepObj['endTime'])
isMainSleep = str(sleepObj['isMainSleep'])
if(sleepEfficiency < 50):
sns_message = userCfg['firstName'] + "'s slep last night was awful. Danger danger danger!!!"
sns.publish(TopicArn='arn:aws:sns:us-east-1:445802302022:JFeldmanSMSAlert',
Message=sns_message)
elif(sleepEfficiency < 80) and (sleepEfficiency >= 50):
sns_message = userCfg['firstName'] + "'s slep last night was pretty poor. Today's mood could be grumpy."
sns.publish(TopicArn='arn:aws:sns:us-east-1:445802302022:JFeldmanSMSAlert',
Message=sns_message)
else:
sns_message = userCfg['firstName'] + "'s slep last night was good. Today's mood should be happy. If there's something you've been meaning to tell your hubby, today is the day."
sns.publish(TopicArn='arn:aws:sns:us-east-1:445802302022:JFeldmanSMSAlert',
Message=sns_message)
item = {
"FitbitUserID": {"S": FitbitUserID},
"RecordDate": {"S": str(dateOfSleep)},
"totalDeep": {"S": str(totalDeep)},
"totalLight": {"S": str(totalLight)},
"totalRem": {"S": str(totalRem)},
"totalWake": {"S": str(totalWake)},
"totalMinsAsleep": {"S": str(totalMinsAsleep)},
"totalTimeInBed": {"S": str(totalTimeInBed)},
"sleepEfficiency": {"S": str(sleepEfficiency)},
"wakeup": {"S": str(wakeup)},
"isMainSleep": {"S": str(isMainSleep)}
}
print("Daily Sleep Data: " + str(item))
table = "DailySleepData"
response = ddb.put_item(TableName = table, Item = item);
table = "DetailedSleepData"
for sleepSegments in sleepObj['levels']['data']:
sleepSegmentTime = sleepSegments['dateTime']
sleepSegmentLevel = sleepSegments['level']
sleepSegmentSeconds = str(sleepSegments['seconds'])
item = {
"FitbitUserID": {"S": FitbitUserID},
"SleepSegmentTime": {"S": sleepSegmentTime},
"SleepSegmentLevel": {"S": sleepSegmentLevel},
"SleepSegmentSeconds": {"S": sleepSegmentSeconds}
}
response = ddb.put_item(TableName = table, Item = item);
pass
pass
else:
if (SleepData == TokenRefreshedOK):
print ("Refreshed the access token. Can go again")
Access_Token, Refresh_Token = GetUserTokens(FitbitUserID)
userCfg['Access_Token'] = Access_Token
userCfg['Refresh_Token'] = Refresh_Token
else:
print( ErrorInAPI )
except Exception as e:
if hasattr(e, 'message'):
print("Unexpected error: " + e.message)
sns_message = "Unexpected error: " + str(e.message)
else:
print("Unexpected error: " + e)
sns_message = "Unexpected error: " + str(e)
sns.publish(TopicArn='arn:aws:sns:us-east-1:445802302022:JFeldmanSMSAlert', Message=sns_message)
```
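A minimal local-invocation sketch for the handler above; it assumes the module-level imports and config lookups already present in the file, and note that lambda_handler does not read its event or context arguments.
```python
# Hypothetical local run; event/context are unused by lambda_handler above.
if __name__ == "__main__":
    lambda_handler({}, None)
```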
|
{
"source": "jfkinslow/flask-mailing",
"score": 2
}
|
#### File: flask-mailing/tests/test_message.py
```python
import pytest
from flask_mailing.schemas import Message, MultipartSubtypeEnum
from flask_mailing.msg import MailMsg
import os
CONTENT = "file test content"
def test_initialize():
message = Message(
subject="test subject",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
assert message.subject == "test subject"
def test_recipients_properly_initialized():
message = Message(
subject="test subject",
recipients=[],
body="test",
subtype="plain"
)
assert message.recipients == []
def test_add_recipient_method():
message = Message(
subject="test subject",
recipients=[],
body="test",
subtype="plain"
)
message.add_recipient("<EMAIL>")
assert message.recipients == ["<EMAIL>"]
def test_sendto_properly_set():
msg = Message(subject="subject", recipients=["<EMAIL>", "<EMAIL>"],
cc=["<EMAIL>"], bcc=["<EMAIL>"], reply_to=["<EMAIL>"])
assert len(msg.recipients) == 2
assert len(msg.cc) == 1
assert len(msg.bcc) == 1
assert len(msg.reply_to) == 1
def test_plain_message():
message = Message(
subject="test subject",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
assert message.body == "test"
def test_charset():
message = Message(
subject="test subject",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
assert message.charset == "utf-8"
def test_message_str():
message = Message(
subject="test subject",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
assert type(message.body) == str
def test_plain_message_with_attachments():
directory = os.getcwd()
attachement = directory + "/files/attachement.txt"
msg = Message(subject="testing",
recipients=["<EMAIL>"],
attachments=[attachement],
body="test mail body")
with open(attachement, "w") as file:
file.write(CONTENT)
assert len(msg.attachments) == 1
def test_plain_message_with_attach_method():
directory = os.getcwd()
attachement = directory + "/files/attachement_1.txt"
msg = Message(subject="testing",
recipients=["<EMAIL>"],
body="test mail body")
with open(attachement, "w") as file:
file.write(CONTENT)
with open(attachement, "rb") as fp:
msg.attach("attachement_1.txt", fp.read())
assert len(msg.attachments) == 1
def test_empty_subject_header():
message = Message(
subject="",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
assert len(message.subject) == 0
def test_bcc():
msg = Message(subject="subject", recipients=[],
bcc=["<EMAIL>"])
assert len(msg.bcc) == 1
assert msg.bcc == ["<EMAIL>"]
def test_replyto():
msg = Message(subject="subject", recipients=[],
reply_to=["<EMAIL>"])
assert len(msg.reply_to) == 1
assert msg.reply_to == ["<EMAIL>"]
def test_cc():
msg = Message(subject="subject", recipients=[],
cc=["<EMAIL>"])
assert len(msg.cc) == 1
assert msg.cc == ["<EMAIL>"]
def test_multipart_subtype():
message = Message(
subject="test subject",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
assert message.multipart_subtype == MultipartSubtypeEnum.mixed
@pytest.mark.asyncio
async def test_msgid_header():
message = Message(
subject="test subject",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
msg = MailMsg(**message.dict())
msg_object = await msg._message('<EMAIL>')
assert msg_object['Message-ID'] is not None
@pytest.mark.asyncio
async def test_message_charset():
message = Message(
subject="test subject",
recipients=["<EMAIL>"],
body="test",
subtype="plain"
)
msg = MailMsg(**message.dict())
msg_object = await msg._message('<EMAIL>')
assert msg_object._charset is not None
assert msg_object._charset == "utf-8"
```
|
{
"source": "jfkinslow/screeps_notify",
"score": 3
}
|
#### File: services/messengers/http.py
```python
import services.config as config
import requests
class http:
def __init__(self, settings):
self.settings = settings
def sendMessage(self, notification, shard):
print('sending message from http')
data = {
'user': config.settings['screeps_username'],
'message': notification
}
headers = {'user-agent': 'screeps_notify'}
if 'api-key' in self.settings:
headers['x-api-key'] = self.settings['api-key']
if 'http_user' in self.settings:
r = requests.post(self.settings['url'],
json=data,
headers=headers,
auth=(self.settings['http_user'],
self.settings['http_password']))
else:
r = requests.post(self.settings['url'],
json=data,
headers=headers)
if r.status_code != requests.codes.ok:
raise ValueError(
'http request returned an error %s, the response is:\n%s'
% (r.status_code, r.text))
return r.status_code == requests.codes.ok
```
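A hedged usage sketch for the http messenger above; the settings keys mirror the ones read in sendMessage, and it assumes services.config.settings provides a screeps_username entry.
```python
# Illustrative only; the URL and api-key values are placeholders.
notifier = http({"url": "https://example.com/notify", "api-key": "placeholder-key"})
notifier.sendMessage("Hostile creeps spotted in E1N8", "shard3")
```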
#### File: services/messengers/slack.py
```python
import json
import services.config as config
import requests
import re
class slack:
def __init__(self, settings):
self.settings = settings
def sendMessage(self, notification, shard):
print('sending message from slack')
def addLinks(matchobj):
roomname = matchobj.group(1).upper()
return "<https://screeps.com/a/#!/room/%s/%s|%s %s>" % (shard, roomname, shard.capitalize(), roomname)
message = re.sub(r'([E|W][\d]+[N|S][\d]+)',
addLinks,
notification,
flags=re.IGNORECASE)
slack_data = {'text': message}
if 'channel' in self.settings:
slack_data['channel'] = self.settings['channel']
if 'username' in self.settings:
slack_data['username'] = self.settings['username']
if 'icon_emoji' in self.settings:
slack_data['icon_emoji'] = self.settings['icon_emoji']
r = requests.post(self.settings['webhook_url'],
data=json.dumps(slack_data),
headers={
'Content-Type': 'application/json',
'user-agent': 'screeps_notify'
})
if r.status_code != requests.codes.ok:
raise ValueError(
'Request to slack returned an error %s, the response is:\n%s'
% (r.status_code, r.text))
return r.status_code == requests.codes.ok
```
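To make the room-link substitution in sendMessage concrete, here is a small standalone sketch of the same regex rewrite (the shard name is a placeholder):
```python
import re
def add_links(matchobj, shard="shard3"):
    roomname = matchobj.group(1).upper()
    return "<https://screeps.com/a/#!/room/%s/%s|%s %s>" % (shard, roomname, shard.capitalize(), roomname)
print(re.sub(r'([E|W][\d]+[N|S][\d]+)', add_links, "Attack under way in e1n8", flags=re.IGNORECASE))
# -> Attack under way in <https://screeps.com/a/#!/room/shard3/E1N8|Shard3 E1N8>
```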
|
{
"source": "jfklima/prog_pratica",
"score": 3
}
|
#### File: prog_pratica/exercicios_antigos/barateiro.py
```python
import pytest
def soma(a, b):
return a + b
def test_soma_com_valores_maiores_que_1():
assert soma(0, 1) == 1
produtos = {}
quantidade_de_produtos = int(input("Quantos produtos? "))
for _ in range(quantidade_de_produtos):
produto = input('Produto escolhido: ')
preco = float(input(f'Preço {produto}: '))
produtos[produto] = preco
menor_preco = min(produtos.values())
produto_menor_preco = ''
for produto, preco in produtos.items():
if preco == menor_preco:
produto_menor_preco = produto
print(f'Produto mais barato: {produto_menor_preco}, Preço: R$ {menor_preco}')
```
#### File: prog_pratica/exercicios_antigos/ex_01.py
```python
from math import inf
def minimo_e_maximo(sequencia_numerica):
    ''' Returns the minimum and the maximum of an arbitrary numeric sequence.
    Complexity:
        runtime: O(n)
        space: O(3)
'''
maximo = -inf # 1
minimo = +inf # 1
for elem in sequencia_numerica: # 1
if elem > maximo: # 2
maximo = elem # 1
if elem < minimo: # 2
minimo = elem # 2
return minimo, maximo # 1
def recursivo_minmax(sequencia_numerica):
def r_minimo(sequencia):
primeiro = sequencia[0]
if len(sequencia) == 1:
return primeiro
else:
menor = r_minimo(sequencia[1:])
return menor if menor < primeiro else primeiro
def r_maximo(sequencia):
primeiro = sequencia[0]
if len(sequencia) == 1:
return primeiro
else:
maior = r_maximo(sequencia[1:])
return maior if maior > primeiro else primeiro
return r_minimo(sequencia_numerica), r_maximo(sequencia_numerica)
def recursivo_minmax_1x(sequencia_numerica):
primeiro = sequencia_numerica[0]
if len(sequencia_numerica) == 1:
return primeiro, primeiro
else:
        menor, maior = recursivo_minmax_1x(sequencia_numerica[1:])
        return min(menor, primeiro), max(maior, primeiro)
# print(minimo_e_maximo([1, 2, 3, 4]))
# print(minimo_e_maximo([1, 3, 10, 12, 44, 2, 24, 25]))
# print(minimo_e_maximo([88, 66, 10, 2, 8]))
print(recursivo_minmax([1, 2, 3, 4]))
```
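A quick illustrative check of the iterative and recursive implementations above; both should return the same (min, max) pair.
```python
print(minimo_e_maximo([88, 66, 10, 2, 8]))   # expected: (2, 88)
print(recursivo_minmax([88, 66, 10, 2, 8]))  # expected: (2, 88)
```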
|
{
"source": "jfklorenz/Python-RMedian",
"score": 3
}
|
#### File: Python-RMedian/src/rmedian.py
```python
import math
import random
import statistics
# ==================================================
# RMedian
def rmedian(X, k, d, cnt = [], rec = 0, n0 = 0):
if rec == 0:
n0 = len(X)
if cnt == []:
cnt = [0 for _ in range(len(X))]
S, XS, L, C, R = phase1(X, k, d)
S, XS, L, C, R, cnt = phase2(S, XS, L, C, R, cnt)
return phase3(X, k, d, L, C, R, cnt, rec, n0)
# ==================================================
# Phase 1
def phase1(X, k, d):
# Initiation
n = len(X)
random.shuffle(X)
S = X[:k]
XS = X[k:]
S.sort()
# Keeping the list entries below k/2
if 2*(k*math.log2(n))**0.5 < k/2:
lst = [2*(k*math.log2(n))**0.5]
if 3*(k*math.log2(n))**0.5 < k/2:
lst.append(3*(k*math.log2(n))**0.5)
while d*lst[len(lst) - 1] < k/2:
lst.append(d*lst[len(lst) - 1])
lst.append(k/2)
else:
lst = [k/2]
# Buckets
L = [[] for _ in range(len(lst) - 1)]
R = [[] for _ in range(len(lst) - 1)]
C = []
for s in S[math.floor(k / 2 - lst[0]): math.ceil(k / 2 + lst[0])]:
C.append(s)
for i in range(1, len(lst)):
for s in S[math.floor(k / 2 - lst[i]): math.floor(k / 2 - lst[i - 1])]:
L[i - 1].append(s)
for s in S[math.ceil(k / 2 + lst[i - 1]): math.ceil(k / 2 + lst[i])]:
R[i - 1].append(s)
return S, XS, L, C, R
# ==================================================
# Phase 2
def phase2(S, XS, L, C, R, cnt):
mark = [False for _ in range(2 ** 20)]
b = len(L)
random.shuffle(XS)
for x_i in XS:
med = 0
for j in reversed(range(0, b - 1)):
current = 2 ** 50
random.shuffle(L[j])
for l in L[j]:
if cnt[l] < current:
x_A = l
if mark[x_A] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_A] += 1
if x_i < x_A:
if j + c < b:
mark[x_i] = True
L[j + c].append(x_i)
med = -1
else:
med = -2
break
current2 = 2 ** 50
random.shuffle(R[j])
for r in R[j]:
if cnt[r] < current2:
x_B = r
if mark[x_B] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_B] += 1
if x_i > x_B:
if j + c < b:
mark[x_i] = True
R[j + c].append(x_i)
med = 1
else:
med = 2
break
if med == 0:
C.append(x_i)
elif med == -2:
L[len(L) - 1].append(x_i)
elif med == 2:
R[len(R) - 1].append(x_i)
return S, XS, L, C, R, cnt
# ==================================================
def phase3(X, k, d, L, C, R, cnt, rec, n0):
n = len(X)
sumL, sumR = 0, 0
for l in L:
sumL += len(l)
for r in R:
sumR += len(r)
s = sumL - sumR
# Det Median
if max(sumL, sumR) > n / 2:
res = 'DET'
return statistics.median(C), cnt, res, rec
# Expand
if s < 0:
rs = []
for r in R:
rs += r
random.shuffle(rs)
for i in range(-s):
C.append(rs[i])
for r in R:
if rs[i] in r:
r.remove(rs[i])
elif s > 0:
ls = []
for l in L:
ls += l
random.shuffle(ls)
for i in range(s):
C.append(ls[i])
for l in L:
if ls[i] in l:
l.remove(ls[i])
# AKS
if len(C) < math.log(n0) ** 4:
res = 'AKS'
return statistics.median(C), cnt, res, rec
rec += 1
return rmedian(C, k, d, cnt, rec, n0)
# ==================================================
X = [i for i in range(1025)]
print(rmedian(X, 16, 2))
```
#### File: Python-RMedian/tests/phase1_test.py
```python
import math
import random
import pytest
# ==================================================
# Phase 1
def phase1(X, k, d):
# Initiation
n = len(X)
random.shuffle(X)
S = X[:k]
XS = X[k:]
S.sort()
# Keeping the list entries below k/2
if 2*(k*math.log2(n))**0.5 < k/2:
lst = [2*(k*math.log2(n))**0.5]
if 3*(k*math.log2(n))**0.5 < k/2:
lst.append(3*(k*math.log2(n))**0.5)
while d*lst[len(lst) - 1] < k/2:
lst.append(d*lst[len(lst) - 1])
lst.append(k/2)
else:
lst = [k/2]
# Buckets
L = [[] for _ in range(len(lst) - 1)]
R = [[] for _ in range(len(lst) - 1)]
C = []
for s in S[math.floor(k / 2 - lst[0]): math.ceil(k / 2 + lst[0])]:
C.append(s)
for i in range(1, len(lst)):
for s in S[math.floor(k / 2 - lst[i]): math.floor(k / 2 - lst[i - 1])]:
L[i - 1].append(s)
for s in S[math.ceil(k / 2 + lst[i - 1]): math.ceil(k / 2 + lst[i])]:
R[i - 1].append(s)
return S, XS, L, C, R
# ==================================================
# Unittest : Parameter
@pytest.mark.parametrize(('n'), [
# Randomized input
random.randint(2**9, 2**15),
# Manuel input
2**10, 2**12, 2**14, 2**12 + 1, 2**12 - 1
])
# ==================================================
# Unittest : Test
def test_p1(n):
# Generating Tastcase
X0 = [i for i in range(n)]
k0 = int(n ** (2 / 3))
d0 = int(n ** (1 / 12))
S0, XS0, L0, C0, R0 = phase1(X0, k0, d0)
X1 = [i for i in range(n)]
k1 = int(n / math.log(n, 2)**(1/3))
d1 = int(math.log(n, 2)**(1/3))
S1, XS1, L1, C1, R1 = phase1(X1, k1, d1)
sumL0, sumR0, sumL1, sumR1 = 0, 0, 0, 0
for l0 in L0:
sumL0 += len(l0)
for l1 in L1:
sumL1 += len(l1)
for r0 in R0:
sumR0 += len(r0)
for r1 in R1:
sumR1 += len(r1)
# Test
assert sumL0 == sumR0 # ||L|| = ||R||
assert sumL1 == sumR1 # ||L|| = ||R||
assert len(L0) == len(R0) # |L| = |R|
assert len(L1) == len(R1) # |L| = |R|
assert sumL0 + len(C0) + sumR0 == k0 # |L| + |C| + |R| = k
assert sumL1 + len(C1) + sumR1 == k1 # |L| + |C| + |R| = k
return
# ==================================================
```
#### File: Python-RMedian/tests/phase2_test.py
```python
import pytest
import random
import math
# ==================================================
# Phase 1
def phase1(X, k, d):
# Initiation
n = len(X)
random.shuffle(X)
S = X[:k]
XS = X[k:]
S.sort()
# Keeping the list entries below k/2
if 2*(k*math.log2(n))**0.5 < k/2:
lst = [2*(k*math.log2(n))**0.5]
if 3*(k*math.log2(n))**0.5 < k/2:
lst.append(3*(k*math.log2(n))**0.5)
while d*lst[len(lst) - 1] < k/2:
lst.append(d*lst[len(lst) - 1])
lst.append(k/2)
else:
lst = [k/2]
# Buckets
L = [[] for _ in range(len(lst) - 1)]
R = [[] for _ in range(len(lst) - 1)]
C = []
for s in S[math.floor(k / 2 - lst[0]): math.ceil(k / 2 + lst[0])]:
C.append(s)
for i in range(1, len(lst)):
for s in S[math.floor(k / 2 - lst[i]): math.floor(k / 2 - lst[i - 1])]:
L[i - 1].append(s)
for s in S[math.ceil(k / 2 + lst[i - 1]): math.ceil(k / 2 + lst[i])]:
R[i - 1].append(s)
return S, XS, L, C, R
# ==================================================
# Phase 2
def phase2(S, XS, L, C, R, cnt):
mark = [False for _ in range(len(XS) + len(S))]
b = len(L)
random.shuffle(XS)
for x_i in XS:
med = 0
for j in reversed(range(0, b - 1)):
current = 2 ** 50
random.shuffle(L[j])
for l in L[j]:
if cnt[l] < current:
x_A = l
if mark[x_A] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_A] += 1
if x_i < x_A:
if j + c < b:
mark[x_i] = True
L[j + c].append(x_i)
med = -1
else:
med = -2
break
current2 = 2 ** 50
random.shuffle(R[j])
for r in R[j]:
if cnt[r] < current2:
x_B = r
if mark[x_B] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_B] += 1
if x_i > x_B:
if j + c < b:
mark[x_i] = True
R[j + c].append(x_i)
med = 1
else:
med = 2
break
if med == 0:
C.append(x_i)
elif med == -2:
L[len(L) - 1].append(x_i)
elif med == 2:
R[len(R) - 1].append(x_i)
return S, XS, L, C, R, cnt
# ==================================================
# Unittest : Parameter
@pytest.mark.parametrize(('n'), [
# Randomized input
random.randint(2**9, 2**15),
# Manuel input
2**10, 2**12, 2**14, 2**12 + 1, 2**12 - 1
])
# ==================================================
# Unittest : Test
def test_p1(n):
# Generating Tastcase
X0 = [i for i in range(n)]
cnt0 = [0 for _ in range(n)]
k0 = int(n ** (2 / 3))
d0 = int(n ** (1 / 12))
S0, XS0, L0, C0, R0, = phase1(X0, k0, d0)
S0, XS0, L0, C0, R0, cnt0 = phase2(S0, XS0, L0, C0, R0, cnt0)
X1 = [i for i in range(n)]
cnt1 = [0 for _ in range(n)]
k1 = int(n / math.log(n, 2)**(1/3))
d1 = int(math.log(n, 2)**(1/3))
S1, XS1, L1, C1, R1 = phase1(X1, k1, d1)
S1, XS1, L1, C1, R1, cnt1 = phase2(S1, XS1, L1, C1, R1, cnt1)
if n % 2 == 0:
assert int((n / 2) + 1) in C0
assert int((n / 2) + 1) in C1
assert cnt0[int((n / 2) + 1)] <= len(L0) + len(R0)
assert cnt1[int((n / 2) + 1)] <= len(L1) + len(R1)
elif n % 2 == 1:
assert int(n / 2) in C0
assert int(n / 2) in C1
assert cnt0[int(n / 2)] <= len(L0) + len(R0)
assert cnt1[int(n / 2)] <= len(L1) + len(R1)
# Test
return
# ==================================================
```
|
{
"source": "jfklorenz/Python-RMinimum",
"score": 4
}
|
#### File: Python-RMinimum/tests/phase3_test.py
```python
import pytest
import random
import math
# ==================================================
# Phase 3
def phase3(W, k, M, cnt):
"""
Phase 3 of the RMinimum algorithm. It takes the winner set from phase 1 and generates n/2k subsets each of size k.
Then it filters out all elements in each subset that is larger than the respective element from the
tournament winner set from phase 2, then merges all subsets.
:param W: Winner set from phase 1
:type W: List
:param k: Tuning parameter responsible for the size and amout of subsets
:type k: INT
:param M: Tournament winner set from phase 2
:type M: List
:param cnt: Saves the fragile complexity for each element
:type cnt: List
    :return: W_i, W_i_filt, Wnew, cnt
    :param Wnew: Set of merged subsets with elements smaller than their respective filter element
    :type Wnew: List
"""
# Generate subsets
random.shuffle(W)
W_i = [W[i * k:(i + 1) * k] for i in range((len(W) + k - 1) // k)]
W_i_filt = [0 for _ in range(len(W_i))]
# Filter subsets
for i in range(len(W_i_filt)):
W_i_filt[i] = [elem for elem in W_i[i] if elem < M[i]]
cnt[M[i]] += len(W_i[i])
for elem in W_i[i]:
cnt[elem] += 1
# Merge subsets
Wnew = [w for sublist in W_i_filt for w in sublist]
return W_i, W_i_filt, Wnew, cnt
# ==================================================
# Unittest : Parameter
@pytest.mark.parametrize(('n', 'k'), [
# Randomized input
(2 * random.randint(2**9, 2**15), random.randint(2, 2**10-1)), # n in [2^10, 2^16], k in [2, 2^10 - 1]
# Manuel input
(2**10 - 2, 2**5), (2**10 + 2, 2**5), # n extreme
(2**10, 1), (2**10, 2), (2**10, 2**9), (2**10, 2**10 - 1), # k extreme
(2**10 - 2, 3), (2**10 + 2, 2**10) # k & n extreme
])
# ==================================================
# Unittest : Test
def test_p3(n, k):
# Generating Testcase
# X = [0, ..., 3/4 * n - 1, 3/4 * n + n/k, ..., n + n/k]
# M = [3/4 * n, ..., 3/4 * n + n/k - 1]
W = [i for i in range(int(n + math.ceil(n / k)))]
M = [i for i in range(int(3 / 4 * n), int(3 / 4 * n + math.ceil(n / k)))]
for m in M:
if m in W:
W.remove(m)
cnt = [0 for _ in range(int(n + math.ceil(n / k)))]
W_split, W_split_filt, W_filt, cnt = phase3(W, k, M, cnt)
# Test
# The amount of buckets is correct
assert len(W_split) == math.ceil(n / k)
# Buckets have the correct size
assert max(len(w) for w in W_split) == k
# Filter test: no element in W[i] was larger than the min[i]
for i in range(len(W_split_filt)):
if W_split_filt[i] == []:
assert True
else:
assert max(W_split_filt[i]) < M[i]
# Each min element was compared with each element in W[i]
sum = 0
for i in range(len(M)):
sum += cnt[M[i]]
assert math.floor(n/k) * k <= sum <= math.ceil(n/k)*k
# Each element from W was compared once against its respective min element
for w in W:
assert cnt[w] == 1
return
# ==================================================
```
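Outside the parametrised test, a tiny direct call can make the filtering behaviour concrete. This is an illustrative sketch assuming phase3 from the snippet above is in scope; the values are chosen by hand.
```python
W = [7, 1, 5, 3]            # toy winner set from phase 1
M = [4, 8]                  # one tournament winner per subset of size k=2
W_i, W_i_filt, Wnew, cnt = phase3(W, 2, M, cnt=[0] * 9)
print(Wnew)                 # only elements smaller than their subset's filter element remain
```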
|
{
"source": "JFK/python-tornado-site-template",
"score": 2
}
|
#### File: www/mylib/loader.py
```python
from tornado.web import Application
from raven.contrib.tornado import AsyncSentryClient
import importlib
import yaml
import models
from tornado.options import options
import logging
def app(name, settings):
u"""起動するアプリケーションをロードする
"""
options.log_file_prefix = settings["logging_path"]
logging.getLogger().setLevel(settings['logging_level'])
models.load(settings['db'])
with open(settings['server_apps_conf_path'], 'r') as f:
app_conf = yaml.load(f)
server = app_conf['common'][name]
if settings['env'] in app_conf:
server.update(app_conf[settings['env']][name])
ui = {'ui_modules': {}, 'ui_methods': {}}
for ui_key in ui.keys():
for package in server.get(ui_key, {}).keys():
for key in server[ui_key][package].keys():
name = server[ui_key][package][key]
module= importlib.import_module("mylib.%s.%s" % \
(ui_key, package))
ui[ui_key].update({key: getattr(module, name)})
settings.update(ui)
routes = []
for package in server['handlers'].keys():
for uri in server['handlers'][package].keys():
name = server['handlers'][package][uri]
handler = importlib.import_module("handlers.%s" % package)
routes.append((r"%s" % uri, getattr(handler, name)))
application = Application(routes, **settings)
try:
application.sentry_client = AsyncSentryClient(settings['sentry'])
except:
pass
return dict(
app = application,
port = server['port'],
worker = server['worker_processes']
)
```
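For orientation, the loader above appears to expect an apps configuration roughly shaped like the following. This is an inferred, illustrative structure only (shown as a Python dict for brevity); the app name "web" and handler names are placeholders.
```python
app_conf = {
    "common": {
        "web": {
            "port": 8888,
            "worker_processes": 1,
            # maps to handlers.main.IndexHandler via importlib in loader.app()
            "handlers": {"main": {"/": "IndexHandler"}},
        }
    }
}
```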
#### File: www/tests/test_user.py
```python
import unittest
import datetime
from mongoengine import connect
from mongoengine import Document
from mongoengine import StringField
from mongoengine import EmailField
from mongoengine import DateTimeField
from mongoengine import ValidationError
from mongoengine.connection import get_db, get_connection
from models.user import *
import os
class TestMongoEngine(unittest.TestCase):
def setUp(self):
        # Connect to the database
addr = '127.0.0.1'
port = 27017
connect('test', host=addr, port=port)
self.conn = get_connection()
self.db = get_db()
def tearDown(self):
        # Drop the collection
User.drop_collection()
def test_create(self):
"""
        create test
"""
user = User(email='<EMAIL>', first_name='hoge', last_name='foo')
user.save()
u_obj = User.objects.first()
u_obj.first_name = "change"
u_obj.save()
self.assertEqual(user.first_name, "hoge")
user.reload()
self.assertEqual(user.first_name, "change")
def test_validation(self):
"""
        validation test
"""
user = User()
self.assertRaises(ValidationError, user.validate)
user.email = '<EMAIL>'
user.validate()
user.first_name = 10
self.assertRaises(ValidationError, user.validate)
def test_read(self):
user = User(email='<EMAIL>', first_name='hoge', last_name='foo')
user.save()
collection = self.db[User._get_collection_name()]
u_obj = collection.find_one({'email': '<EMAIL>'})
self.assertEqual(u_obj['email'], '<EMAIL>')
self.assertEqual(u_obj['first_name'], 'hoge')
self.assertEqual(u_obj['last_name'], 'foo')
self.assertEqual(u_obj['_id'], user.id)
u_err = User(email='root@localhost')
self.assertRaises(ValidationError, u_err.save)
try:
u_err.save(validate=False)
except ValidationError:
self.fail()
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JFK/spider",
"score": 3
}
|
#### File: www/projects/sample.py
```python
import re
import logging
# from projects import queue as q
BASE_URL = 'http://snapdish.co/books/'
MAX_JOB_COUNT = 1
WAIT = 1
INTERVAL = 86400
def keyword(text):
logging.info(text)
def response(spider, soup, tag, **kwargs):
logging.info('sample response...')
urls = []
option = {}
for a in soup.find_all('a'):
href = a.get('href')
if href and re.match('/books/', href) and href != '/books/':
# you can enqueue here
# q(spider.redis, qname='normal').enqueue(keyword, a.text)
urls.append('http://snapdish.co%s' % href)
return (tag, urls, option)
```
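A small illustrative run of response() above, assuming soup is a BeautifulSoup document; the spider argument is only needed when the commented-out enqueue call is enabled.
```python
from bs4 import BeautifulSoup
soup = BeautifulSoup('<a href="/books/abc">A Book</a><a href="/other">skip me</a>', 'html.parser')
tag, urls, option = response(None, soup, 'books')
print(urls)  # ['http://snapdish.co/books/abc']
```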
|
{
"source": "jflaboe/PythonLambdaUtils",
"score": 3
}
|
#### File: src/plu/deploy.py
```python
import json
import os
import shutil
import time
import zipfile
import boto3
def run(args):
if len(args) < 3:
print("Usage:\n\npython -m plu deploy <package> [...<package2>]\npython -m plu deploy all")
packages = args[2:]
if "all" in packages:
deploy_all()
else:
for p in packages:
deploy_package(p)
def deploy_all():
for p in get_all_lambdas():
deploy_package(p)
def get_all_lambdas():
with open(".plu") as f:
return [v for v in json.loads(f.read()).values()]
def deploy_package(package_name):
print("Deploying Lambda for: {}".format(package_name))
print("Copying dependencies...")
copy_modules(package_name)
print("Copying main modules...")
copy_top_level_files(package_name)
print("Zipping up contents...")
zip_deployment_directory_contents(package_name)
print("Deploying to AWS...")
upload_lambda(package_name)
print("Deployment complete. Cleaning up...")
remove_zip_file(package_name)
remove_deployment_directory(package_name)
def remove_deployment_directory(package_name):
shutil.rmtree(get_deployment_directory_path(package_name))
def get_deployment_directory_path(package_name):
return "./{}/tmp-deploy".format(package_name)
def zip_deployment_directory_contents(package_name):
with zipfile.ZipFile(get_zip_file_path(package_name), 'w', zipfile.ZIP_DEFLATED) as zipf:
zipdir(get_deployment_directory_path(package_name), zipf)
def remove_zip_file(package_name):
os.remove(get_zip_file_path(package_name))
def get_zip_file_path(package_name):
return "./{}/tmp-deploy-zip.zip".format(package_name)
def copy_modules(package_name):
shutil.copytree("./{}/env/Lib/site-packages".format(package_name), get_deployment_directory_path(package_name))
def copy_top_level_files(package_name):
for filename in os.listdir("./{}".format(package_name)):
if len(filename) > 3 and filename[-3:] == ".py":
with open("./{}/{}".format(package_name, filename)) as f:
data = f.read().split("\n")
for i in range(len(data)):
if data[i].startswith("from ."):
data[i] = data[i][7:]
with open("{}/{}".format(get_deployment_directory_path(package_name), filename), "w") as g:
g.write("\n".join(data))
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),
os.path.relpath(os.path.join(root, file),
path))
def upload_lambda(package_name):
client = boto3.client("lambda", region_name=get_aws_region())
pname = get_project_name()
with open(get_zip_file_path(package_name), "rb") as z:
try:
#check if function exists
client.get_function(FunctionName=pname + "_" + package_name)
response = client.update_function_code(
FunctionName=pname + "_" + package_name,
ZipFile=z.read()
)
except:
arn = input("Role ARN for this Lambda function?: ")
client.create_function(
FunctionName=pname + "_" + package_name,
Runtime="python3.9",
Handler="lambda_function.lambda_handler",
Role=arn,
Code={
"ZipFile": z.read()
}
)
env = get_environment_variables()
if not env is None:
print("Deployment started, waiting for completion...")
time.sleep(15)
client.update_function_configuration(
FunctionName=pname + "_" + package_name,
Environment={
"Variables": env
}
)
def get_environment_variables():
if os.path.isfile(".plu.env") is True:
with open(".plu.env") as f:
return json.loads(f.read())
return None
def get_aws_region():
if os.path.isfile(".plu.conf") is True:
with open(".plu.conf") as f:
data = json.loads(f.read())
if "region" in data:
return data['region']
else:
region = input("AWS Region for lambda upload?: ")
data["region"] = region
with open(".plu.conf", "w") as f:
f.write(json.dumps(data))
else:
region = input("AWS Region for lambda upload?: ")
if region in ["us-west-2", "us-west-1", "us-east-1", "us-east-2"]:
with open(".plu.conf", "w") as f:
f.write(json.dumps({"region": region}))
return region
return "us-west-2"
def get_project_name():
if os.path.isfile(".plu.conf") is True:
with open(".plu.conf") as f:
data = json.loads(f.read())
if "project" in data:
return data['project']
else:
pname = input("Project name?: ")
data["project"] = pname
with open(".plu.conf", "w") as f:
f.write(json.dumps(data))
else:
pname = input("Project name?: ")
with open(".plu.conf", "w") as f:
f.write(json.dumps({"project": pname}))
return pname
return "us-west-2"
```
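A hypothetical programmatic invocation mirroring the CLI usage string in run(); the package name is a placeholder.
```python
# Equivalent to: python -m plu deploy my_package
run(["plu", "deploy", "my_package"])
```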
|
{
"source": "jflad17/pilot_logbook",
"score": 3
}
|
#### File: routes/user/login.py
```python
from datetime import timedelta
from fastapi import APIRouter, Body, Depends, HTTPException
from sqlalchemy import update, insert
from fastapi.security import OAuth2PasswordRequestForm
from core.security import ACCESS_TOKEN_EXPIRE_MINUTES, authenticate, create_access_token, get_user, get_password_hash, validate_password, verify_password
from dependencies import get_db
import models
import schemas
router = APIRouter(
tags=["login"],
)
@router.post('/token', response_model=schemas.Token)
async def login(rememberMe: bool | None = Body(None), changed_password: str | None = Body(None), form_data: OAuth2PasswordRequestForm = Depends(), db=Depends(get_db)):
username = form_data.username
password = <PASSWORD>
user = get_user(db, username=username)
expiration = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
if rememberMe == True:
expiration = timedelta(days=30)
if user:
if user.resetPassword:
if changed_password:
validate_password(changed_password)
password_hash = get_password_hash(changed_password)
db.execute(
update(models.User)
.where(models.User.idUser == user.idUser)
.values(password=<PASSWORD>, resetPassword=0)
)
db.commit()
else:
raise HTTPException(status_code=403, detail="Password must be reset.")
else:
if user.loginAttempts >= user.maxLoginAttempts:
raise HTTPException(status_code=402, detail="Account locked, too many login attempts.")
if not verify_password(password, user.password):
user.loginAttempts += 1
db.commit()
if user.loginAttempts >= user.maxLoginAttempts:
raise HTTPException(status_code=402, detail="Account locked, too many login attempts.")
if authenticate(db, username, password):
access_token = create_access_token(data={"sub": username}, expires_delta=expiration)
print(user)
return {"user": user, "access_token": access_token, "token_type": "bearer"}
else:
raise HTTPException(status_code=401, detail="Incorrect password")
else:
raise HTTPException(status_code=400, detail="User doesn't exist!")
```
#### File: backend/core/config_example.py
```python
import os
from pydantic import AnyHttpUrl, BaseSettings, validator
class Settings(BaseSettings):
DEV = True
SERVER = False
BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = ["http://127.0.0.1:9000", "http://127.0.0.1:3000"]
@validator("BACKEND_CORS_ORIGINS", pre=True)
def assemble_cors_origins(cls, v: str | list[str]) -> list[str] | str:
if isinstance(v, str) and not v.startswith("["):
return [i.strip() for i in v.split(",")]
elif isinstance(v, (list, str)):
return v
raise ValueError(v)
PROJECT_NAME: str = "LogbookAPI"
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
MYSQL_SERVER: str = ""
MYSQL_USER: str = ""
MYSQL_PASSWORD: str = ""
MYSQL_DB: str = ""
SQLALCHEMY_POOLSIZE: int = 2
SQLALCHEMY_DATABASE_URI: str = (
f"mysql+mysqldb://{MYSQL_USER}:{MYSQL_PASSWORD}@{MYSQL_SERVER}/{MYSQL_DB}"
)
class Config:
case_sensitive = True
settings = Settings()
```
|
{
"source": "JFlaherty347/Pokemon-Red-AI",
"score": 3
}
|
#### File: JFlaherty347/Pokemon-Red-AI/pkmnDiscretizer.py
```python
import gym
import numpy as np
class pokemonRedDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
    actions for the Pokemon Red game.
"""
def __init__(self, env):
super(pokemonRedDiscretizer, self).__init__(env)
buttons = ["A", "LEFT", "RIGHT"]
actions = [["BUTTON"], ['LEFT'], ['RIGHT'], ["BUTTON","LEFT"], ["BUTTON", "RIGHT"]]
self._actions = []
"""
What we do in this loop:
For each action in actions
- Create an array of 3 False (3 = nb of buttons)
For each button in action: (for instance ['LEFT']) we need to make that left button index = True
- Then the button index = LEFT = True
In fact at the end we will have an array where each array is an action and each elements True of this array
are the buttons clicked.
"""
for action in actions:
arr = np.array([False] * 3)
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a): # pylint: disable=W0221
return self._actions[a].copy()
```
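The button-to-boolean mapping built in __init__ can be illustrated on its own; this sketch repeats the same loop for a single combined action.
```python
import numpy as np
buttons = ["A", "LEFT", "RIGHT"]
action = ["A", "LEFT"]
arr = np.array([False] * 3)
for button in action:
    arr[buttons.index(button)] = True
print(arr)  # [ True  True False]
```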
#### File: JFlaherty347/Pokemon-Red-AI/sbTest.py
```python
import retro
import gym
import os
import time
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.deepq.policies import MlpPolicy
from stable_baselines import DQN
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.cmd_util import make_vec_env
# party size = d163
# money = d347
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
def main():
retro.data.Integrations.add_custom_path(
os.path.join(SCRIPT_DIR, "custom_integrations")
)
print("PokemonRed-GameBoy" in retro.data.list_games(inttype=retro.data.Integrations.ALL))
env = retro.make("PokemonRed-GameBoy", inttype=retro.data.Integrations.ALL)
print(env)
print(env.action_space)
time.sleep(3)
env = make_vec_env(lambda: env, n_envs=1)
# check_env(env, warn=True)
time.sleep(3)
model = DQN(MlpPolicy, env, verbose=1)
model.learn(total_timesteps=25000)
obs = env.reset()
while True:
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
env.render()
env.close()
if __name__ == "__main__":
main()
```
|
{
"source": "jflairie/sheetwork",
"score": 2
}
|
#### File: core/config/project.py
```python
import time
from pathlib import Path
from typing import Dict, Union
from sheetwork.core.exceptions import ProjectFileParserError
from sheetwork.core.flags import FlagParser
from sheetwork.core.logger import GLOBAL_LOGGER as logger
from sheetwork.core.ui.printer import yellow
from sheetwork.core.utils import PathFinder, deprecate
from sheetwork.core.yaml.yaml_helpers import open_yaml, validate_yaml
from sheetwork.core.yaml.yaml_schema import project_schema
class Project:
"""Sets up everything there is to know about the project config.
Will give precedence to CLI Args --see override functions.
"""
PROJECT_FILENAME = "sheetwork_project.yml"
# this is some garbage to make sure we don't sleep when we test the deprecation handling
# ! DEPRECATION "always_create"
IS_TEST = False
def __init__(self, flags: FlagParser) -> None:
"""Constructs project object.
Args:
flags (FlagParser): Inited flags object.
"""
self.project_dict: Dict[str, Union[str, bool]] = dict()
self.target_schema: str = str()
self.object_creation_dct: Dict[str, bool] = dict()
self.destructive_create_table: bool = False
self.flags = flags
# directories (first overwritten by flags, then by project) This may not always be able to
# be like this we might wanna give prio to CLI but for now this removes some complication.
self.project_file_fullpath: Path = Path("dumpy_path")
self.profile_dir: Path = Path("~/.sheetwork/").expanduser()
self.sheet_config_dir: Path = Path.cwd()
# override defaults
self.override_paths_from_flags()
self.load_project_from_yaml()
self.decide_object_creation()
self.override_object_creation_from_flags()
logger.debug(f"Project name: {self.project_name}")
def load_project_from_yaml(self):
if self.project_file_fullpath == Path("dumpy_path"):
_, self.project_file_fullpath = PathFinder().find_nearest_dir_and_file(
type(self).PROJECT_FILENAME
)
project_yaml = open_yaml(self.project_file_fullpath)
is_valid_yaml = validate_yaml(project_yaml, project_schema)
if project_yaml and is_valid_yaml:
self.project_dict = project_yaml
self.project_name = project_yaml.get("name")
self.target_schema = project_yaml.get("target_schema", self.target_schema)
if project_yaml.get("paths"):
self.profile_dir = (
Path(project_yaml["paths"].get("profile_dir", self.profile_dir))
.expanduser()
.resolve()
)
self.sheet_config_dir = (
Path(project_yaml["paths"].get("sheet_config_dir", self.sheet_config_dir))
.expanduser()
.resolve()
)
else:
raise ProjectFileParserError(
f"Error trying to load project config from {self.project_file_fullpath}. "
"Check it exists or that it is valid."
)
# ! DEPRECATION "always_create"
def handle_deprecations(self, colour: str = "red") -> None:
if self.project_dict.get("always_create"):
msg = (
"'always_create' will be deprecated in a future major release "
"'always_create' now means 'always_create_table'. "
"Prefer using 'always_create_table' instead or 'always_create_all_objects' if you "
"want to make sheetwork create all objects (database, schemas and tables)."
)
deprecate(message=msg, colour=colour)
if type(self).IS_TEST is False:
time.sleep(4)
def decide_object_creation(self) -> None:
self.handle_deprecations()
create_everything_label = "always_create_objects"
object_creation_mapping = {
# ! DEPRECATE "always_create"
"create_table": ["always_create_table", "always_create"],
"create_schema": ["always_create_schema"],
}
for object_type, rule in object_creation_mapping.items():
if self.project_dict.get(create_everything_label):
create = [True]
else:
create = [True for x in rule if self.project_dict.get(x) is True]
self.object_creation_dct.update({object_type: True in create})
self.destructive_create_table = (
True
if self.project_dict.get("destructive_create_table", self.destructive_create_table)
is True
else False
)
logger.debug(yellow(f"Object creation dict:\n {self.object_creation_dct}"))
logger.debug(yellow(str(self.project_dict)))
def override_paths_from_flags(self):
if self.flags.project_dir:
self.project_file_fullpath = Path(self.flags.project_dir, type(self).PROJECT_FILENAME)
if self.flags.profile_dir:
self.profile_dir = Path(self.flags.profile_dir)
if self.flags.sheet_config_dir:
self.sheet_config_dir = Path(self.flags.sheet_config_dir)
def override_object_creation_from_flags(self) -> None:
if self.flags.create_table:
logger.debug(yellow("going to create table"))
self.object_creation_dct.update({"create_table": True})
if self.flags.create_schema:
logger.debug(yellow("going to create schema"))
self.object_creation_dct.update({"create_schema": True})
logger.debug(yellow(f"Object creation dict after override\n {self.object_creation_dct}"))
if self.flags.destructive_create_table:
logger.debug(yellow("going to perform destuctive table creation"))
self.destructive_create_table = True
```
#### File: sheetwork/core/sheetwork.py
```python
import sys
from typing import List, Optional, Tuple, Union
import pandas
from gspread.exceptions import APIError
from retrying import retry
from sheetwork.core.adapters.base.connection import BaseConnection, BaseCredentials
from sheetwork.core.adapters.base.impl import BaseSQLAdapter
from sheetwork.core.adapters.factory import AdapterContainer
from sheetwork.core.cleaner import SheetCleaner
from sheetwork.core.clients.google import GoogleSpreadsheet
from sheetwork.core.config.config import ConfigLoader
from sheetwork.core.config.profile import Profile
from sheetwork.core.flags import FlagParser
from sheetwork.core.logger import GLOBAL_LOGGER as logger
from sheetwork.core.ui.printer import red, timed_message, yellow
from sheetwork.core.utils import check_columns_in_df
class SheetBag:
"""Main object orchestrates sheet loading, cleaning, and db pushing.
Raises:
ColumnNotFoundInDataFrame: If a column on which a rename or casting is asked for cannot be
found in the DataFrame resulting from the obtained sheet.
Returns:
SheetBag: Loaded, and possibly cleaned sheet object with db interaction methods.
"""
def __init__(self, config: ConfigLoader, flags: FlagParser, profile: Profile):
"""Constructor of SheetBag class.
Args:
config (ConfigLoader): initialised Sheetwork config class containing required params to
orchestrate SheetBag successfully.
flags (FlagParser): class containing defaults or parsed CLI arguments
profile (Profile): class containing info such as credentials db type etc required for
SheetBag to know what to do.
"""
self.sheet_df: pandas.DataFrame = pandas.DataFrame()
self.flags = flags
self.config = config
self.target_schema = config.target_schema
self.target_table = config.target_table
self.profile = profile
self.push_anyway = False
self.sheet_key: str = str(config.sheet_config.get("sheet_key", str()))
self.credentials_adapter: Optional[BaseCredentials] = None
self.connection_adapter: Optional[BaseConnection] = None
self.sql_adapter: Optional[BaseSQLAdapter] = None
self.init_adapters()
def init_adapters(self) -> None:
adapter_container = self._get_adapter_modules()
self.credentials_adapter = adapter_container.credentials_adapter( # type:ignore
self.profile
)
self.connection_adapter = adapter_container.connection_adapter( # type:ignore
self.credentials_adapter
)
self.sql_adapter = adapter_container.sql_adapter( # type:ignore
self.connection_adapter, self.config
)
def _get_adapter_modules(self) -> AdapterContainer:
adapters = AdapterContainer()
adapters.register_adapter(self.profile)
adapters.load_plugins()
return adapters
@retry(stop_max_attempt_number=3, wait_exponential_multiplier=1000, wait_exponential_max=10000)
def _obtain_googlesheet(self) -> pandas.DataFrame:
df = pandas.DataFrame()
try:
worksheet = str(self.config.sheet_config.get("worksheet", str()))
google_sheet = GoogleSpreadsheet(self.profile, self.sheet_key)
google_sheet.authenticate()
google_sheet.open_workbook()
df = google_sheet.make_df_from_worksheet(worksheet_name=worksheet)
except APIError as e:
error = str(e)
if any(x in error for x in ["RESOURCE_EXHAUSTED", "UNAVAILABLE", "INTERNAL"]) and any(
x in error for x in ["100", "500", "503"]
):
raise
return df
def load_sheet(self):
"""Loads a google sheet, and calls clean up steps if applicable.
Sheet must have been shared with account admin email address used in storage.
Raises:
TypeError: When loader does not return results that can be converted into a pandas
DataFrame a type error will be raised.
"""
if self.flags.sheet_name:
logger.info(timed_message(f"Importing: {self.flags.sheet_name}"))
logger.debug(f"Importing data from: {self.config.sheet_config['sheet_key']}")
else:
logger.info(
timed_message(f"Importing data from: {self.config.sheet_config.get('sheet_key')}")
)
df = self._obtain_googlesheet()
if not isinstance(df, pandas.DataFrame):
raise TypeError("import_sheet did not return a pandas DataFrame")
logger.debug(f"Columns imported from sheet: {df.columns.tolist()}")
# Perform exclusions, renamings and cleanups before releasing the sheet.
df = self.exclude_columns(df)
df = self.rename_columns(df)
self.push_anyway, df = self.run_cleanup(df)
logger.debug(f"Columns after cleanups and exclusions: {df.columns}")
logger.debug(f"Loaded SHEET HEAD: {df}")
self.sheet_df = df
def rename_columns(self, df: pandas.DataFrame):
if self.config.sheet_column_rename_dict:
_, _ = check_columns_in_df(df, list(self.config.sheet_column_rename_dict.keys()))
df = df.rename(columns=self.config.sheet_column_rename_dict) # type: ignore
return df
def exclude_columns(self, df: pandas.DataFrame) -> pandas.DataFrame:
"""Drops columns referred to by their identifier.
The identifier is the exact string in the google sheet when
a list is provided in the "excluded_columns" field of a sheet yml file.
Args:
df (pandas.DataFrame): DataFrame downloaded from google sheet.
Returns:
pandas.DataFrame: Either the same dataframe as originally provided or one with dropped
columns as required.
"""
cols_to_exclude: Union[str, List[str]] = self.config.sheet_config.get( # type: ignore
"excluded_columns", list(str())
)
if cols_to_exclude:
_, filtered_columns = check_columns_in_df(df, cols_to_exclude, warn_only=True)
if filtered_columns:
df = df.drop(filtered_columns, axis=1)
return df
return df
@staticmethod
def _collect_and_check_answer(post_cleanup: bool = False):
acceptable_answers = ["y", "n", "a"]
user_input = str()
while user_input not in acceptable_answers:
if user_input is not None:
logger.info("Choose 'y':yes, 'n':no, 'a':abort'")
if post_cleanup:
user_input = input("Would you like to push to db? (y/n):")
else:
user_input = input("Would you like to perform cleanup? (y/n/a): ")
if user_input.lower() == "y":
return True
if user_input.lower() == "n":
return False
if user_input.lower() == "a":
logger.info(red("User aborted."))
sys.exit(1)
@staticmethod
def _show_dry_run_preview(sheet_df: pandas.DataFrame):
print("\nDataFrame DataTypes: \n\n" + str(sheet_df.dtypes))
print("\nDataFrame Header: \n\n" + str(sheet_df.head(10)))
def run_cleanup(self, df: pandas.DataFrame) -> Tuple[bool, pandas.DataFrame]:
clean_up = True
# check for interactive mode
if self.flags.interactive:
logger.info(
yellow(
"PRE-CLEANING PREVIEW: The DataFrame you would push to the database would look like this:"
)
)
self._show_dry_run_preview(df)
clean_up = self._collect_and_check_answer()
if clean_up is True:
logger.debug("Performing clean ups")
clean_df = SheetCleaner(
df, bool(self.config.sheet_config.get("snake_case_camel", False))
).cleanup()
if self.flags.dry_run or self.flags.interactive:
logger.info(yellow("\nPOST-CLEANING PREVIEW:"))
self._show_dry_run_preview(clean_df)
carry_on = self._collect_and_check_answer(post_cleanup=True)
if not carry_on:
logger.info(timed_message(red("User Aborted.")))
sys.exit(1)
return True, clean_df
return True, df
def push_sheet(self):
logger.info(timed_message("Pushing sheet to database..."))
logger.debug(f"Column override dict is a {type(self.config.sheet_columns)}")
logger.debug(f"Sheet columns: {self.config.sheet_columns}")
logger.debug(f"Columns in final df: {self.sheet_df.columns.tolist()}")
self.sql_adapter.upload(self.sheet_df, self.target_schema)
def check_table(self):
self.sql_adapter.check_table(self.target_schema, self.target_table)
def run(self):
self.load_sheet()
if self.push_anyway:
self.push_sheet()
self.check_table()
else:
logger.info(yellow("Nothing pushed since you were in --dry_run mode."))
```
#### File: sheetwork/tests/cleanups_test.py
```python
import pathlib
from pathlib import Path
import numpy as np
import pandas
from .mockers import CASING_DF, DIRTY_DF, SNAKE_CASED_COLS, generate_test_df
TESTING_PATH = pathlib.Path(__file__).parent.absolute()
def test_cleanup():
from sheetwork.core.cleaner import SheetCleaner
clean_df_path = Path(TESTING_PATH, "clean_df.json")
dirty_df = generate_test_df(DIRTY_DF)
clean_df = SheetCleaner(dirty_df).cleanup()
expected_df = pandas.read_json(
clean_df_path,
dtype={"col_with_empty_string": "object"},
)
expected_df = expected_df.fillna(np.nan)
assert clean_df.equals(expected_df)
def test_snake_to_camel():
from sheetwork.core.cleaner import SheetCleaner
cased_df = generate_test_df(CASING_DF)
recased_df = SheetCleaner(cased_df, True).cleanup()
assert recased_df.columns.tolist() == SNAKE_CASED_COLS
```
#### File: sheetwork/tests/profile_test.py
```python
from pathlib import Path
import pytest
from .mockers import EXPECTED_DEV_TEST_PROFILE
FIXTURE_DIR = Path(__file__).resolve().parent
@pytest.mark.datafiles(FIXTURE_DIR)
def test_read_profile(datafiles):
from sheetwork.core.config.profile import Profile
from sheetwork.core.config.project import Project
from sheetwork.core.flags import FlagParser
from sheetwork.core.main import parser
flags = FlagParser(parser, project_dir=str(datafiles), profile_dir=str(datafiles))
project = Project(flags)
profile = Profile(project, "dev")
profile.read_profile()
assert profile.profile_dict == EXPECTED_DEV_TEST_PROFILE
```
|
{
"source": "jflan91/Recipe-API",
"score": 2
}
|
#### File: recipe/tests/test_app.py
```python
from recipe.apps import RecipeConfig
from django.test import TestCase
class RecipeAppTest(TestCase):
def test_app(self):
test_recipe = RecipeConfig.name
self.assertEqual(test_recipe, 'recipe')
```
|
{
"source": "JFLandrigan/eagles",
"score": 3
}
|
#### File: Supervised/utils/metric_utils.py
```python
import numpy as np
import pandas as pd
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
roc_auc_score,
precision_recall_curve,
auc,
mean_squared_error,
mean_absolute_error,
r2_score,
)
from math import sqrt
def root_mean_squared_error(y_true, preds):
return sqrt(mean_squared_error(y_true, preds))
def mean_absolute_percentage_error(y_true, y_pred):
df = pd.DataFrame({"y_true": y_true, "y_pred": y_pred})
df = df[df["y_true"] != 0].copy(deep=True)
y_true = df["y_true"]
y_pred = df["y_pred"]
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def precision_recall_auc(y_true=None, pred_probabs=None):
clf_precision, clf_recall, _ = precision_recall_curve(y_true, pred_probabs)
score = auc(clf_recall, clf_precision)
return score
def init_model_metrics(metrics=[]):
"""
Function to init dictionary that stores metric functions and metric scores
:param metrics: list of strings for metrics to store in dictionary
:return: dictionary that with _func _score metric pairings
"""
metric_dictionary = {}
# Classification Metrics
if "accuracy" in metrics:
metric_dictionary["accuracy_func"] = accuracy_score
metric_dictionary["accuracy_scores"] = np.array([])
if "f1" in metrics:
metric_dictionary["f1_func"] = f1_score
metric_dictionary["f1_scores"] = np.array([])
if "precision" in metrics:
metric_dictionary["precision_func"] = precision_score
metric_dictionary["precision_scores"] = np.array([])
if "recall" in metrics:
metric_dictionary["recall_func"] = recall_score
metric_dictionary["recall_scores"] = np.array([])
if "roc_auc" in metrics:
metric_dictionary["roc_auc_func"] = roc_auc_score
metric_dictionary["roc_auc_scores"] = np.array([])
if "precision_recall_auc" in metrics:
metric_dictionary["precision_recall_auc_func"] = precision_recall_auc
metric_dictionary["precision_recall_auc_scores"] = np.array([])
# Regression Metrics
if "mse" in metrics:
metric_dictionary["mse_func"] = mean_squared_error
metric_dictionary["mse_scores"] = np.array([])
if "rmse" in metrics:
metric_dictionary["rmse_func"] = root_mean_squared_error
metric_dictionary["rmse_scores"] = np.array([])
if "mae" in metrics:
metric_dictionary["mae_func"] = mean_absolute_error
metric_dictionary["mae_scores"] = np.array([])
if "mape" in metrics:
metric_dictionary["mape_func"] = mean_absolute_percentage_error
metric_dictionary["mape_scores"] = np.array([])
if "r2" in metrics:
metric_dictionary["r2_func"] = r2_score
metric_dictionary["r2_scores"] = np.array([])
return metric_dictionary
def calc_metrics(
metrics=None,
metric_dictionary=None,
y_test=None,
preds=None,
pred_probs=None,
avg="binary",
):
for metric in metrics:
if metric not in [
"f1",
"precision",
"recall",
"roc_auc",
"precision_recall_auc",
]:
metric_dictionary[metric + "_scores"] = np.append(
metric_dictionary[metric + "_scores"],
metric_dictionary[metric + "_func"](y_test, preds),
)
elif metric in ["f1", "precision", "recall"]:
metric_dictionary[metric + "_scores"] = np.append(
metric_dictionary[metric + "_scores"],
metric_dictionary[metric + "_func"](y_test, preds, average=avg),
)
elif metric in ["roc_auc", "precision_recall_auc"]:
metric_dictionary[metric + "_scores"] = np.append(
metric_dictionary[metric + "_scores"],
metric_dictionary[metric + "_func"](y_test, pred_probs),
)
return metric_dictionary
```
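An illustrative round trip through the two helpers above, assuming they are importable; the dictionary accumulates one score per call in its _scores arrays.
```python
md = init_model_metrics(metrics=["accuracy", "f1"])
md = calc_metrics(metrics=["accuracy", "f1"], metric_dictionary=md,
                  y_test=[0, 1, 1, 0], preds=[0, 1, 0, 0])
print(md["accuracy_scores"], md["f1_scores"])  # one score appended per call
```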
#### File: eagles/Unsupervised/unsupervised_tuner.py
```python
from eagles.Unsupervised.utils import plot_utils as pu
from eagles.Unsupervised.utils import cluster_eval_utils as ceu
from eagles.Unsupervised.utils import logger_utils as lu
import numpy as np
import pandas as pd
from IPython.display import display
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import silhouette_score
from kneed import KneeLocator
import logging
logger = logging.getLogger(__name__)
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
def _find_max_sil(res_dict):
max_ind = res_dict["scores"].argmax()
num_clusters = res_dict["n_clusters"][max_ind]
max_sil_score = res_dict["scores"][max_ind]
return num_clusters, max_sil_score
def _init_method(model=None, params={}):
if model is None:
logger.warning("No model passed in")
return
if model == "kmeans":
mod = KMeans(**params)
elif model == "agglomerativeclustering":
mod = AgglomerativeClustering(**params)
elif model == "dbscan":
mod = DBSCAN(**params)
else:
mod = model
return mod
def find_optimal_clusters(
data=None,
ft_cols=[],
cluster_method="kmeans",
metric="max_sil",
min_num_clusters=2,
max_num_clusters=10,
params={},
scale=None,
plot_dims=[],
summary_stats=[],
run_stat_comps=True,
plot_scale=None,
random_seed=None,
log="log",
log_name=None,
log_path=None,
log_note=None,
):
"""
Takes in data and model and fits specified unsupervised model to the data. Then uses the specified metric to find
the optimal number of clusters. The optimal number of clusters is passed to eval_clusters to evaluate the
clusters for differences
:param data: default None, expects pandas dataframe with names columns
:param ft_cols: default empty list: expects list containing string names of the columns to use for clustering.
If default then uses all cols
:param cluster_method: default "kmeans", expects string name of the model to be applied (i.e. kmeans,
agglomerativeclustering, dbscan
:param metric: default "max_sil", expects string for metric to determine the optimal number of clusters
    :param min_num_clusters: default 2, int specifying the lower bound of the number of clusters
    :param max_num_clusters: default 10, int specifying the upper bound of the number of clusters
:param params: default empty dict, paramter dictionary for the model being tested
:param scale: default None, expects either "minmax", "standard" or sklearn scaler object
:param plot_dims: default empty list, expects list of dimensions to plot the result clusters across
    :param summary_stats: default empty list, expects list of grouping statistics to apply to data
        during cluster comparisons
:param run_stat_comps: default True, boolean indicating whether or not to run cluster comparisons
:param plot_scale: default None, expects either "minmax" or "standard" to indicate scaling of features for plots
:param random_seed: default None, int specifying the random seed value for the analysis
:param log: string or list, default "log". Expects either a string ("log", "data", "mod") or a list containing these
keywords to tell the logger what to log. Note when a list is passed in the function will create a directory to store
the logged out components.
:param log_name: str default None, prefix name of logged out data. Ignored if log is None
:param log_path: str default None, path to save log data to. Ignored if log is None
:param log_note: str default None, note to be used in the log that is saved out. Ignored if log is None
:return: returns pandas df with attached cluster labels
"""
if min_num_clusters == max_num_clusters:
logger.warning("WARNING MIN AND MAX NUM CLUSTERS SHOULD NOT BE EQUAL")
return
if random_seed is None:
random_seed = np.random.randint(1000, size=1)[0]
print("Random Seed Value: " + str(random_seed))
if len(ft_cols) == 0:
ft_cols = [col for col in data.columns]
data = data[ft_cols].copy(deep=True)
if scale:
if scale == "standard":
scaler = StandardScaler()
data = scaler.fit_transform(data[ft_cols])
elif scale == "minmax":
scaler = MinMaxScaler()
data = scaler.fit_transform(data[ft_cols])
else:
data = scale.fit_transform(data)
data = pd.DataFrame(data)
data.columns = ft_cols
# if kmeans or agglomerative clustering, loop through to find the optimal number of clusters
if cluster_method in ["kmeans", "agglomerativeclustering"]:
res_dict = {"n_clusters": np.array([]), "scores": np.array([])}
# loop through the number of clusters and create dictionary of num clusters with metrics
for i in range(min_num_clusters, max_num_clusters, 1):
params["n_clusters"] = i
res_dict["n_clusters"] = np.append(res_dict["n_clusters"], i)
model = _init_method(model=cluster_method, params=params)
pred_labels = model.fit_predict(data[ft_cols])
if metric in ["max_sil"]:
res_dict["scores"] = np.append(
res_dict["scores"], silhouette_score(data, pred_labels)
)
elif metric == "knee_wss":
res_dict["scores"] = np.append(res_dict["scores"], model.inertia_)
else:
logger.warning("WARNING METRIC NOT SUPPORTED")
return
print("Finished fitting model with " + str(i) + " clusters", end="\r")
print("", end="\n")
elif cluster_method in ["dbscan"]:
model = _init_method(model=cluster_method, params=params)
model.fit_predict(data[ft_cols])
else:
logger.warning("Non supported model passed in")
return
# Once looped through and found the scores across the range of clusters then get final set based on the best score
if cluster_method in ["kmeans", "agglomerativeclustering"]:
if metric == "max_sil":
opt_n_clusters, max_sil_score = _find_max_sil(res_dict=res_dict)
opt_n_clusters = int(opt_n_clusters)
print("Best silhoutte score: " + str(max_sil_score))
elif metric == "knee_wss":
kn = KneeLocator(
x=res_dict["n_clusters"],
y=res_dict["scores"],
curve="convex",
direction="decreasing",
)
opt_n_clusters = int(kn.knee)
pu.plot_score_curve(data=res_dict, metric=metric, opt_n_clusters=opt_n_clusters)
elif cluster_method in ["dbscan"]:
opt_n_clusters = len(set(model.labels_)) - (1 if -1 in model.labels_ else 0)
print("Optimal number of clusters: " + str(opt_n_clusters) + "\n")
eval_clusters(
data=data,
n_clusters=opt_n_clusters,
method=cluster_method,
params=params,
ft_cols=ft_cols,
plot_dims=plot_dims,
summary_stats=summary_stats,
run_stat_comps=run_stat_comps,
plot_scale=plot_scale,
log=log,
log_name=log_name,
log_path=log_path,
log_note=log_note,
)
return data
def eval_clusters(
data=None,
ft_cols=[],
n_clusters=2,
method="kmeans",
params={},
scale=None,
plot_dims=[],
summary_stats=[],
run_stat_comps=True,
plot_scale=None,
log="log",
log_name=None,
log_path=None,
log_note=None,
):
"""
Function to find and compare clusters across specified dimensions
:param data: default None, expects pandas dataframe with names columns
:param ft_cols: default empty list: expects list containing string names of the columns to use for clustering.
:param n_clusters: default 2, int specifying the number of desired clusters
:param method: default "kmeans", expects string name of the model to be applied (i.e. kmeans,
agglomerativeclustering, dbscan
:param params: default empty dict, paramter dictionary for the model being used
:param scale: default None, expects either "minmax", "standard" or sklearn scaler object
:param plot_dims: default empty list, expects list of dimensions to plot the result clusters across
:param summary_stats: default empty list, expects list of grouping statistics to apply to data \
during cluster comparisons
:param run_stat_comps: default True, boolean indicating whether or not to run cluster comparisons
:param plot_scale: default None, expects either "minmax" or "standard" to indicate scaling of features for plots
:param log: string or list, default "log". Expects either a string ("log", "data", "mod") or a list containing these
keywords to tell the logger what to log. Note when a list is passed in the function will create a directory to store
the logged out components.
:param log_name: str default None, prefix name of logged out data. Ignored if log is None
:param log_path: str default None, path to save log data to. Ignored if log is None
:param log_note: str default None, note to be used in the log that is saved out. Ignored if log is None
:return: returns pandas df with attached cluster labels
"""
if len(ft_cols) == 0:
ft_cols = [col for col in data.columns]
data = data[ft_cols].copy(deep=True)
if scale:
if scale == "standard":
scaler = StandardScaler()
data = scaler.fit_transform(data[ft_cols])
elif scale == "minmax":
scaler = MinMaxScaler()
data = scaler.fit_transform(data[ft_cols])
else:
data = scale.fit_transform(data)
data = pd.DataFrame(data)
data.columns = ft_cols
if (
method in ["kmeans", "agglomerativeclustering"]
and "n_cluster" not in params.keys()
):
params["n_clusters"] = n_clusters
model = _init_method(model=method, params=params)
pred_labels = model.fit_predict(data[ft_cols])
data["Cluster"] = model.labels_
data["Cluster"] = data["Cluster"].astype(str)
sil_score = silhouette_score(data[ft_cols], pred_labels)  # score on the features only, not the attached Cluster column
print("Silhouette Score: " + str(round(sil_score, 2)))
if type(model).__name__ == "Pipeline":
if type(model.named_steps["model"]).__name__ == "KMeans":
print(
"WSS Total: "
+ str(round(model.named_steps["model"].inertia_, 2))
+ "\n"
)
elif method == "kmeans":
print("WSS Total: " + str(round(model.inertia_, 2)) + "\n")
if len(plot_dims) == 0:
plot_dims = ft_cols + ["Cluster"]
print("Number of Observations per Cluster")
print(str(data["Cluster"].value_counts()) + "\n\n")
base_cluster_stats = ceu.create_summary_table(
data=data, plot_dims=plot_dims, summary_stats=summary_stats
)
base_cluster_stats = round(base_cluster_stats, 2)
print("Base Cluster Stats \n")
display(base_cluster_stats.T)
print("\n\n")
if run_stat_comps:
sig_test_results, post_hoc_comps = ceu.run_cluster_comps(
data=data, ft_cols=ft_cols
)
if sig_test_results.shape[0] == 0:
print("No significant differences found between clusters")
else:
print("Significance Testing Results \n")
print(str(round(sig_test_results, 2)) + "\n\n")
if post_hoc_comps.shape[0] == 0:
print("No pairwise significant difference")
else:
print("Pairwise Differences \n")
print(str(round(post_hoc_comps, 2)) + "\n\n")
pu.plot_mean_cluster_scores(data=data, plot_scale=plot_scale)
pu.plot_ft_relationships(data=data, plot_dims=plot_dims)
if log:
log_data = {
"n_clusters": n_clusters,
"features": ft_cols,
"Silhouette Score": round(sil_score, 2),
"data": data,
"params": model.get_params(),
"base_cluster_stats": round(base_cluster_stats, 2),
}
if type(model).__name__ == "Pipeline":
log_data["method"] = type(model).__name__
pipe_steps = "Pipe steps: "
for k in model.named_steps.keys():
pipe_steps = pipe_steps + type(model.named_steps[k]).__name__ + " "
log_data["pipe_steps"] = pipe_steps
else:
log_data["method"] = type(model).__name__
if type(model).__name__ == "Pipeline":
if type(model.named_steps["model"]).__name__ == "KMeans":
log_data["WSS"] = round(model.named_steps["model"].inertia_, 2)
elif method == "kmeans":
log_data["WSS"] = round(model.inertia_, 2)
if log_note:
log_data["note"] = log_note
lu.log_results(fl_name=log_name, fl_path=log_path, log_data=log_data)
return data
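# ---------------------------------------------------------------------------
# Hedged usage sketch added to this dump (not part of the original module):
# shows a typical call to find_optimal_clusters() on synthetic data. The
# feature names and blob parameters below are assumptions for illustration.
if __name__ == "__main__":
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=300, centers=3, n_features=4, random_state=42)
    demo_df = pd.DataFrame(X, columns=["ft1", "ft2", "ft3", "ft4"])
    # Fit KMeans for 2..9 clusters, score each fit with the silhouette metric,
    # and evaluate the best solution (plots and summary tables are produced).
    scaled_features = find_optimal_clusters(
        data=demo_df,
        cluster_method="kmeans",
        metric="max_sil",
        scale="standard",
        run_stat_comps=False,
        log=None,
    )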
```
|
{
"source": "JFlashy96/ImageClassifier",
"score": 3
}
|
#### File: JFlashy96/ImageClassifier/helper.py
```python
from collections import OrderedDict
import copy
from PIL import Image
from torch import nn
from torch import optim
from torch.autograd import Variable
from torchvision import datasets, transforms, models
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import os, random, sys
import time
import torch
import torch.nn.functional as F
import torchvision
"""
Helper functions to aid in training the model
and predicting the class of an input image
"""
def load_data(root="./flowers"):
data_dir = root
train_dir = data_dir + "/train"
valid_dir = data_dir + "/valid"
test_dir = data_dir + "/test"
"""
Normalize the means and standard deviations of the images to what the network expects
# Normalization keeps the weights near zero, which tends to make backpropagation more stable.
TODO: Understand and be able to apply backpropagation optimization techniques.
"""
std = [0.229, 0.224, 0.225]
means = [0.485, 0.456, 0.406]
train_transform = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(means, std)])
val_transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(means, std)])
test_transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(means, std)])
print("Initializing Datasets and DataLoaders")
# TODO: Load the datasets with ImageFolder
# Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=train_transform)
val_data = datasets.ImageFolder(valid_dir, transform=val_transform)
test_data = datasets.ImageFolder(test_dir, transform=test_transform)
image_datasets = [train_data, val_data, test_data]
# TODO: Using the image datasets and the transforms, define the dataloaders
# Dataloader definitions using the image datasets and the transforms
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=32)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
return train_loader, val_loader, test_loader, train_data
def setup_network(arch, device, hidden_units, learning_rate):
model = getattr(models, arch)(pretrained=True)
for param in model.parameters():
param.requires_grad = False
# Define a new, untrained feed-forward network as a classifier, using ReLU activations and
# dropout.
classifier = nn.Sequential(OrderedDict([
('fcl', nn.Linear(25088, 1024)),
('drop', nn.Dropout(p=0.5)),
('relu', nn.ReLU()),
('fc2', nn.Linear(1024, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
# Train the classifier layers using backpropagation using the pre-trained network to get the features.
# Track the loss and accuracy on the validation set to determine the best hyperparameters.
model = model.to(device)
if torch.cuda.is_available() and device == 'gpu':
model.cuda()
# Gather the parameters to be optimized/updated in this run.
# If we are finetuning we will be updating all parameters. However,
# if we are doing feature extract method, we will only update
# the parameters that we have just initialized. i.e., the parameters
# with requires_grad is True.
feature_extract = True
params_to_update = model.parameters()
print("Param to learn:")
if feature_extract:
params_to_update = []
for name, param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t", name)
else:
for name, param in model.named_parameters():
if param.requires_grad == True:
print("\t", name)
optimizer = optim.SGD(params_to_update, lr=0.01)
return model, optimizer, classifier
def save_checkpoint(train_data, model, arch, learning_rate, classifier, num_epochs, optimizer):
model.class_to_idx = train_data.class_to_idx
checkpoint = {'input_size': 25088,
'output_size': 102,
'arch': arch,
'learning_rate': learning_rate,
'batch_size': 64,
'classifier': classifier,
'num_epochs': num_epochs,
'optimizer': optimizer.state_dict(),
'state_dict': model.state_dict(),
'class_to_idx': model.class_to_idx}
torch.save(checkpoint, 'checkpoint.pth')
def load_checkpoint(filename, arch, device):
checkpoint = torch.load(filename)
learning_rate = checkpoint['learning_rate']
model, optimizer, classifier = setup_network(arch, device, None, learning_rate)  # hidden_units is not stored in the checkpoint
model = getattr(torchvision.models, checkpoint['arch'])(pretrained=True)
model.classifier = checkpoint['classifier']  # restore the custom classifier before loading the saved weights
model.num_epochs = checkpoint['num_epochs']
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
return model
def train_model(model, dataloaders, criterion, optimizer,device, num_epochs=25):
since = time.time()
# list to keep track of model performance accuracy over epochs
val_acc_history = []
best_acc = 0.0
best_model_wts = copy.deepcopy(model.state_dict())
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
train_mode = 0
valid_mode = 1
# Each epoch has a training and validation mode
for mode in [train_mode, valid_mode]:
if mode == train_mode:
model.train() # set model to training mode
else:
model.eval() # set model to evaluation mode
running_loss = 0.0
running_corrects = 0
pass_count = 0
# Iterate over data.
for inputs, labels in dataloaders[mode]:
pass_count += 1
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# only track history if in train
# Get model output and calculate loss
output = model.forward(inputs)
loss = criterion(output, labels)
_, preds = torch.max(output, 1)
# Backward. Optimize only if in training mode
if mode == train_mode:
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_acc = running_corrects.double() / len(dataloaders[mode].dataset)
if mode == train_mode:
print("\nEpoch: {}/{} ".format(epoch+1, num_epochs),
"\nTraining Loss: {:.4f} ".format(running_loss/pass_count))
else:
print("Validation Loss: {:.4f} ".format(running_loss/pass_count),
"Accuracy: {:.4f}".format(epoch_acc))
running_loss = 0
time_elapsed = time.time() - since
print("\nTotal time: {:.0f}m {:.0f}s".format(time_elapsed//60, time_elapsed % 60))
def predict(image_path, model, topk, device):
# Predict the class (or classes) of an image using a pretrained deep learning model.
# TODO: Implement the code to predict the class from an image file
# move the model to cuda
if device == "cuda":
# Move model parameters to the GPU
model.cuda()
print("Number of GPUS:", torch.cuda.device_count())
print("Device name:", torch.cuda.get_device_name(torch.cuda.device_count()-1))
else:
model.cpu()
# turn off dropout
model.eval()
# The image
image = process_image(image_path)
# transfer to tensor
image = torch.from_numpy(np.array([image])).float()
# The image becomes the input
image = Variable(image)
if device == "cuda":
image = image.cuda()
output = model.forward(image)
probabilities = torch.exp(output).data
# getting the topk
# 0 --> probabilities
# probabilities is a list of the topk
probabilities = torch.topk(probabilities, topk)[0].tolist()[0]
return probabilities
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
im = Image.open(image)
im = im.resize((256, 256))
value = 0.5 * (256-224)
im = im.crop((value, value, 256-value, 256-value))
im = np.array(im)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
im = (im - mean) / std
return im.transpose(2,0,1)
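# ---------------------------------------------------------------------------
# Hedged usage sketch added to this dump (not part of the original project):
# wires the helpers together end to end. The data directory, architecture,
# image path and hyperparameters below are assumptions for illustration.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    train_loader, val_loader, test_loader, train_data = load_data("./flowers")
    model, optimizer, classifier = setup_network("vgg16", device, 1024, 0.01)
    criterion = nn.NLLLoss()  # matches the LogSoftmax output of the classifier
    train_model(model, [train_loader, val_loader], criterion, optimizer,
                device, num_epochs=1)
    save_checkpoint(train_data, model, "vgg16", 0.01, classifier, 1, optimizer)
    # Hypothetical path to a single test image; topk controls how many
    # class probabilities are returned.
    probs = predict("./flowers/test/1/image_06743.jpg", model, topk=5,
                    device=device)
    print(probs)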
```
|
{
"source": "jflatorreg/scikit-maad",
"score": 3
}
|
#### File: maad/features/features_func.py
```python
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy import ndimage as ndi
import itertools as it
import matplotlib.pyplot as plt
from skimage.io import imsave
from skimage import transform, measure
from scipy import ndimage
from maad import sound
from skimage.filters import gaussian
from maad.util import format_rois, rois_to_imblobs, normalize_2d
def _sigma_prefactor(bandwidth):
"""
Compute the Gaussian sigma prefactor for a given bandwidth (adapted from skimage).
Parameters
----------
bandwidth : float
    Bandwidth of the filter, in octaves.
Returns
-------
float
    Prefactor relating the Gaussian sigma to the inverse of the spatial frequency.
"""
b = bandwidth
# See http://www.cs.rug.nl/~imaging/simplecell.html
return 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * \
(2.0 ** b + 1) / (2.0 ** b - 1)
def gabor_kernel_nodc(frequency, theta=0, bandwidth=1, gamma=1,
n_stds=3, offset=0):
"""
Return complex 2D Gabor filter kernel with no DC offset.
This function is a modification of the gabor_kernel function of scikit-image
Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
Harmonic function consists of an imaginary sine function and a real
cosine function. Spatial frequency is inversely proportional to the
wavelength of the harmonic and to the standard deviation of a Gaussian
kernel. The bandwidth is also inversely proportional to the standard
deviation.
Parameters
----------
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float, optional
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
gamma : float, optional
gamma changes the aspect ratio (ellipsoidal) of the gabor filter.
By default, gamma=1 which means no aspect ratio (circle)
if gamma>1, the filter is larger (x-dir)
if gamma<1, the filter is higher (y-dir)
This value is ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float, optional
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g_nodc : complex 2d array
A single gabor kernel (complex) with no DC offset
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filters import gabor_kernel
>>> from skimage import io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> gk = gabor_kernel(frequency=0.2)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # more ripples (equivalent to increasing the size of the
>>> # Gaussian spread)
>>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
# set gaussian parameters
b = bandwidth
sigma_pref = 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * (2.0 ** b + 1) / (2.0 ** b - 1)
sigma_y = sigma_pref / frequency
sigma_x = sigma_y/gamma
# meshgrid
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]
# rotation matrix
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
# combine the gaussian envelope and the complex harmonic (gabor)
g = np.zeros(y.shape, dtype=complex)  # builtin complex; np.complex is deprecated
g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y # gaussian envelope
oscil = np.exp(1j * (2 * np.pi * frequency * rotx + offset)) # harmonic / oscillatory function
g_dc = g*oscil
# remove dc component by subtracting the envelope weighted by K
K = np.sum(g_dc)/np.sum(g)
g_nodc = g_dc - K*g
return g_nodc
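# Illustration added to this dump (not original code): the "no DC" property can
# be checked numerically; because the kernel coefficients sum to (nearly) zero,
# the filter gives no response on constant image regions.
#
# >>> gk = gabor_kernel_nodc(frequency=0.25, theta=np.pi / 4, bandwidth=1, gamma=1)
# >>> bool(abs(gk.sum()) < 1e-9)  # doctest: +SKIP
# True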
def _plot_filter_bank(kernels, frequency, ntheta, bandwidth, gamma, **kwargs):
"""
Display filter bank
Parameters
----------
kernels: list
List of kernels from filter_bank_2d_nodc()
frequency: 1d ndarray of scalars
Spatial frequencies used to built the Gabor filters. Values should be
in [0;1]
ntheta: int
Number of angular steps between 0° to 90°
bandwidth: scalar, optional, default is 1
This parameter modifies the frequency of the Gabor filter
gamma: scalar, optional, default is 1
This parameter change the Gaussian window that modulates the continuous
sine.
1 => same gaussian window in x and y direction (circle)
<1 => elongation of the filter size in the y direction (ellipsoid)
>1 => reduction of the filter size in the y direction (ellipsoid)
**kwargs, optional. This parameter is used by plt.plot and savefig functions
figsize : tuple of integers, optional, default: (13,13)
width, height in inches.
dpi : integer, optional
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
interpolation : string, optional, default is 'nearest'
Pixels interpolation
aspect : string, optional, default is 'auto'
fontsize : scalar, optional, default is 8/0.22*hmax*100/dpi)
size of the font use to print the parameters of each filter
... and more, see matplotlib
Returns
-------
fig : Figure
The Figure instance
ax : Axis
The Axis instance
"""
params = []
for theta in range(ntheta):
theta = theta/ntheta * np.pi
for freq in frequency:
params.append([freq, theta, bandwidth, gamma])
w = []
h = []
for kernel in kernels:
ylen, xlen = kernel.shape
w.append(xlen)
h.append(ylen)
plt.gray()
fig = plt.figure()
dpi =kwargs.pop('dpi',fig.get_dpi())
figsize =kwargs.pop('figsize',(13,13))
interpolation =kwargs.pop('interpolation','nearest')
aspect =kwargs.pop('aspect','auto')
fig.set_figwidth(figsize[0])
fig.set_figheight(figsize[1])
w = np.asarray(w)/dpi
h = np.asarray(h)/dpi
wmax = np.max(w)*1.25
hmax = np.max(h)*1.05
fontsize =kwargs.pop('fontsize',8/0.22*hmax*100/dpi)
params_label = []
for param in params:
params_label.append('theta=%d f=%.2f \n bandwidth=%.1f \n gamma=%.1f'
% (param[1] * 180 / np.pi, param[0], param[2],
param[3]))
n = len(frequency)
for ii, kernel in enumerate(kernels):
ax = plt.axes([(ii%n)*wmax + (wmax-w[ii])/2,(ii//n)*hmax + (hmax-h[ii])/2,w[ii],h[ii]])
ax.imshow(np.real(kernel),interpolation=interpolation, aspect =aspect, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_ylabel(params_label[ii],fontsize=fontsize)
ax.axis('tight')
plt.show()
return ax, fig
def _plot_filter_results(im_ref, im_list, kernels, params, m, n):
"""
Display the result after filtering
Parameters
----------
im_ref : 2D array
Reference image
im_list : list
List of filtered images
kernels: list
List of kernels from filter_bank_2d_nodc()
m: int
number of columns
n: int
number of rows
Returns
-------
fig : Figure
The Figure instance
ax : Axis
The Axis instance
"""
ncols = m
nrows = n
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 5))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
axes[0][1].imshow(im_ref, origin='lower')
axes[0][1].set_title('spectrogram', fontsize=9)
axes[0][1].axis('off')
plt.tight_layout()
params_label = []
for param in params:
params_label.append('theta=%d,\nf=%.2f' % (param[1] * 180 / np.pi, param[0]))
ii = 0
for ax_row in axes[1:]:
plotGabor = True
for ax in ax_row:
if plotGabor == True:
# Plot Gabor kernel
print(params_label[ii])
ax.imshow(np.real(kernels[ii]), interpolation='nearest')
ax.set_ylabel(params_label[ii], fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
plotGabor = False
else:
im_filtered = im_list[ii]
ax.imshow(im_filtered, origin='lower')
ax.axis('off')
plotGabor = True
ii=ii+1
plt.show()
return ax, fig
def filter_mag(im, kernel):
"""
Normalizes the image and computes im and real part of filter response using
the complex kernel and the modulus operation
Parameters
----------
im: 2D array
Input image to process
kernel: 2D array
Complex kernel (or filter)
Returns
-------
im_out: Modulus operand on filtered image
"""
im = (im - im.mean()) / im.std()
im_out = np.sqrt(ndi.convolve(im, np.real(kernel), mode='reflect')**2 +
ndi.convolve(im, np.imag(kernel), mode='reflect')**2)
return im_out
def filter_multires(im_in, kernels, npyr=4, rescale=True):
"""
Computes 2D wavelet coefficients at multiple octaves/pyramids
Parameters
----------
im_in: list of 2D arrays
List of input images to process
kernels: list of 2D arrays
List of 2D wavelets to filter the images
npyr: int
Number of pyramids to compute
rescale: boolean
Indicates if the reduced images should be rescaled
Returns
-------
im_out: list of 2D arrays
List of images filtered by each 2D kernel
"""
# Downscale image using gaussian pyramid
if npyr<2:
print('Warning: npyr should be an integer >= 2 for multiresolution analysis')
im_pyr = tuple(transform.pyramid_gaussian(im_in, downscale=2,
max_layer=1, multichannel=False))
else:
im_pyr = tuple(transform.pyramid_gaussian(im_in, downscale=2,
max_layer=npyr-1, multichannel=False))
# filter 2d array at multiple resolutions using gabor kernels
im_filt=[]
for im in im_pyr: # for each pyramid
for kernel, param in kernels: # for each kernel
im_filt.append(filter_mag(im, kernel)) # magnitude response of filter
# Rescale image using gaussian pyramid
if rescale:
dims_raw = im_in.shape
im_out=[]
for im in im_filt:
ratio = np.array(dims_raw)/np.array(im.shape)
if ratio[0] > 1:
im = transform.rescale(im, scale = ratio, mode='reflect',
multichannel=False, anti_aliasing=True)
else:
pass
im_out.append(im)
else:
pass
return im_out
def filter_bank_2d_nodc(frequency, ntheta, bandwidth=1, gamma=1, display=False,
savefig=None, **kwargs):
"""
Build a Gabor filter bank with no offset component
Parameters
----------
frequency: 1d ndarray of scalars
Spatial frequencies used to built the Gabor filters. Values should be
in [0;1]
ntheta: int
Number of angular steps between 0° to 90°
bandwidth: scalar, optional, default is 1
This parameter modifies the frequency of the Gabor filter
gamma: scalar, optional, default is 1
This parameter change the Gaussian window that modulates the continuous
sine.
1 => same gaussian window in x and y direction (circle)
<1 => elongation of the filter size in the y direction (ellipsoid)
>1 => reduction of the filter size in the y direction (ellipsoid)
Returns
-------
params: 2d structured array
Parameters used to calculate 2D gabor kernels.
Params array has 4 fields (theta, freq, bandwidth, gamma)
kernels: 2d ndarray of scalars
Gabor kernels
"""
theta = np.arange(ntheta)
theta = theta / ntheta * np.pi
params=[i for i in it.product(theta,frequency)]
kernels = []
for param in params:
kernel = gabor_kernel_nodc(frequency=param[1],
theta=param[0],
bandwidth=bandwidth,
gamma=gamma,
offset=0,
n_stds=3)
kernels.append((kernel, param))
if display:
_, fig = _plot_filter_bank(kernels, frequency, ntheta, bandwidth,
gamma, **kwargs)
if savefig is not None :
dpi =kwargs.pop('dpi',96)
format=kwargs.pop('format','png')
filename = savefig+'_filter_bank2D.'+format
fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
**kwargs)
return params, kernels
def shape_features(im, im_blobs=None, resolution='low', opt_shape=None):
"""
Computes shape of 2D signal (image or spectrogram) at multiple resolutions
using 2D Gabor filters
Parameters
----------
im: 2D array
Input image to process
im_blobs: 2D array, optional
Optional binary array with '1' on the region of interest and '0' otherwise
opt: dictionary
options for the filter bank (kbank_opt) and the number of scales (npyr)
Returns
-------
shape: 1D array
Shape coefficients of each filter
params: 2D numpy structured array
Corresponding parameters of the 2D filters used to calculate the
shape coefficient. Params has 4 fields (theta, freq, pyr_level, scale)
bbox:
If im_blobs provided, corresponding bounding box
"""
# unpack settings
opt_shape = opt_shape_presets(resolution, opt_shape)
npyr = opt_shape['npyr']
# build filterbank
params, kernels = filter_bank_2d_nodc(ntheta=opt_shape['ntheta'],
bandwidth=opt_shape['bandwidth'],
frequency=opt_shape['frequency'],
gamma=opt_shape['gamma'])
# filter images
im_rs = filter_multires(im, kernels, npyr, rescale=True)
# Get mean intensity
shape = []
if im_blobs is None:
for im in im_rs:
shape.append(np.mean(im))
rois_bbox=None
shape = [shape] # for dataframe formating below
else:
for im in im_rs:
labels = measure.label(im_blobs)
rprops = measure.regionprops(labels, intensity_image=im)
roi_mean = [roi.mean_intensity for roi in rprops]
shape.append(roi_mean)
rois_bbox = [roi.bbox for roi in rprops]
shape = list(map(list, zip(*shape))) # transpose shape
# organise parameters
params = np.asarray(params)
orient = params[:,0]*180/np.pi
orient = orient.tolist()*npyr
pyr_level = np.sort(np.arange(npyr).tolist()*len(params))+1
freq = params[:,1].tolist()*npyr
#params_multires = np.vstack((np.asarray(orient), freq, pyr_level))
nparams = len(params)*npyr
params_multires = np.zeros(nparams, dtype={'names':('theta', 'freq', 'pyr_level','scale'),
'formats':('f8', 'f8', 'f8','f8')})
params_multires['theta'] = orient
params_multires['freq'] = freq
params_multires['scale'] = 1/np.asarray(freq)
params_multires['pyr_level'] = pyr_level
params_multires = pd.DataFrame(params_multires)
# format shape into dataframe
cols=['shp_' + str(idx).zfill(3) for idx in range(1,len(shape[0])+1)]
shape = pd.DataFrame(data=np.asarray(shape),columns=cols)
# format rois into dataframe
rois_bbox = pd.DataFrame(rois_bbox, columns=['min_y','min_x',
'max_y','max_x'])
# compensate half-open interval of bbox from skimage
rois_bbox.max_y = rois_bbox.max_y - 1
rois_bbox.max_x = rois_bbox.max_x - 1
return rois_bbox, params_multires, shape
def centroid(im, im_blobs=None):
"""
Computes intensity centroid of the 2D signal (usually time-frequency representation)
along a margin, frequency (0) or time (1).
Parameters
----------
im: 2D array
Input image to process
im_blobs: 2D array, optional
Optional binary array with '1' on the region of interest and '0' otherwise
margin: 0 or 1
Margin of the centroid, frequency=1, time=0
Returns
-------
centroid: 1D array
centroid of image. If im_blobs provided, centroid for each region of interest
"""
centroid=[]
rois_bbox=[]
if im_blobs is None:
centroid = ndimage.center_of_mass(im)
else:
labels = measure.label(im_blobs)
rprops = measure.regionprops(labels, intensity_image=im)
centroid = [roi.weighted_centroid for roi in rprops]
rois_bbox = [roi.bbox for roi in rprops]
# variables to dataframes
centroid = pd.DataFrame(centroid, columns=['y', 'x'])
rois_bbox = pd.DataFrame(rois_bbox, columns=['min_y','min_x',
'max_y','max_x'])
# compensate half-open interval of bbox from skimage
rois_bbox.max_y = rois_bbox.max_y - 1
rois_bbox.max_x = rois_bbox.max_x - 1
return rois_bbox, centroid
def create_csv(shape_features, centroid_features, label_features = None,
display=False):
"""
Create a .csv file containing all the features (shapes, centroids and
labels)
Parameters
----------
shape_features : 2d nd array of scalars
Each column corresponds to a shape (linked to a kernel filter)
Each row corresponds to a ROI
centroid_features: 2d nd array of scalars (centroid in freq and time)
Centroid of image. If labels provided, centroid for each ROI (rows)
column 0 is 'cyear'
column 1 is 'cmonth'
column 2 is 'chour'
column 3 is 'cminute'
column 4 is 'csecond'
column 5 is 'cfreq'
label_features: 2d nd array of integers and strings, optional, default is
None
column 0 is 'labelID'
column 1 is 'labelName'
Each row corresponds to a ROI
Returns
-------
table : dataframe (Pandas)
The table contains all the features extracted from the spectrogram
"""
if label_features is not None:
table_label_features = pd.DataFrame({'labelID' : np.asarray(label_features)[:,0],
'labelName' : np.asarray(label_features)[:,1]})
table_shape_features = pd.DataFrame(data=shape_features,
columns=["shp" + str(i) for i in range(1,len(shape_features[0])+1)])
table_centroid_features = pd.DataFrame({'cyear' : centroid_features[:,0],
'cmonth': centroid_features[:,1],
'cday' : centroid_features[:,2],
'chour' : centroid_features[:,3],
'cminute': centroid_features[:,4],
'csecond': centroid_features[:,5],
'cfreq' : centroid_features[:,6]})
if label_features is not None:
table = pd.concat([table_label_features, table_centroid_features, table_shape_features], axis=1)
else:
table = pd.concat([table_centroid_features, table_shape_features], axis=1)
if display:
# ------------- FEATURES VISUALIZATION WITH PANDAS ----------------
# table with a summary of the feature values
table.describe()
# histogram for each feature
table.hist(bins=50, figsize=(15,15))
plt.show()
return table
def save_csv(filename, shape_features, centroid_features, label_features=None,
mode='w'):
"""
Create and save a .csv file containing all the features (shapes, centroids
and labels)
Parameters
----------
filename : string
full name (path and name) of the .csv file
mode : string, optional, default is 'w'
Python write mode. For example
'w' Truncate file to zero length or create text file for writing.
The stream is positioned at the beginning of the file.
'a' Open for writing. The file is created if it does not exist. The
stream is positioned at the end of the file. Subsequent writes
to the file will always end up at the then current end of file,
irrespective of any intervening fseek(3) or similar.
shape_features : 2d nd array of scalars
Each column corresponds to a shape (linked to a kernel filter)
Each row corresponds to a ROI
centroid_features: 2d nd array of scalars (centroid in freq and time)
Centroid of image. If labels provided, centroid for each ROI (rows)
column 0 is 'cyear'
column 1 is 'cmonth'
column 2 is 'chour'
column 3 is 'cminute'
column 4 is 'csecond'
column 5 is 'cfreq'
label_features: 2d nd array of integers and strings, optional, default is
None
column 0 is 'labelID'
column 1 is 'labelName'
Returns
-------
table : dataframe (Pandas)
The table contains all the features extracted from the spectrogram.
Keys are {'labelID', 'labelName, 'cyear', 'cmonth', 'cday', 'chour',
'cmin','csecond','cfreq','shp1,'shp2',...'shpn'}
"""
table = create_csv(shape_features, centroid_features, label_features)
table.to_csv(path_or_buf=filename,sep=',',mode=mode,header=True, index=False)
return table
def get_features_wrapper(im, ext, display=False, savefig=None, save_csv=None,
**kwargs):
"""
Computes shape of 2D signal (image or spectrogram) at multiple resolutions
using 2D Gabor filters
Parameters
----------
im: 2D array
Input image to process (spectrogram)
ext : list of scalars [left, right, bottom, top], optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
display : boolean, optional, default is False
Display the signal if True
savefig : string, optional, default is None
Root filename (with full path) is required to save the figures. Postfix
is added to the root filename.
save_csv : string, optional, default is None
Root filename (with full path) is required to save the table. Postfix
is added to the root filename.
**kwargs, optional. This parameter is used by plt.plot and savefig functions
figsize : tuple of integers,
width, height in inches.
title : string,
title of the figure
xlabel : string, optional,
label of the horizontal axis
ylabel : string, optional,
label of the vertical axis
cmap : string or Colormap object,
See https://matplotlib.org/examples/color/colormaps_reference.html
in order to get all the existing colormaps
examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
'viridis'...
vmin, vmax : scalar
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
ext : scalars (left, right, bottom, top),
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
dpi : integer, optional
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
format : string, optional
Format to save the figure
... and more, see matplotlib
Returns
-------
table : dataframe (Pandas)
The table contains all the features extracted from the spectrogram.
Keys are {'labelID', 'labelName, 'cyear', 'cmonth', 'cday', 'chour',
'cmin','csecond','cfreq','shp0,'shp1',...'shpn'}
params_shape: 2D numpy structured array
Parameters used to calculate 2D gabor kernels.
params_shape has 5 fields (theta, freq, bandwidth, gamma, pyr_level)
Each row corresponds to a shape (shp1, shp2...shpn)
"""
freq=kwargs.pop('freq',(0.75, 0.5))
ntheta=kwargs.pop('ntheta',2)
bandwidth=kwargs.pop('bandwidth', 1)
gamma=kwargs.pop('gamma', 1)
npyr=kwargs.pop('npyr', 3)
date=kwargs.pop('date', None)
im_rois=kwargs.pop('im_rois', None)
label_features=kwargs.pop('label_features', None)
params, kernels = filter_bank_2d_nodc(frequency=freq, ntheta=ntheta,
bandwidth=bandwidth,gamma=gamma,
display=display, savefig=savefig)
# multiresolution image filtering (Gaussian pyramids)
im_filtlist = filter_multires(im, ext, kernels, params, npyr=npyr,
display=display, savefig=savefig)
# Extract shape features for each roi
params_shape, shape = shape_features(im_filtlist=im_filtlist,
params = params,
im_rois=im_rois)
# Extract centroids features for each roi
centroid_features = centroid(im=im, ext=ext, date=date, im_rois=im_rois)
if save_csv :
table = save_csv(save_csv+'.csv',
shape, centroid_features, label_features,
display=display)
else:
table = create_csv(shape, centroid_features, label_features,
display=display)
return table, params_shape
def save_figlist(fname, figlist):
"""
Save a list of figures to file.
Parameters
----------
fname: string
suffix name to save the figure. Extension indicates the format
of the image
Returns
-------
Nothing
"""
for i, fig in enumerate(figlist):
fname_save='%d_%s' % (i, fname)
imsave(fname_save,fig)
def opt_shape_presets(resolution, opt_shape=None):
"""
Set values for multiresolution analysis using presets or custom parameters
Parameters
----------
resolution: str
Chooses the opt_shape presets.
Supported presets are: 'low', 'med', 'high' and 'custom'
opt_shape: dict
Key and values for shape settings.
Valid keys are: ntheta, bandwidth, frequency, gamma, npyr
Returns
-------
opt_shape: dict
A valid dictionary with shape settings
"""
# Factory presets
opt_shape_low = dict(ntheta=2,
bandwidth=1,
frequency=(2**-1, 2**-2),
gamma=2,
npyr = 4)
opt_shape_med = dict(ntheta=4,
bandwidth=1,
frequency=(2**-1, 2**-2),
gamma=2,
npyr = 6)
opt_shape_high = dict(ntheta=8,
bandwidth=1,
frequency=(2**-0.5, 2**-1, 2**-1.5, 2**-2),
gamma=2,
npyr = 6)
if resolution == 'low':
opt_shape = opt_shape_low
elif resolution == 'med':
opt_shape = opt_shape_med
elif resolution == 'high':
opt_shape = opt_shape_high
elif resolution == 'custom':
if opt_shape is not None: # check valid values on opt_shape
if all (opt in opt_shape for opt in ('ntheta', 'bandwidth', 'frequency', 'gamma', 'npyr')):
pass
else:
print('Warning: opt_shape must have all keys-values pairs:')
print('ntheta, bandwidth, frequency, gamma, npyr')
print('Setting resolution to low')
opt_shape = opt_shape_low
else:
print('Warning: if resolution is set to custom, a valid opt_shape dictionary should be provided.')
print('Setting resolution to low')
opt_shape = opt_shape_low
else:
print('Resolution should be: low, med or high. Setting resolution to low')
opt_shape = opt_shape_low
return opt_shape
def plot_shape(shape_plt, params, display_values=False):
"""
Plot shape features in 2D representation
Parameters
----------
shape: 1D array
params: structured array returned by maad.features_rois.shape_features
Returns
-------
plot
"""
unique_theta = np.unique(params.theta)
# compute shape of matrix
dirs_size = unique_theta.size
scale_size = np.unique(params.freq).size * np.unique(params.pyr_level).size
# reshape feature vector
idx = params.sort_values(['theta','pyr_level','scale']).index
if isinstance(shape_plt, pd.DataFrame):
shape_plt = np.reshape(shape_plt.iloc[0,idx].values, (dirs_size, scale_size))
elif isinstance(shape_plt, np.ndarray):
shape_plt = np.reshape(shape_plt[idx], (dirs_size, scale_size))
unique_scale = params.scale * 2**params.pyr_level[idx]
# get textlab
textlab = shape_plt
textlab = np.round(textlab,2)
# plot figure
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.imshow(shape_plt, aspect='auto', origin='lower', interpolation='None', cmap='viridis')
if display_values:
for (j,i),label in np.ndenumerate(textlab):
ax.text(i,j,label,ha='center',va='center')
else:
pass
yticklab = unique_theta
xticklab = np.reshape(unique_scale.values,
(dirs_size, scale_size))
ax.set_xticks(np.arange(scale_size))
ax.set_xticklabels(np.round(xticklab,2)[0,:])
ax.set_yticks(np.arange(dirs_size))
ax.set_yticklabels(yticklab)
ax.set_xlabel('Scale')
ax.set_ylabel('Theta')
plt.show()
def compute_rois_features(s, fs, rois_tf, opt_spec, opt_shape, flims):
"""
Computes shape and central frequency features from signal at specified
time-frequency limits defined by regions of interest (ROIs)
Parameters
----------
s: ndarray
Signal to be analysed
fs: int
Sampling frequency of the signal
rois_tf: pandas DataFrame
Time frequency limits for the analysis. Columns should have at
least min_t, max_t, min_f, max_f. Can be computed with multiple
detection methods, such as find_rois_cwt
opt_spec: dictionary
Options for the spectrogram with keys: window length 'nperseg', window
overlap in percentage 'overlap', and dynamic range in dB 'db_range'
opt_shape: dictionary
Options for the filter bank (kbank_opt) and the number of scales (npyr)
flims: list of 2 scalars
Minimum and maximum boundary frequency values in Hertz
Returns
-------
feature_rois: pandas Dataframe
A dataframe with each column corresponding to a feature
Example
-------
s, fs = sound.load('spinetail.wav')
rois_tf = find_rois_cwt(s, fs, flims=(3000, 8000), tlen=2, th=0.003)
opt_spec = {'nperseg': 512, 'overlap': 0.5, 'db_range': 80}
opt_shape = opt_shape_presets('med')
features_rois = compute_rois_features(s, fs, rois_tf, opt_spec,
opt_shape, flims)
"""
im, dt, df, ext = sound.spectrogram(s, fs, nperseg=opt_spec['nperseg'],
overlap=opt_spec['overlap'], fcrop=flims,
rescale=False, db_range=opt_spec['db_range'])
# format rois to bbox
ts = np.arange(ext[0], ext[1], dt)
f = np.arange(ext[2],ext[3]+df,df)
rois_bbox = format_rois(rois_tf, ts, f, fmt='bbox')
# roi to image blob
im_blobs = rois_to_imblobs(np.zeros(im.shape), rois_bbox)
# get features: shape, center frequency
im = normalize_2d(im, 0, 1)
#im = gaussian(im) # smooth image
bbox, params, shape = shape_features(im, im_blobs, resolution='custom',
opt_shape=opt_shape)
_, cent = centroid(im, im_blobs)
cent['frequency']= f[round(cent.y).astype(int)] # y values to frequency
# format rois to time-frequency
rois_out = format_rois(bbox, ts, f, fmt='tf')
# combine into a single df
rois_features = pd.concat([rois_out, shape, cent.frequency], axis=1)
return rois_features
def shape_features_raw(im, resolution='low', opt_shape=None):
"""
Computes raw shape of 2D signal (image or spectrogram) at multiple resolutions
using 2D Gabor filters. Contrary to shape_feature, this function delivers the raw
response of the spectrogram to the filter bank.
Parameters
----------
im: 2D array
Input image to process
resolution:
Resolution of analysis, i.e. number of filters used.
Three presets are provided, 'low', 'mid' and 'high', which control
the number of filters.
opt_shape: dictionary (optional)
options for the filter bank (kbank_opt) and the number of scales (npyr)
Returns
-------
shape_im: 1D array
Raw shape response of spectrogram to every filter of the filter bank
params: 2D numpy structured array
Corresponding parameters of the 2D filters used to calculate the
shape coefficient. Params has 4 fields (theta, freq, pyr_level, scale)
"""
# unpack settings
opt_shape = opt_shape_presets(resolution, opt_shape)
npyr = opt_shape['npyr']
# build filterbank
params, kernels = filter_bank_2d_nodc(ntheta=opt_shape['ntheta'],
bandwidth=opt_shape['bandwidth'],
frequency=opt_shape['frequency'],
gamma=opt_shape['gamma'])
# filter images
im_rs = filter_multires(im, kernels, npyr, rescale=True)
# Get response of spectrogram to every filter of the filter bank
shape_im = dict()
for j, imx in enumerate(im_rs):
shape_im[j] = imx.ravel()
shape_im = pd.DataFrame(shape_im)
# organise parameters
params = np.asarray(params)
orient = params[:,0]*180/np.pi
orient = orient.tolist()*npyr
pyr_level = np.sort(np.arange(npyr).tolist()*len(params))+1
freq = params[:,1].tolist()*npyr
nparams = len(params)*npyr
params_multires = np.zeros(nparams, dtype={'names':('theta', 'freq', 'pyr_level','scale'),
'formats':('f8', 'f8', 'f8','f8')})
params_multires['theta'] = orient
params_multires['freq'] = freq
params_multires['scale'] = 1/np.asarray(freq)
params_multires['pyr_level'] = pyr_level
params_multires = pd.DataFrame(params_multires)
# format shape into dataframe
cols=['shp_' + str(idx).zfill(3) for idx in range(1,shape_im.shape[1]+1)]
shape_im = pd.DataFrame(data=np.asarray(shape_im),columns=cols)
return params_multires, shape_im
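# ---------------------------------------------------------------------------
# Hedged usage sketch added to this dump (not part of the original module):
# computes multiresolution Gabor shape features on a synthetic spectrogram-like
# image with the 'low' resolution preset. The random image is only there to
# show the expected structure of the outputs.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    fake_spectro = normalize_2d(rng.rand(128, 256), 0, 1)
    bbox, params_multires, shp = shape_features(fake_spectro, resolution='low')
    print(params_multires)   # one row per (theta, freq, pyr_level) combination
    print(shp)               # one shp_XXX column per filter response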
```
#### File: maad/rois/rois_1d.py
```python
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
def sinc(s, cutoff, fs, atten=80, transition_bw=0.05, bandpass=True):
"""
Filter 1D signal with a Kaiser-windowed filter
Parameters:
----------
s : ndarray
input 1D signal
cutoff : ndarray
upper and lower frequencies (min_f, max_f)
atten : float
attenuation in dB
transition_bw : float
transition bandwidth in percent default 5% of total band
bandpass : bool
bandpass (True) or bandreject (False) filter, default is bandpass
Returns:
-------
s_filt (array): signal filtered
"""
width = (cutoff[1] - cutoff[0]) * transition_bw
numtaps, beta = signal.kaiserord(atten, width/(0.5*fs))
numtaps = int(np.ceil(numtaps - 1) // 2 * 2 + 1) # round to nearest odd to have Type I filter
taps = signal.firwin(numtaps, cutoff, window=('kaiser', beta),
scale=False, nyq=0.5*fs, pass_zero=not(bandpass))
s_filt = signal.lfilter(taps, 1, s)
return s_filt
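# Illustration added to this dump (not original code): keep only the 2-6 kHz
# band of a two-tone signal; the sampling rate and tone frequencies below are
# assumptions chosen for the example.
#
# >>> fs = 22050
# >>> t = np.arange(0, 1, 1.0 / fs)
# >>> x = np.sin(2 * np.pi * 4000 * t) + np.sin(2 * np.pi * 9000 * t)
# >>> y = sinc(x, (2000, 6000), fs)  # doctest: +SKIP  (the 9 kHz tone is attenuated)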
def _corresp_onset_offset(onset, offset, tmin, tmax):
""" Check that each onsets have a corresponding offset
Parameters
----------
onset: ndarray
array with onset from find_rois_1d
offset: ndarray
array with offset from find_rois_1d
tmin: float
Start time of wav file (in s)
tmax:
End time of wav file (in s)
Return
------
onset : ndarray
onset with corresponding offset
offset : ndarray
offset with corresponding onset
"""
if onset[0] > offset[0]: # check start
onset = np.insert(onset,0,tmin)
else:
pass
if onset[-1] > offset[-1]: # check end
offset = np.append(offset,tmax)
else:
pass
return onset, offset
def _energy_windowed(s, wl=512, fs=None):
""" Computse windowed energy on signal
Computes the energy of the signals by windows of length wl. Used to amplify sectors where the density of energy is higher
Parameters
----------
s : ndarray
input signal
wl : float
length of the window to summarize the rms value
fs : float
frequency sampling of the signal, used to keep track of temporal information of the signal
Returns
-------
time : ndarray
temporal index vector
s_rms : ndarray
windowed rms signal
"""
s_aux = np.lib.pad(s, (0, wl-len(s)%wl), 'reflect') # padding
s_aux = s_aux**2
# s_aux = np.abs(s_aux) # absolute value. alternative option
s_aux = np.reshape(s_aux,(int(len(s_aux)/wl),wl))
s_rms = np.mean(s_aux,1)
time = np.arange(0,len(s_rms)) * wl / fs + wl*0.5/fs
return time, s_rms
def find_rois_cwt(s, fs, flims, tlen, th=0, display=False, save_df=False,
savefilename='rois.csv', **kwargs):
"""
Find regions of interest (ROIs) based on a predetermined temporal length and frequency limits
The general approach is based on the continuous wavelet transform, following a three-step process
1. Filter the signal with a bandpass sinc filter
2. Smooth the signal by convolving it with a Mexican hat wavelet (Ricker wavelet) [See ref 1]
3. Binarize the signal by applying a linear threshold
Parameters
----------
s : ndarray
input signal
fs : int
sampling frequency of the signal (in Hz)
flims : tuple of int
upper and lower frequencies (in Hz)
tlen : int
temporal length of signal searched (in s)
th : float, optional
threshold to binarize the output
display: boolean, optional, default is False
plot results if set to True, default is False
save_df : boolean, optional
save results to csv file
savefilename : str, optional
Name of the file to save the table as comma separated values (csv)
Returns
-------
rois : pandas DataFrame
an object with temporal and frequency limits of regions of interest
Reference
---------
[1] Bioinformatics (2006) 22 (17): 2059-2065. DOI:10.1093/bioinformatics/btl355 http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
"""
# filter signal
s_filt = sinc(s, flims, fs, atten=80, transition_bw=0.8)
# rms: calculate window of maximum 5% of tlen. improves speed of cwt
wl = 2**np.floor(np.log2(tlen*fs*0.05))
t, s_rms = _energy_windowed(s_filt, int(wl), fs)
# find peaks
cwt_width = [round(tlen*fs/wl/2)]
npad = 5 ## seems to work with 3, but not sure
s_rms = np.pad(s_rms, int(cwt_width[0]*npad), 'reflect') ## add pad (builtin int; np.int is deprecated)
s_cwt = signal.cwt(s_rms, signal.ricker, cwt_width)
s_cwt = s_cwt[0][int(cwt_width[0]*npad):len(s_cwt[0])-int(cwt_width[0]*npad)] ## rm pad
# find onset and offset of sound
segments_bin = np.array(s_cwt > th)
onset = t[np.where(np.diff(segments_bin.astype(int)) > 0)]+t[0] # there is delay because of the diff that needs to be accounted
offset = t[np.where(np.diff(segments_bin.astype(int)) < 0)]+t[0]
# format for output
if onset.size==0 or offset.size==0:
# No detection found
print('Warning: No detection found')
df = pd.DataFrame(data=None)
if save_df==True:
df.to_csv(savefilename, sep=',',header=False, index=False)
else:
# A detection was found, save results to csv
onset, offset = _corresp_onset_offset(onset, offset, tmin=0, tmax=len(s)/fs)
rois_tf = np.transpose([np.repeat(flims[0],repeats=len(onset)),
np.round(onset,5),
np.repeat(flims[1],repeats=len(onset)),
np.round(offset,5)])
cols=['min_f', 'min_t','max_f', 'max_t']
df = pd.DataFrame(data=rois_tf,columns=cols)
if save_df==True:
df.to_csv(savefilename, sep=',', header=True, index=False)
# Display
if display==True:
figsize = kwargs.pop('figsize',(12,6))
cmap = kwargs.pop('cmap','gray')
nfft = kwargs.pop('nfft',512)
noverlap = kwargs.pop('noverlap',256)
# plot
fig,(ax1,ax2) = plt.subplots(2,1,figsize=figsize)
ax1.margins(x=0)
ax1.plot(s_cwt)
ax1.set_xticks([])
ax1.set_ylabel('Amplitude')
ax1.grid(True)
ax1.hlines(th, 0, len(s_cwt), linestyles='dashed', colors='r')
ax2.specgram(s, NFFT=nfft, Fs=fs, noverlap=noverlap, cmap=cmap)
ax2.set_ylabel('Frequency (Hz)')
ax2.set_xlabel('Time (s)')
if not(df.empty):
for idx, _ in df.iterrows():
xy = (df.min_t[idx],df.min_f[idx])
width = df.max_t[idx] - df.min_t[idx]
height = df.max_f[idx] - df.min_f[idx]
rect = patches.Rectangle(xy, width, height, lw=1,
edgecolor='r', facecolor='none')
ax2.add_patch(rect)
plt.show()
return df
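# ---------------------------------------------------------------------------
# Hedged usage sketch added to this dump (not part of the original module):
# detects two synthetic 0.3 s tone bursts between 2 and 6 kHz. The signal,
# band limits and threshold are assumptions; th usually needs tuning per dataset.
if __name__ == "__main__":
    fs = 22050
    t = np.arange(0, 5, 1.0 / fs)
    s = 0.01 * np.random.randn(len(t))
    burst = np.sin(2 * np.pi * 4000 * t[: int(0.3 * fs)])
    s[int(1.0 * fs):int(1.3 * fs)] += burst
    s[int(3.0 * fs):int(3.3 * fs)] += burst
    rois = find_rois_cwt(s, fs, flims=(2000, 6000), tlen=0.3, th=0.01)
    print(rois)  # with a suitable th, two rows near 1.0 s and 3.0 s are expected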
```
#### File: jflatorreg/scikit-maad/setup.py
```python
import os
import textwrap
from setuptools import setup, find_packages, Command
class CleanCommand(Command):
"""Custom clean command to tidy up the project root.
Deletes directories ./build, ./dist and ./*.egg-info
From the terminal type:
> python setup.py clean
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.egg-info')
setup(
name = 'scikit-maad',
version = '0.1.5.1',
#packages = find_namespace_packages(include=['maad.*']),
packages = find_packages(),
author = '<NAME> and <NAME>',
author_email = '<EMAIL>, <EMAIL>',
maintainer = '<NAME> and <NAME>',
description = 'scikit-maad is a modular toolbox to analyze ecoacoustics datasets',
long_description = 'scikit-maad is a modular toolbox to analyze ecoacoustics datasets in Python 3. This package was designed to bring flexibility to find regions of interest, and to compute acoustic features in audio recordings. This workflow opens the possibility to use powerful machine learning algorithms through scikit-learn, allowing key patterns to be identified in all kinds of soundscapes.',
license = 'BSD 3 Clause',
keywords = ['ecoacoustics', 'machine learning', 'ecology', 'wavelets', 'signal processing'],
url = 'https://github.com/scikit-maad/scikit-maad',
platforms = 'OS Independent',
cmdclass={'clean': CleanCommand},
license_file = 'LICENSE',
install_requires = ['docutils>=0.3', 'numpy>=1.13', 'scipy>=0.18',
'scikit-image>=0.14', 'scikit-learn>=0.18',
'pandas>=0.23.4'],
classifiers=textwrap.dedent("""
Development Status :: 4 - Beta
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: OS Independent
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Topic :: Scientific/Engineering :: Artificial Intelligence
""").strip().splitlines()
)
```
|
{
"source": "jflavio1/BagOfVisualWordsExample",
"score": 2
}
|
#### File: jflavio1/BagOfVisualWordsExample/Main.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Laboratorio 4"
__license__ = "Apache"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Production"
from BagOfVW import BagOfVW
def init():
bov = BagOfVW(4, "t2")
bov.trainModel("img/training")
bov.testModel("img/test")
if __name__ == '__main__':
init()
```
|
{
"source": "jflbr/TweetBot",
"score": 3
}
|
#### File: jflbr/TweetBot/TweetBot.py
```python
from tweepy.auth import OAuthHandler
from tweepy.streaming import StreamListener, Stream
from TweetBotConfig import TweetBotConfig
import tweepy, time
class AuthenticationError(Exception):
pass
class TweetBot(StreamListener):
''' Tweet bot: streams tweets matching the configured track words and retweets/favourites them according to the configured strategy '''
def __init__(self,cfgFile="config",*args,**kwargs):
StreamListener.__init__(self,*args,**kwargs)
self.cfg = TweetBotConfig(cfgFile=kwargs.get('cfgFile',cfgFile))
self.api = None
self.auths = None
self.setupAuthentication()
def setupAuthentication(self):
self.auths = OAuthHandler(self.cfg.consumer_key, self.cfg.consumer_key_secret)
self.auths.set_access_token(self.cfg.access_token, self.cfg.access_token_secret)
self.api = tweepy.API(self.auths)
try:
print 'Verifying bot credentials..........',
self.api.verify_credentials()
print 'OK\n\n'
except Exception as e:
print 'FAILED'
print(e)
raise AuthenticationError()
def on_data(self, raw_data):
''' Implementation of StreamListener.on_data method '''
try:
screen_name = raw_data.lower().split('"screen_name":"')[1].split('","location"')[0].replace(",", "")
tweet_sid = raw_data.split('"id":')[1].split('"id_str":')[0].replace(",", "")
retweet_ed = raw_data.lower().split('"retweeted":')[1].split(',"possibly_sensitive"')[0].replace(",", "")
tweet_text = raw_data.lower().split('"text":"')[1].split('","source":"')[0].replace(",", "")
# Exit if the bot is the tweet owner
if screen_name.lower() == self.api.me().screen_name.lower():
return
print '+ Tweet from {} :\n\t{}\n\n'.format(screen_name,tweet_text)
if not any(a_acc.lower() == screen_name.lower() for a_acc in self.cfg.whitelist_accounts):
if not any(acc.lower() == screen_name.lower() for acc in self.cfg.banned_accounts):
if not any(a_wrds.lower() in screen_name.lower() for a_wrds in self.cfg.whitelist_words):
if not any(word.lower() in tweet_text.lower() for word in self.cfg.banned_words):
if("false" in retweet_ed):
# Retweet if allowed
if self.cfg.strategy['retweet']:
self.retweet(tweet_sid)
time.sleep(2)
# Fav if allowed
if self.cfg.strategy['fav']:
self.fav(tweet_sid)
# Follow if allowed
#if self.cfg.strategy['follow']:
# if api.exists_friendship(api.me().id, screen_name.lower()):
# print 'I already follow @{}'.format(screen_name.lower())
# else:
# print 'Trying to follow @{}'.format(screen_name.lower())
# self.followUsers([screen_name.lower()])
# if screen_name.lower() != self.api.me().screen_name.lower():
# print 'Trying to follow @{}'.format(screen_name.lower())
# self.followUsers([screen_name.lower()])
else:
pass
else:
print 'Banned word in {}\nSkipping...'.format(tweet_text)
else:
pass
else:
print 'Banned account @{}\nSkipping...'.format(screen_name)
return True
except Exception as e:
print(str(e))
def on_error(self, status_code):
''' Implementation of StreamListener.on_error method '''
try:
print( "error " + status_code)
except Exception as e:
print(" ++ wuuuut? ++ " + str(e))
def retweet(self,tweet_sid):
try:
self.api.retweet(tweet_sid)
except Exception as e:
if getattr(e, 'code', None) == 327: pass # 327: the tweet has already been retweeted
else: print( "\terror - {}\n".format(e))
def fav(self,tweet_sid):
try:
self.api.create_favorite(tweet_sid)
except Exception as e:
print(str(e))
def tweetPost(self,tweet_text):
try:
self.api.update_status(status=tweet_text)
except Exception as e:
print(str(e))
def getUserFollowers(self,user):
pass
def followUsers(self,users):
for user in users:
self.api.create_friendship(user)
print "Followed {}".format(user)
time.sleep(2)
def unfollowUsers(self,users):
for user in users:
self.api.destroy_friendship(user)
print "Unfollowed {}".format(user)
def unfollowFollowers(self):
users = self.api.friends_ids()
self.unfollowUsers(users)
def main():
bot = TweetBot(cfgFile="config-dev")
try:
twt = Stream(bot.auths, bot)
twt.filter(track=bot.cfg.track_words) # OR (follow = bot.cfg.follow_accounts)
except Exception as e:
print(str(e))
pass
if __name__ == '__main__':
main()
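# Hedged note: the fields below are what this script appears to expect from
# TweetBotConfig, inferred from the attribute accesses above; the real config
# format lives in TweetBotConfig.py, which is not shown here.
#
#   consumer_key, consumer_key_secret, access_token, access_token_secret
#   whitelist_accounts, banned_accounts, whitelist_words, banned_words
#   strategy = {'retweet': True, 'fav': True, 'follow': False}
#   track_words, follow_accounts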
```
|
{
"source": "jfleisher/z3",
"score": 3
}
|
#### File: examples/python/mini_ic3.py
```python
from z3 import *
import heapq
import sys  # needed for the sys.stdout progress output used below
# Simplistic (and fragile) converter from
# a class of Horn clauses corresponding to
# a transition system into a transition system
# representation as <init, trans, goal>
# It assumes it is given three Horn clauses
# of the form:
# init(x) => Invariant(x)
# Invariant(x) and trans(x,x') => Invariant(x')
# Invariant(x) and goal(x) => Goal(x)
# where Invariant and Goal are uninterpreted predicates
class Horn2Transitions:
def __init__(self):
self.trans = True
self.init = True
self.inputs = []
self.goal = True
self.index = 0
def parse(self, file):
fp = Fixedpoint()
goals = fp.parse_file(file)
for r in fp.get_rules():
if not is_quantifier(r):
continue
b = r.body()
if not is_implies(b):
continue
f = b.arg(0)
g = b.arg(1)
if self.is_goal(f, g):
continue
if self.is_transition(f, g):
continue
if self.is_init(f, g):
continue
def is_pred(self, p, name):
return is_app(p) and p.decl().name() == name
def is_goal(self, body, head):
if not self.is_pred(head, "Goal"):
return False
pred, inv = self.is_body(body)
if pred is None:
return False
self.goal = self.subst_vars("x", inv, pred)
self.goal = self.subst_vars("i", self.goal, self.goal)
self.inputs += self.vars
self.inputs = list(set(self.inputs))
return True
def is_body(self, body):
if not is_and(body):
return None, None
fmls = [f for f in body.children() if self.is_inv(f) is None]
inv = None
for f in body.children():
if self.is_inv(f) is not None:
                inv = f
break
return And(fmls), inv
def is_inv(self, f):
if self.is_pred(f, "Invariant"):
return f
return None
def is_transition(self, body, head):
pred, inv0 = self.is_body(body)
if pred is None:
return False
inv1 = self.is_inv(head)
if inv1 is None:
return False
pred = self.subst_vars("x", inv0, pred)
self.xs = self.vars
pred = self.subst_vars("xn", inv1, pred)
self.xns = self.vars
pred = self.subst_vars("i", pred, pred)
self.inputs += self.vars
self.inputs = list(set(self.inputs))
self.trans = pred
return True
def is_init(self, body, head):
for f in body.children():
if self.is_inv(f) is not None:
return False
inv = self.is_inv(head)
if inv is None:
return False
self.init = self.subst_vars("x", inv, body)
return True
def subst_vars(self, prefix, inv, fml):
subst = self.mk_subst(prefix, inv)
self.vars = [ v for (k,v) in subst ]
return substitute(fml, subst)
def mk_subst(self, prefix, inv):
self.index = 0
if self.is_inv(inv) is not None:
return [(f, self.mk_bool(prefix)) for f in inv.children()]
else:
vars = self.get_vars(inv)
return [(f, self.mk_bool(prefix)) for f in vars]
def mk_bool(self, prefix):
self.index += 1
return Bool("%s%d" % (prefix, self.index))
def get_vars(self, f, rs=[]):
if is_var(f):
return z3util.vset(rs + [f], str)
else:
for f_ in f.children():
rs = self.get_vars(f_, rs)
return z3util.vset(rs, str)
# Produce a finite domain solver.
# The theory QF_FD covers bit-vector formulas
# and pseudo-Boolean constraints.
# By default cardinality and pseudo-Boolean
# constraints are converted to clauses. To override
# this default for cardinality constraints
# we set sat.cardinality.solver to True
def fd_solver():
s = SolverFor("QF_FD")
s.set("sat.cardinality.solver", True)
return s
# negate, avoid double negation
def negate(f):
if is_not(f):
return f.arg(0)
else:
return Not(f)
def cube2clause(cube):
return Or([negate(f) for f in cube])
class State:
def __init__(self, s):
self.R = set([])
self.solver = s
def add(self, clause):
if clause not in self.R:
self.R |= { clause }
self.solver.add(clause)
class Goal:
def __init__(self, cube, parent, level):
self.level = level
self.cube = cube
self.parent = parent
def __lt__(self, other):
return self.level < other.level
def is_seq(f):
return isinstance(f, list) or isinstance(f, tuple) or isinstance(f, AstVector)
# Check if the initial state is bad
def check_disjoint(a, b):
s = fd_solver()
s.add(a)
s.add(b)
return unsat == s.check()
# Remove clauses that are subsumed
def prune(R):
removed = set([])
s = fd_solver()
for f1 in R:
s.push()
for f2 in R:
if f2 not in removed:
s.add(Not(f2) if f1.eq(f2) else f2)
if s.check() == unsat:
removed |= { f1 }
s.pop()
return R - removed
class MiniIC3:
def __init__(self, init, trans, goal, x0, inputs, xn):
self.x0 = x0
self.inputs = inputs
self.xn = xn
self.init = init
self.bad = goal
self.trans = trans
self.min_cube_solver = fd_solver()
self.min_cube_solver.add(Not(trans))
self.goals = []
s = State(fd_solver())
s.add(init)
s.solver.add(trans)
self.states = [s]
self.s_bad = fd_solver()
self.s_good = fd_solver()
self.s_bad.add(self.bad)
self.s_good.add(Not(self.bad))
def next(self, f):
if is_seq(f):
return [self.next(f1) for f1 in f]
return substitute(f, [p for p in zip(self.x0, self.xn)])
def prev(self, f):
if is_seq(f):
return [self.prev(f1) for f1 in f]
return substitute(f, [p for p in zip(self.xn, self.x0)])
def add_solver(self):
s = fd_solver()
s.add(self.trans)
self.states += [State(s)]
def R(self, i):
return And(self.states[i].R)
# Check if there are two states next to each other that have the same clauses.
def is_valid(self):
i = 1
while i + 1 < len(self.states):
if not (self.states[i].R - self.states[i+1].R):
return And(prune(self.states[i].R))
i += 1
return None
def value2literal(self, m, x):
value = m.eval(x)
if is_true(value):
return x
if is_false(value):
return Not(x)
return None
def values2literals(self, m, xs):
p = [self.value2literal(m, x) for x in xs]
return [x for x in p if x is not None]
def project0(self, m):
return self.values2literals(m, self.x0)
def projectI(self, m):
return self.values2literals(m, self.inputs)
def projectN(self, m):
return self.values2literals(m, self.xn)
# Determine if there is a cube for the current state
# that is potentially reachable.
def unfold(self):
core = []
self.s_bad.push()
R = self.R(len(self.states)-1)
self.s_bad.add(R)
is_sat = self.s_bad.check()
if is_sat == sat:
m = self.s_bad.model()
cube = self.project0(m)
props = cube + self.projectI(m)
self.s_good.push()
self.s_good.add(R)
is_sat2 = self.s_good.check(props)
assert is_sat2 == unsat
core = self.s_good.unsat_core()
core = [c for c in core if c in set(cube)]
self.s_good.pop()
self.s_bad.pop()
return is_sat, core
# Block a cube by asserting the clause corresponding to its negation
def block_cube(self, i, cube):
self.assert_clause(i, cube2clause(cube))
# Add a clause to levels 0 until i
def assert_clause(self, i, clause):
for j in range(i + 1):
self.states[j].add(clause)
# minimize cube that is core of Dual solver.
# this assumes that props & cube => Trans
def minimize_cube(self, cube, inputs, lits):
is_sat = self.min_cube_solver.check(lits + [c for c in cube] + [i for i in inputs])
assert is_sat == unsat
core = self.min_cube_solver.unsat_core()
assert core
return [c for c in core if c in set(cube)]
# push a goal on a heap
def push_heap(self, goal):
heapq.heappush(self.goals, (goal.level, goal))
# A state s0 and level f0 such that
# not(s0) is f0-1 inductive
def ic3_blocked(self, s0, f0):
self.push_heap(Goal(self.next(s0), None, f0))
while self.goals:
f, g = heapq.heappop(self.goals)
sys.stdout.write("%d." % f)
sys.stdout.flush()
# Not(g.cube) is f-1 invariant
if f == 0:
print("")
return g
cube, f, is_sat = self.is_inductive(f, g.cube)
if is_sat == unsat:
self.block_cube(f, self.prev(cube))
if f < f0:
self.push_heap(Goal(g.cube, g.parent, f + 1))
elif is_sat == sat:
self.push_heap(Goal(cube, g, f - 1))
self.push_heap(g)
else:
return is_sat
print("")
return None
# Rudimentary generalization:
# If the cube is already unsat with respect to transition relation
# extract a core (not necessarily minimal)
# otherwise, just return the cube.
def generalize(self, cube, f):
s = self.states[f - 1].solver
if unsat == s.check(cube):
core = s.unsat_core()
if not check_disjoint(self.init, self.prev(And(core))):
return core, f
return cube, f
# Check if the negation of cube is inductive at level f
def is_inductive(self, f, cube):
s = self.states[f - 1].solver
s.push()
s.add(self.prev(Not(And(cube))))
is_sat = s.check(cube)
if is_sat == sat:
m = s.model()
s.pop()
if is_sat == sat:
cube = self.next(self.minimize_cube(self.project0(m), self.projectI(m), self.projectN(m)))
elif is_sat == unsat:
cube, f = self.generalize(cube, f)
return cube, f, is_sat
def run(self):
if not check_disjoint(self.init, self.bad):
return "goal is reached in initial state"
level = 0
while True:
inv = self.is_valid()
if inv is not None:
return inv
is_sat, cube = self.unfold()
if is_sat == unsat:
level += 1
print("Unfold %d" % level)
sys.stdout.flush()
self.add_solver()
elif is_sat == sat:
cex = self.ic3_blocked(cube, level)
if cex is not None:
return cex
else:
return is_sat
def test(file):
h2t = Horn2Transitions()
h2t.parse(file)
mp = MiniIC3(h2t.init, h2t.trans, h2t.goal, h2t.xs, h2t.inputs, h2t.xns)
result = mp.run()
if isinstance(result, Goal):
g = result
print("Trace")
while g:
print(g.level, g.cube)
g = g.parent
return
if isinstance(result, ExprRef):
print("Invariant:\n%s " % result)
return
print(result)
test("data/horn1.smt2")
test("data/horn2.smt2")
test("data/horn3.smt2")
test("data/horn4.smt2")
test("data/horn5.smt2")
# test("data/horn6.smt2") # takes long time to finish
```
|
{
"source": "JFletcher94/tBot",
"score": 2
}
|
#### File: JFletcher94/tBot/exampleb.py
```python
def get_string():
'''generate full tweet text'''
return 'example #text'
```
|
{
"source": "JFletcher94/tweet-doist",
"score": 3
}
|
#### File: JFletcher94/tweet-doist/setup.py
```python
try:
from tkinter import *
except:
from Tkinter import *
import pygubu
class main_GUI:
"""GUI for one-time setup."""
def __init__(self, master):
"""Create GUI object using pygubu."""
self.master = master
self.builder = builder = pygubu.Builder()
builder.add_from_file('setup.ui')
self.top = builder.get_object('top', master)
builder.connect_callbacks(self)
self.top.protocol('WM_DELETE_WINDOW', self.on_close)
root.withdraw()
def go(self):
"""Write consumer key and consumer secret to file."""
f = open('setup.txt', 'w')
f.write(self.builder.get_object('k_entry').get())
f.write('\n')
f.write(self.builder.get_object('s_entry').get())
f.close()
self.master.destroy()
def on_close(self):
"""Exit program when user closes GUI window."""
self.master.destroy()
if __name__ == '__main__':
root = Tk()
gui = main_GUI(root)
root.mainloop()
```
|
{
"source": "Jflick58/Alexa-Test-Harness",
"score": 2
}
|
#### File: Alexa-Test-Harness/src/alexa_tests.py
```python
import requests
import json
import pytest
class Alexa_Test(object):
def __init__(self,endpoint, userid, applicationId, apitoken, locale="en-US", path_to_intent_json="src/sample.json"):
self.endpoint = endpoint
self.userid = userid
self.locale = locale
self.applicationId = applicationId
self.apitoken = str(apitoken)
self.path_to_intent_json = path_to_intent_json
def test_intent_response(self,intent, expected_response, session_context=""):
headers = {
'Content-Type': 'application/json',
}
request = {
"session": {
"new": True,
"sessionId": "SessionId.2604160e-34c2-4d27-961e-10e20c7a70c9",
"application": {
"applicationId": self.applicationId
},
"attributes": session_context,
"user": {
"userId": self.userid
}
},
"request": {
"type": "IntentRequest",
"requestId": "EdwRequestId.e6576f06-34d0-4148-9c47-b08356b8a2ee",
"intent": {
"name": intent,
"slots": {}
},
"locale": self.locale,
"timestamp": "2018-03-22T04:49:07Z"
},
"context": {
"AudioPlayer": {
"playerActivity": "IDLE"
},
"System": {
"application": {
"applicationId": self.applicationId
},
"user": {
"userId": self.userid
},
"device": {
"supportedInterfaces": {}
}
}
},
"version": "1.0"
}
        endpoint_call = requests.post(self.endpoint, headers=headers, data=json.dumps(request))
print(endpoint_call.text)
        response = str(endpoint_call.json()['response']['outputSpeech']['text'])
assert expected_response in response
def parse_intents(self):
        # the attribute holds a file path, so load the interaction model JSON from disk
        with open(self.path_to_intent_json) as f:
            voice_json = json.load(f)
intents = []
interaction_model = voice_json['interactionModel']
language_model = interaction_model['LanguageModel']
intent_dicts = language_model['intents']
for intent in intent_dicts:
intent_name = intent["name"]
intents.append(intent_name)
return intents
def test_utterance_intent_match(self, utterance, expected_intent):
request = {
"input": {
"content": utterance
},
"device": {
"locale": self.locale
}
}
url = "https://api.amazonalexa.com/v1/skills/{}/simulations".format(self.applicationId)
        headers = {
            "Authorization": self.apitoken,
            "Content-Type": "application/json",
            "Accept": "application/json"
        }
endpoint_call = requests.post(url,headers=headers, data=json.dumps(request))
        if endpoint_call.status_code == 200:
# id = endpoint_call.json["id"]
# results_url = url.replace("/simulations", "/simulations/{}".format(id))
# enpoint_results = requests.get(results_url,headers=headers)
            result = endpoint_call.json()["result"]
skill_execution = result["skillExecutionInfo"]
invocation_request = skill_execution["invocationRequest"]
request = invocation_request["request"]
intent_dict = request["intent"]
returned_intent = intent_dict["name"]
assert returned_intent == expected_intent
else:
return("error") #add logging and proper handling later
```
|
{
"source": "Jflick58/extemp-assist",
"score": 3
}
|
#### File: rss_scraper/rss_summary_parser/summarize.py
```python
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
from collections import Counter
from heapq import nlargest
import time
import logging
nlp = spacy.load("en_core_web_sm")
def word_frequency(text:str):
doc = nlp(text)
keyword = []
stopwords = list(STOP_WORDS)
pos_tag = ['PROPN', 'ADJ', 'NOUN', 'VERB']
for token in doc:
if(token.text in stopwords or token.text in punctuation):
continue
if(token.pos_ in pos_tag):
keyword.append(token.text)
word_freq = Counter(keyword)
return word_freq
def summarize(text:str, number_of_sentences:int=5):
text = text.replace("\n","")
doc = nlp(text)
word_freq = word_frequency(text)
sent_strength={}
for sent in doc.sents:
for word in sent:
if word.text in word_freq.keys():
if sent in sent_strength.keys():
sent_strength[sent]+=word_freq[word.text]
else:
sent_strength[sent]=word_freq[word.text]
summarized_sentences = nlargest(number_of_sentences, sent_strength, key=sent_strength.get)
final_sentences = [w.text for w in summarized_sentences]
summary = ' '.join(final_sentences)
return summary
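# Minimal usage sketch (the sample text is an assumption, not project data):
#
#   article = ("SpaCy is an industrial-strength NLP library. "
#              "It ships with pretrained pipelines for many languages. "
#              "This module uses it to rank sentences by keyword frequency.")
#   print(word_frequency(article))
#   print(summarize(article, number_of_sentences=2))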
```
|
{
"source": "Jflick58/TeachPythonWithMinecraft",
"score": 2
}
|
#### File: TeachPythonWithMinecraft/Flask_Starter_Code/minecraft_controller.py
```python
from flask import Flask, render_template
#from mcpi.minecraft import Minecraft
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
return render_template('index.html')
@app.route('/tree/', methods=['POST', 'GET'])
def tree():
return render_template('index.html')
#add house
#add moat
#add custom
if __name__ == '__main__':
app.run(debug=False, port=5000)
```
|
{
"source": "Jflick58/Twitter-Content-Analysis",
"score": 3
}
|
#### File: Twitter-Content-Analysis/easysentiment/scraper_and_analysis.py
```python
from datetime import datetime
from json import dump
# from os.path import isfile # imported but unused
import collections
import csv # library for reading/writing CSV
import easygui as g # library for GUI
import json # library for manipulating JSON files
# import logging # imported but unused
import sys
import webbrowser
from textblob import TextBlob # library for sentiment analysis
from twitterscraper import query_tweets # library for scraping
# from twitterscraper.query import query_all_tweets # imported but unused
# Initialize JSON encoder
class JSONEncoder(json.JSONEncoder):
"""custom json encoder."""
def default(self, obj):
"""default method."""
if hasattr(obj, '__json__'):
return obj.__json__()
elif isinstance(obj, collections.Iterable):
return list(obj)
elif isinstance(obj, datetime):
return obj.isoformat()
elif hasattr(obj, '__getitem__') and hasattr(obj, 'keys'):
return dict(obj)
elif hasattr(obj, '__dict__'):
return {member: getattr(obj, member)
for member in dir(obj)
if not member.startswith('_') and
not hasattr(getattr(obj, member), '__call__')}
return json.JSONEncoder.default(self, obj)
def scrape_and_analyze():
"""scrape and analyze."""
# Opens a GUI on start
version = 'Easysentiment 1.2'
options = ['Start', 'Developer Page', 'Exit']
button = g.buttonbox(
        'Welcome to Easysentiment Twitter Scraper and Sentiment Analyzer Version 1.2' +
'\n' + '\n' + '\n' + '\n' + '\n' +
'Created by <NAME>, Copyright 2017 Licensed Under MIT License',
title=version, choices=options
)
if button == options[0]:
pass
if button == options[1]:
webbrowser.open('https://github.com/Jflick58', new=0, autoraise=True)
if button == options[2]:
sys.exit()
msg = "Enter your query information. Output will be in the form of a .csv file"
title = version
fieldNames = [ # NOQA
"Search term (do not include the '#' mark, just the the hashtag text)", "From Account",
"Starting Date (YYYY-MM-DD)", "Ending Date (YYYY-MM-DD)", "Number of Tweets",
"Output File Name"
]
fieldValues = [] # we start with blanks for the values # NOQA
fieldValues = g.multenterbox(msg, title, fieldNames) # NOQA
query = fieldValues[0]
account = fieldValues[1]
starting_date = fieldValues[2]
ending_date = fieldValues[3]
limit = int(fieldValues[4])
output2 = fieldValues[5]
# Scrape Twitter
tweets = query_tweets(
query + '%20from%3A' + account +
'%20since%3A' + starting_date + 'until%3A' + ending_date, limit
)
with open(output2 + '.json', "w") as output:
dump(tweets, output, cls=JSONEncoder)
print(tweets)
print(" ")
# converts json to python objects and prepares it to be written to a csv
# reads in the JSON file into Python as a string
data_json = open(output2 + '.json', mode='r').read()
# turns the string into a json Python object
data_python = json.loads(data_json)
csv_out = open(output2 + '.csv', mode='w') # opens csv file
writer = csv.writer(csv_out) # create the csv writer object
fields = ['text', 'timestamp', 'polarity', 'subjectivity', 'sentiment'] # field names
writer.writerow(fields) # writes field
for line in data_python:
# performs the sentiment analysis and classifies it
print(line.get('text').encode('unicode_escape'))
analysis = TextBlob(line.get('text'))
def get_label(analysis, threshold=0):
if analysis.sentiment[0] > threshold:
return 'Positive'
elif analysis.sentiment[0] < threshold:
return 'Negative'
else:
return 'Neutral'
print(analysis.sentiment, get_label(analysis)) # print the results
print(" ")
# writes a row and gets the fields from the json object and the sentiment analysis
writer.writerow([
line.get('text').encode('unicode_escape'), # unicode escape to fix emoji issue
line.get('timestamp'),
analysis.sentiment.polarity,
analysis.sentiment.subjectivity,
get_label(analysis)
])
else:
csv_out.close() # saves the file and closes it
print('Thank you for using this program')
sys.exit('goodbye') # end program
```
|
{
"source": "Jflinchum/pow-generator",
"score": 3
}
|
#### File: Jflinchum/pow-generator/findSentence.py
```python
import sys
import os
from random import shuffle
import re
def sentenceGrab(subject, directory, firstOccurence):
bookshelf = os.listdir(directory)
sentences = []
for file in bookshelf:
if (file.endswith(".txt")):
with open(os.path.join(directory, file), "r") as f:
text = f.read()
                text = re.sub(r'(M\w{1,2})\.', r'\1', text)  # Get rid of Mr./Mrs.
sentenceList = re.split(r' *[\.\?!][\'"\)\]]* *', text) # Split into sentences
for sentence in sentenceList:
wordList = sentence.split()
for word in wordList:
if subject.lower() == word.strip().lower():
sentences.append(sentence.strip())
if firstOccurence:
return sentences
return sentences
def main():
subject = sys.argv[1]
bookDirectory = sys.argv[2]
    sentences = sentenceGrab(subject, bookDirectory, False)  # False: collect matches from every book
print(sentences)
if __name__ == '__main__':
main()
```
#### File: Jflinchum/pow-generator/powGenerator.py
```python
from findSentence import sentenceGrab
from phoneticWords import findPhonetics
from phoneticIndex import findPhoneticIndex
from random import randint
from math import floor
import sys
def main():
library = sys.argv[1]
subject = sys.argv[2]
dictionary = "/usr/share/dict/words"
phonetics = findPhonetics(subject, dictionary)
if len(phonetics) == 0:
print("Could not find any phonetic words.")
return
nearPhoneticNum = floor((phonetics[0][1] + phonetics[len(phonetics)-1][1]) / 2)
phonetics = [i for i in phonetics if i[1] <= nearPhoneticNum]
sentences = []
tries = 10
index = 0
while len(sentences) == 0 and index <= tries:
if len(phonetics) == 0:
print("No more phonetic words. Ending")
return
index += 1
punWord = phonetics[randint(0, floor(len(phonetics)/2))][0]
print(punWord)
sentences = sentenceGrab(punWord, library, True)
if len(sentences) == 0:
phonetics = [i for i in phonetics if i[0] != punWord]
print("Could not find sentence... Trying again")
if index >= tries:
print("Reached maximum tries. Ending")
return
punSentence = sentences[randint(0, len(sentences) - 1)]
sentenceIndex = punSentence.find(punWord)
punIndex = findPhoneticIndex(subject, punWord)
punSentence = punSentence[0:sentenceIndex + punIndex] + subject + punSentence[sentenceIndex + punIndex + len(subject):len(punSentence)]
print(punSentence)
if __name__ == "__main__":
main()
```
#### File: Jflinchum/pow-generator/similarWords.py
```python
import sys
from levenshtein import levenshtein
def main():
subject = sys.argv[1]
dictionary = open("/usr/share/dict/web2")
levenNumber = 2
similar = []
for line in dictionary:
line = line.split("\n")[0]
if subject != line and subject[0] == line[0] and levenshtein(subject, line) <= levenNumber:
similar.append(line)
print(similar)
if __name__ == '__main__':
main()
```
|
{
"source": "jfloff/dblp-orcid",
"score": 2
}
|
#### File: jfloff/dblp-orcid/parse.py
```python
import os
import sys
import gzip
import tempfile
import shutil
import urllib.request
from tqdm import tqdm
from lxml import etree
import pandas as pd
import datetime
import argparse
import copy
##########################
# COMMAND LINE
#
parser = argparse.ArgumentParser()
# group for either out or csv
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--out", action='store_true', default=True, help="Outputs csv to stdout. Useful for redirecting output.", required=False)
group.add_argument("--csv", action='store_true', default=False, help="Saves output to csv", required=False)
# group for either orcid or alias
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--orcid", action='store_true', default=False, help="We gather by orcid, and list all alias for that orcid", required=False)
group.add_argument("--alias", action='store_true', default=False, help="We gather by alias, and list all orcids for that alias", required=False)
# option for no download
parser.add_argument("--no-download", action='store_true', default=False, help="Does not download DBLP XML files", required=False)
args = vars(parser.parse_args())
##########################
# CONSTANTS
#
DBLP_XML_URL = 'http://dblp.org/xml/dblp.xml.gz'
DBLP_XML_FILENAME = 'dblp.xml.gz'
DBLP_DTD_URL = 'http://dblp.org/xml/dblp.dtd'
DBLP_DTD_FILENAME = 'dblp.dtd'
ORCID_OUTPUT_CSV_FILENAME = 'by_orcid.csv'
ALIAS_OUTPUT_CSV_FILENAME = 'by_alias.csv'
def progress_bar_hook(t):
"""Wraps tqdm instance."""
last_b = [0]
def update_to(b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to
# download files
if not args['no_download']:
print("Downloading DBLP XML files...", file=sys.stderr)
with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc=DBLP_XML_FILENAME) as t:
urllib.request.urlretrieve(DBLP_XML_URL, filename=DBLP_XML_FILENAME,reporthook=progress_bar_hook(t), data=None)
with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc=DBLP_DTD_FILENAME) as t:
urllib.request.urlretrieve(DBLP_DTD_URL, filename=DBLP_DTD_FILENAME,reporthook=progress_bar_hook(t), data=None)
##########################
# PARSING DATA STRUCTURES
#
# ALIAS -> dict(all_alias, dblp_key, affiliation, orcid, google_scholar_id, scopus_id, acm_id)
alias_info = {}
# ORCID -> set(ALIAS)
orcid_alias = {}
# ALIAS -> set(ORCID)
alias_orcid = {}
# adds pair of entries to bimap
def add_bimap(alias, orcid):
if alias not in alias_orcid:
alias_orcid[alias] = set()
if orcid not in orcid_alias:
orcid_alias[orcid] = set()
alias_orcid[alias].add(orcid)
orcid_alias[orcid].add(alias)
##########################
# PARSING METHODS
#
# processes author tags
def process_author(element):
if 'orcid' in element.attrib:
alias = element.text
orcid = 'https://orcid.org/' + element.attrib['orcid']
# inits bi-directional entries
add_bimap(alias, orcid)
# processes www tags
def process_www(element):
if 'key' in element.attrib and element.attrib['key'].startswith("homepages"):
all_alias = set([a.strip() for a in element.xpath('author/text()')])
info = {
'dblp_key': set([element.attrib['key']]),
'affiliation': element.findtext("note[@type='affiliation']"),
'orcid': None,
'researcher_id': None,
'google_scholar_id': None,
'scopus_id': None,
'acm_id': None,
'homepage': None,
}
# finds info based on author urls
for url in element.xpath("url/text()"):
# some cleanup
url = url.strip().strip("/")
# tries to find several standard information
if 'orcid.org/' in url:
info['orcid'] = 'https://orcid.org/' + url.rpartition('/')[-1]
elif 'researcherid' in url:
info['researcher_id'] = 'http://www.researcherid.com/rid/' + url.rpartition('/')[-1]
elif 'scholar.google' in url:
info['google_scholar_id'] = 'https://scholar.google.com/citations?user=' + url.rpartition("user=")[-1]
elif 'scopus' in url:
info['scopus_id'] = 'https://www.scopus.com/authid/detail.uri?authorId=' + url.rpartition("authorId=")[-1]
elif 'dl.acm.org/author_page' in url:
info['acm_id'] = 'https://dl.acm.org/author_page.cfm?id=' + url.rpartition("id=")[-1]
# other not very relevant urls that we skip
elif 'wikidata' in url: continue
elif 'genealogy.ams.org' in url: continue
elif 'researchgate' in url: continue
elif 'mendeley' in url: continue
elif 'github' in url: continue
elif 'twitter' in url: continue
elif 'wikipedia' in url: continue
elif 'isni' in url: continue
elif 'linkedin' in url: continue
# everything else we consider homepage, but we just consider 1
else: info['homepage'] = url
# save info on all alias
for alias in all_alias:
alias_info[alias] = info
# if exists saves on bimap and searches for more aliases
if info['orcid'] and (info['orcid'] is not None):
add_bimap(alias, info['orcid'])
# merges info by orcid
def info_by_orcid():
final = {}
for orcid, aliases in orcid_alias.items():
# merges all alias_info
info = {}
dblp_keys = set()
for alias in aliases:
dblp_keys.update(alias_info[alias]['dblp_key'])
            if not info:
                # deep copy of the dict to avoid duplicates in the csv
                info = copy.deepcopy(alias_info[alias])
            else:
                # from running we found out that we might have
                # different alias info for the same orcid hence
                # we merge the dict infos
                info.update({k: v for k, v in alias_info[alias].items() if v})
# adds info to all alias
for alias in aliases:
alias_info[alias] = info
# but we add all aliases to the final result
info['alias'] = sorted(aliases)
info['dblp_key'] = sorted(dblp_keys)
info['orcid'] = orcid
final[orcid] = info
return final
# merges info by orcid
def info_by_alias():
final = {}
for alias, orcids in alias_orcid.items():
# gets info from that alias only
# deep copy of the dict to avoid duplicates in the csv
info = copy.deepcopy(alias_info[alias])
# but store all orcids as a list
info['orcid'] = sorted(orcids)
info['dblp_key'] = next(iter(info['dblp_key']))
info['alias'] = alias
final[alias] = info
return final
def fast_iter(context, func, *args, **kwargs):
"""
http://lxml.de/parsing.html#modifying-the-tree
Based on Liza Daly's fast_iter
http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
See also http://effbot.org/zone/element-iterparse.htm
"""
for event, elem in context:
func(elem, *args, **kwargs)
# It's safe to call clear() here because no descendants will be accessed
# filter for end-ns event due to DTD replacements
if event == 'end-ns':
elem.clear()
# Also eliminate now-empty references from the root node to elem
for ancestor in elem.xpath('ancestor-or-self::*'):
while ancestor.getprevious() is not None:
del ancestor.getparent()[0]
del context
counter = 0
def process_element(element):
global counter
globals()['process_'+element.tag](element)
counter += 1
if counter % 100000 == 0:
print(str(counter)+ " xml nodes processed.", file=sys.stderr)
# first parses all the authors with orcids
print("Started parsing...", file=sys.stderr)
context = etree.iterparse(gzip.GzipFile(DBLP_XML_FILENAME), events=('end','end-ns'), tag=('author','www'), load_dtd=True, dtd_validation=True)
fast_iter(context,process_element)
# merges info by orcid or alias
if args['orcid']:
final = info_by_orcid()
order = ['orcid','alias']
sort = 'orcid'
output_csv_filename = ORCID_OUTPUT_CSV_FILENAME
if args['alias']:
final = info_by_alias()
order = ['alias','orcid']
sort = 'alias'
output_csv_filename = ALIAS_OUTPUT_CSV_FILENAME
# export to csv
df = pd.DataFrame(list(final.values()))
order += ['dblp_key','affiliation','researcher_id','google_scholar_id','scopus_id','acm_id','homepage']
df = df.reindex(columns=order).sort_values(sort)
tmp_csv_fd = tempfile.NamedTemporaryFile(mode='w', delete=False)
tmp_csv_fd.write('# PARSED ON ' + datetime.datetime.today().strftime('%Y-%m-%d') + '\n')
df.to_csv(tmp_csv_fd, index=False, encoding='utf-8')
tmp_csv_fd.close()
print("Finished parsing!", file=sys.stderr)
# remove files
if not args['no_download']:
os.remove(DBLP_DTD_FILENAME)
os.remove(DBLP_XML_FILENAME)
print("Removed DBLP xml files.", file=sys.stderr)
# just prints message, file already saved
if args['csv']:
args['out'] = False
shutil.copy(tmp_csv_fd.name, output_csv_filename)
print("Parsed info saved to: " + output_csv_filename, file=sys.stderr)
# defaults "cat"s the file
if args['out']:
with open(tmp_csv_fd.name) as f:
for line in f: print(line, end="")
# delete file
os.remove(tmp_csv_fd.name)
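# Hedged command-line sketch (the script name `parse.py` is taken from the file
# header above; output file names come from the constants defined at the top):
#
#   python parse.py --csv --orcid                # writes by_orcid.csv
#   python parse.py --csv --alias                # writes by_alias.csv
#   python parse.py --out --alias > alias.csv    # streams the CSV to stdout
#   python parse.py --csv --orcid --no-download  # reuse an already downloaded dblp.xml.gz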
```
|
{
"source": "jfloff/pywFM",
"score": 2
}
|
#### File: pywFM/pywFM/__init__.py
```python
import subprocess
import os
import tempfile
class FM:
""" Class that wraps `libFM` parameters. For more information read
[libFM manual](http://www.libfm.org/libfm-1.42.manual.pdf)
Parameters
----------
task : string, MANDATORY
regression: for regression
classification: for binary classification
num_iter: int, optional
Number of iterations
Defaults to 100
init_stdev : double, optional
Standard deviation for initialization of 2-way factors
Defaults to 0.1
k0 : bool, optional
Use bias.
Defaults to True
k1 : bool, optional
Use 1-way interactions.
Defaults to True
k2 : int, optional
Dimensionality of 2-way interactions.
Defaults to 8
learning_method: string, optional
sgd: parameter learning with SGD
sgda: parameter learning with adpative SGD
als: parameter learning with ALS
mcmc: parameter learning with MCMC
Defaults to 'mcmc'
learn_rate: double, optional
Learning rate for SGD
Defaults to 0.1
r0_regularization: int, optional
bias regularization for SGD and ALS
Defaults to 0
r1_regularization: int, optional
1-way regularization for SGD and ALS
Defaults to 0
r2_regularization: int, optional
2-way regularization for SGD and ALS
Defaults to 0
rlog: bool, optional
Enable/disable rlog output
Defaults to True.
verbose: bool, optional
How much infos to print
Defaults to False.
seed: int, optional
seed used to reproduce the results
Defaults to None.
silent: bool, optional
        Completely silences all libFM output
Defaults to False.
temp_path: string, optional
        Sets path for libFM temporary files. Useful when dealing with large data.
Defaults to None (default NamedTemporaryFile behaviour)
"""
"""
### unsused libFM flags
cache_size: cache size for data storage (only applicable if data is in binary format), default=infty
datafile is text so we don't need this parameter
relation: BS - filenames for the relations, default=''
not dealing with BS extensions since they are only used for binary files
"""
def __init__(self,
task,
num_iter=100,
init_stdev=0.1,
k0=True,
k1=True,
k2=8,
learning_method='mcmc',
learn_rate=0.1,
r0_regularization=0,
r1_regularization=0,
r2_regularization=0,
rlog=True,
verbose=False,
seed=None,
silent=False,
temp_path=None):
# gets first letter of either regression or classification
self.__task = task[0]
self.__num_iter = num_iter
self.__init_stdev = init_stdev
self.__dim = "%d,%d,%d" % (int(k0), int(k1), k2)
self.__learning_method = learning_method
self.__learn_rate = learn_rate
self.__regularization = "%.5f,%.5f,%.5f" % (r0_regularization, r1_regularization, r2_regularization)
self.__rlog = rlog
self.__verbose = int(verbose)
self.__seed = int(seed) if seed else None
self.__silent = silent
self.__temp_path = temp_path
        # gets libfm path (fail fast if the environment variable is missing)
        libfm_env_path = os.environ.get('LIBFM_PATH')
        if libfm_env_path is None:
            raise OSError("`LIBFM_PATH` is not set. Please install libFM and set the path variable "
                          "(https://github.com/jfloff/pywFM#installing).")
        self.__libfm_path = os.path.join(libfm_env_path, "")
# #ShameShame
# Once upon a time, there was a bug in libFM that allowed any type of
# learning_method to save the model. I @jfloff built this package at
# that time, and did not find anything that showed me that MCMC couldn't
# use save_model flag. Nowadays only SGD and ALS can use this parameter.
# Hence, we need to reset the repo to this specific commit pre-fix, so
# we can use MCMC with save_model flag.
# Can we contribute to main libFM repo so this is possible again??
GITHASH = '91f8504a15120ef6815d6e10cc7dee42eebaab0f'
c_githash = subprocess.check_output(['git', '--git-dir', os.path.join(self.__libfm_path, "..", ".git"), 'rev-parse', 'HEAD']).strip()
if c_githash.decode("utf-8") != GITHASH:
raise OSError("libFM is not checked out to the correct commit."
"(https://github.com/jfloff/pywFM#installing).")
def run(self, x_train, y_train, x_test, y_test, x_validation_set=None, y_validation_set=None, meta=None):
"""Run factorization machine model against train and test data
Parameters
----------
x_train : {array-like, matrix}, shape = [n_train, n_features]
Training data
y_train : numpy array of shape [n_train]
Target values
x_test: {array-like, matrix}, shape = [n_test, n_features]
Testing data
y_test : numpy array of shape [n_test]
Testing target values
x_validation_set: optional, {array-like, matrix}, shape = [n_train, n_features]
Validation data (only for SGDA)
y_validation_set: optional, numpy array of shape [n_train]
Validation target data (only for SGDA)
meta: optional, numpy array of shape [n_features]
Grouping input variables
Return
-------
Returns `namedtuple` with the following properties:
predictions: array [n_samples of x_test]
Predicted target values per element in x_test.
global_bias: float
If k0 is True, returns the model's global bias w0
weights: array [n_features]
If k1 is True, returns the model's weights for each features Wj
pairwise_interactions: numpy matrix [n_features x k2]
Matrix with pairwise interactions Vj,f
rlog: pandas dataframe [nrow = num_iter]
`pandas` DataFrame with measurements about each iteration
"""
from sklearn.datasets import dump_svmlight_file
TMP_SUFFIX = '.pywfm'
train_fd = tempfile.NamedTemporaryFile(suffix=TMP_SUFFIX, dir=self.__temp_path)
test_fd = tempfile.NamedTemporaryFile(suffix=TMP_SUFFIX, dir=self.__temp_path)
out_fd = tempfile.NamedTemporaryFile(suffix=TMP_SUFFIX, dir=self.__temp_path)
model_fd = tempfile.NamedTemporaryFile(suffix=TMP_SUFFIX, dir=self.__temp_path)
# converts train and test data to libSVM format
dump_svmlight_file(x_train, y_train, train_fd)
train_fd.seek(0)
dump_svmlight_file(x_test, y_test, test_fd)
test_fd.seek(0)
# builds arguments array
args = [os.path.join(self.__libfm_path, "libFM"),
'-task', "%s" % self.__task,
'-train', "%s" % train_fd.name,
'-test', "%s" % test_fd.name,
'-dim', "'%s'" % self.__dim,
'-init_stdev', "%g" % self.__init_stdev,
'-iter', "%d" % self.__num_iter,
'-method', "%s" % self.__learning_method,
'-out', "%s" % out_fd.name,
'-verbosity', "%d" % self.__verbose,
'-save_model', "%s" % model_fd.name]
# appends rlog if true
rlog_fd = None
if self.__rlog:
rlog_fd = tempfile.NamedTemporaryFile(suffix=TMP_SUFFIX, dir=self.__temp_path)
args.extend(['-rlog', "%s" % rlog_fd.name])
# appends seed if given
if self.__seed:
args.extend(['-seed', "%d" % self.__seed])
# appends arguments that only work for certain learning methods
if self.__learning_method in ['sgd', 'sgda']:
args.extend(['-learn_rate', "%.5f" % self.__learn_rate])
if self.__learning_method in ['sgd', 'sgda', 'als']:
args.extend(['-regular', "'%s'" % self.__regularization])
# adds validation if sgda
# if validation_set is none, libFM will throw error hence, I'm not doing any validation
validation_fd = None
if self.__learning_method == 'sgda' and (x_validation_set is not None and y_validation_set is not None):
validation_fd = tempfile.NamedTemporaryFile(suffix=TMP_SUFFIX, dir=self.__temp_path)
dump_svmlight_file(x_validation_set, y_validation_set, validation_fd.name)
args.extend(['-validation', "%s" % validation_fd.name])
# if meta data is given
meta_fd = None
if meta is not None:
            meta_fd = tempfile.NamedTemporaryFile(suffix=TMP_SUFFIX, dir=self.__temp_path, mode='w+')  # text mode so group ids can be written as strings
# write group ids
for group_id in meta:
meta_fd.write("%s\n" % group_id)
args.extend(['-meta', "%s" % meta_fd.name])
meta_fd.seek(0)
# if silent redirects all output
stdout = None
if self.__silent:
stdout = open(os.devnull, 'wb')
# call libfm with parsed arguments
# had unkown bug with "-dim" option on array. At the time was forced to
# concatenate string `args = ' '.join(args)` but looks like its working
# needs further tests
subprocess.call(args, shell=False, stdout=stdout)
# reads output file
preds = [float(p) for p in out_fd.read().decode("utf-8").split('\n') if p]
# "hidden" feature that allows users to save the model
# We use this to get the feature weights
# https://github.com/srendle/libfm/commit/19db0d1e36490290dadb530a56a5ae314b68da5d
import numpy as np
global_bias = None
weights = []
pairwise_interactions = []
# if 0 its global bias; if 1, weights; if 2, pairwise interactions
out_iter = 0
for line in model_fd.read().decode("utf-8").splitlines():
# checks which line is starting with #
if line.startswith('#'):
if "#global bias W0" in line:
out_iter = 0
elif "#unary interactions Wj" in line:
out_iter = 1
elif "#pairwise interactions Vj,f" in line:
out_iter = 2
else:
# check context get in previous step and adds accordingly
if out_iter == 0:
global_bias = float(line)
elif out_iter == 1:
weights.append(float(line))
elif out_iter == 2:
try:
pairwise_interactions.append([float(x) for x in line.split(' ')])
except ValueError as e:
pairwise_interactions.append(0.0) #Case: no pairwise interactions used
pairwise_interactions = np.matrix(pairwise_interactions)
# parses rlog into dataframe
if self.__rlog:
# parses rlog into
import pandas as pd
rlog_fd.seek(0)
print(os.stat(rlog_fd.name).st_size)
rlog = pd.read_csv(rlog_fd.name, sep='\t')
rlog_fd.close()
else:
rlog = None
if self.__learning_method == 'sgda' and (x_validation_set is not None and y_validation_set is not None):
validation_fd.close()
if meta is not None:
meta_fd.close()
# removes temporary output file after using
train_fd.close()
test_fd.close()
model_fd.close()
out_fd.close()
# return as named collection for multiple output
import collections
fm = collections.namedtuple('model', ['predictions',
'global_bias',
'weights',
'pairwise_interactions',
'rlog'])
return fm(preds, global_bias, weights, pairwise_interactions, rlog)
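# Hedged usage sketch (toy data; assumes libFM is installed and LIBFM_PATH is set):
#
#   import numpy as np
#   import pywFM
#   features = np.matrix([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 0, 1, 0]])
#   target = [5.0, 1.0, 3.0, 4.0]
#   fm = pywFM.FM(task='regression', num_iter=10, k2=4)
#   model = fm.run(features[:3], target[:3], features[3:], target[3:])
#   print(model.predictions, model.global_bias)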
```
|
{
"source": "JFlommersfeld/Actomyosin-contractions-in-soft-pillar-rings",
"score": 3
}
|
#### File: JFlommersfeld/Actomyosin-contractions-in-soft-pillar-rings/calculate_contraction_dynamics.py
```python
import sys
from scipy.integrate import solve_ivp
from models import FullModel, DensityModel
from work_and_power import *
def calculate_contraction_dynamics(model_type, parameter_file, t_max, pillar_stiffness):
""" Solves the contraction dynamics from time t=0 to t=t_max.
Parameters:
model_type (str):
sets the type of model, which determines the exact parameter set that is needed. Possible values for
the parameter model_type are: 'full model' or 'density model'.
parameter_file (str):
the path to the parameter file.
t_max (float):
the final timepoint in seconds until which the dynamics are solved.
        pillar_stiffness (float):
the stiffness of the pillars in the pillar ring in pN/um.
Returns:
numpy.array(float): the calculated time points
numpy.array(float): the solution of the (set of) differential equation(s)
list(float): the tip velocities
list(float): the transmitted powers
list(float): the dissipated powers
float: the total transmitted work
float: the total dissipated work
"""
if model_type == 'full model':
model = FullModel(parameter_file, pillar_stiffness)
elif model_type == 'density model':
model = DensityModel(parameter_file, pillar_stiffness)
else:
print("ERROR: the parameter <model_type> has to be given one of the three values: "
"'full model' or 'density model'.")
sys.exit(1)
t_min = 0
# define initial conditions
f_init = 7
if model_type == 'full model':
k_on_fil = model.get_parameter('k_on_fil')
Nmax = model.get_parameter('Nmax')
ic = [f_init, k_on_fil / (model.k_off_fil(f_init) + k_on_fil) * Nmax]
else:
ic = [0.]
# integrate ODE
sol = solve_ivp(model.rhs, (t_min, t_max), ic, method="LSODA")
# calculate the dynamic quantities
if model_type == 'full model':
velocities = model.velocity(sol.t, sol.y[0], sol.y[1])
else:
velocities = model.velocity(sol.t, sol.y[0])
transmitted_powers = transmitted_power(sol.y[0]/pillar_stiffness, velocities, model)
dissipated_powers = dissipated_power(velocities, model)
# calculate the total transmitted and dissipated work
transmitted_work = work_elastic(sol.t, sol.y[0]/pillar_stiffness, velocities, model)
dissipated_work = work_diss(sol.t, velocities, model)
return sol.t, sol.y, velocities, transmitted_powers, dissipated_powers, transmitted_work, dissipated_work
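# Hedged usage sketch (the parameter file path and the numeric values are assumptions):
#
#   t, y, v, p_trans, p_diss, w_trans, w_diss = calculate_contraction_dynamics(
#       'full model', 'parameters_full_model.txt', t_max=3600, pillar_stiffness=65.0)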
```
#### File: JFlommersfeld/Actomyosin-contractions-in-soft-pillar-rings/models.py
```python
import numpy as np
from parameter_loader import load_parameters
from math import pi
class FullModel:
"""
A class that defines the full model for actomyosin contraction in soft pillar rings that accounts for both myosin
filament binding and density changes
Attributes:
parameter_file (str):
path to a file that contains all necessary parameters for the model (see provided examples).
Methods:
k_off_fil(total_force):
calculates the load dependent steady state off-rate of a myosin filament.
rhs(t, y):
calculates the right hand side of the set of differential equations that describe the model.
velocity(t, force, N):
calculates the deflection velocity of the tip of the pillar.
"""
def __init__(self, parameter_file, pillar_stiffness):
"""
Sets all the necessary parameters for the FullModel object.
Parameters:
parameter_file (str):
path to a file that contains all necessary parameters for the model (see provided examples).
pillar_stiffness (float):
stiffness of the pillars in the pillar ring in pN/um.
"""
self.x_catch, self.x_slip, self.k_off0_catch, self.k_off0_slip, self.k_on, self.k_on_fil, self.a_per_kBT, \
self.Nh, self.Nmax, self.h_eta, self.xi_rho_a2, self.rho_max_per_rho, \
self.R0 = load_parameters('full model', parameter_file)
self.k_p = pillar_stiffness
self.parameter_dict = {"x_catch": self.x_catch, "x_slip": self.x_slip, "k_off0_catch": self.k_off0_catch,
"k_off0_slip": self.k_off0_slip, "k_on": self.k_on, "k_on_fil": self.k_on_fil,
"a_per_kBT": self.a_per_kBT, "Nh": self.Nh, "Nmax": self.Nmax, "h_eta": self.h_eta,
"xi_rho_a2": self.xi_rho_a2, "rho_max_per_rho": self.rho_max_per_rho, "R0": self.R0,
"k_p": self.k_p}
self.A0 = pi * self.R0**2
self.tau = 6. / 5. * pi * self.h_eta / self.k_p
def __k_off(self, force):
"""Calculates the load dependent off-rate of an individual myosin head.
Parameters:
force (float):
the average load that is applied to an individual myosin head.
Returns:
float: the average off-rate of the head.
"""
return self.k_off0_catch * np.exp(-self.a_per_kBT * force * self.x_catch) + \
self.k_off0_slip * np.exp(self.a_per_kBT * force * self.x_slip)
def __calc_prob_dist(self, total_force):
"""Calculates the load dependent steady state probability distribution of the number of bound heads per
myosin filament
Parameters:
total_force (float):
the total load that is applied to the myosin filament.
Returns:
list(float): list of probabilities that n heads are bound per filament, where n is given by the list index.
"""
pns = []
for n in range(0, self.Nh + 1):
nom = 1
for i in range(0, n):
nom = nom * ((self.Nh - i) * self.k_on) / ((i + 1) * self.__k_off(total_force / (i + 1)))
denom = 1
for k in range(1, self.Nh + 1):
prod = 1
for j in range(0, k):
prod = prod * ((self.Nh - j) * self.k_on) / ((j + 1) * self.__k_off(total_force / (j + 1)))
denom = denom + prod
pns.append(nom / denom)
return pns
def k_off_fil(self, total_force):
"""Calculates the load dependent steady state off-rate of a myosin filament.
Parameters:
total_force (float):
the total load that is applied to the myosin filament.
Returns:
float: the off-rate of the filament.
"""
T_off_av = 0
pns = self.__calc_prob_dist(total_force)
for NB_init in range(1, self.Nh + 1):
T_off = 0
for NB in range(1, NB_init + 1):
s = 0
for j in range(NB, self.Nh + 1):
s = s + pns[j]
T_off = T_off + 1 / (NB * self.__k_off(total_force / NB) * pns[NB]) * s
T_off_av = T_off_av + pns[NB_init] * T_off
return 1 / T_off_av
def rhs(self, t, y):
"""Calculates the right hand side of the set of differential equations that describe the model.
Parameters:
t (float):
the time point.
y (list(float)):
a list with elements y[0] = force on the pillar at time t and y[1] = number of bound filaments at time t
Returns:
list(float): the temporal derivative of the input y
"""
force = y[0]
N = y[1]
area = pi * (self.R0 - force / self.k_p) ** 2
density_factor = -self.A0 / area * (self.A0 / area - self.rho_max_per_rho)
force_prime = -force / self.tau + self.xi_rho_a2 * N * density_factor / self.tau
N_prime = self.k_on_fil * (self.Nmax - N) - self.k_off_fil(force) * N
return [force_prime, N_prime]
def velocity(self, t, force, N):
"""Calculates the deflection velocity of the tip of the pillar.
Parameters:
t (float):
the time point.
force (float):
force on the pillar at time t
N:
number of bound filaments at time t
Returns:
float: the deflection velocity of the pillar tip at time t
"""
area = pi * (self.R0 - force / self.k_p) ** 2
density_factor = -self.A0 / area * (self.A0 / area - self.rho_max_per_rho)
return (-force / self.tau + self.xi_rho_a2 * N * density_factor / self.tau) / self.k_p
def get_parameter(self, parameter_name):
"""Get all model parameters
Parameters:
parameter_name (str):
parameter name.
Returns:
float/int: the value of the specified parameter.
"""
return self.parameter_dict[parameter_name]
class DensityModel:
"""
    A class that defines the purely density-dependent model for actomyosin contraction in soft pillar rings.
...
Attributes:
parameter_file (str):
path to a file that contains all necessary parameters for the model (see provided examples).
Methods:
        rhs(t, y):
            calculates the right hand side of the differential equation that describes the model.
        velocity(t, force):
            calculates the deflection velocity of the tip of the pillar.
"""
def __init__(self, parameter_file, pillar_stiffness):
"""
Sets all the necessary parameters for the DensityModel object.
Parameters:
parameter_file (str):
path to a file that contains all necessary parameters for the model (see provided examples).
pillar_stiffness (float):
stiffness of the pillars in the pillar ring in pN/um.
"""
self.h_eta, self.xi_N_rho_a2, self.rho_max_per_rho, self.R0 = load_parameters('density model', parameter_file)
self.k_p = pillar_stiffness
self.parameter_dict = {"h_eta": self.h_eta, "xi_N_rho_a2": self.xi_N_rho_a2,
"rho_max_per_rho": self.rho_max_per_rho, "R0": self.R0, "k_p": self.k_p}
self.A0 = pi * self.R0 ** 2
self.tau = 6. / 5. * pi * self.h_eta / self.k_p
def rhs(self, t, y):
"""Calculates the right hand side of the set of differential equations that describe the model.
Parameters:
t (float):
the time point.
y (list(float)):
a list with a single element y[0] = force on the pillar at time t
Returns:
list(float): the temporal derivative of the input y
"""
force = y[0]
area = pi * (self.R0 - force / self.k_p) ** 2
density_factor = -self.A0 / area * (self.A0 / area - self.rho_max_per_rho)
force_prime = -force/self.tau + self.xi_N_rho_a2 * density_factor / self.tau
return [force_prime]
def velocity(self, t, force):
"""Calculates the deflection velocity of the tip of the pillar.
Parameters:
t (float):
the time point.
force (float):
force on the pillar at time t
Returns:
float: the deflection velocity of the pillar tip at time t
"""
area = pi * (self.R0 - force / self.k_p) ** 2
density_factor = -self.A0 / area * (self.A0 / area - self.rho_max_per_rho)
return (-force/self.tau + self.xi_N_rho_a2 * density_factor / self.tau)/self.k_p
def get_parameter(self, parameter_name):
"""Get all model parameters
Parameters:
parameter_name (str):
parameter name.
Returns:
float/int: the value of the specified parameter.
"""
return self.parameter_dict[parameter_name]
```
#### File: JFlommersfeld/Actomyosin-contractions-in-soft-pillar-rings/plotting.py
```python
import matplotlib.pyplot as plt
def plot_tip_displacement_and_velocity(timepoints, displacements, velocities, ylim_bottom_velo=0, ylim_top_velo=10,
ylim_bottom_disp=0, ylim_top_disp=7):
"""
    Plots the dynamics of the tip displacement and tip velocity.
Parameters:
timepoints (list(float)):
list of the considered timepoints in seconds.
displacements (list(float)):
list of the calculated tip displacements in um.
velocities (list(float)):
list of the calculated tip velocities in um/s.
ylim_bottom_velo (float):
the lower limit of the y-axis for the tip velocity axis.
ylim_top_velo (float):
the upper limit of the y-axis for the tip velocity axis.
ylim_bottom_disp (float):
            the lower limit of the y-axis for the tip displacement axis.
        ylim_top_disp (float):
            the upper limit of the y-axis for the tip displacement axis.
"""
plt.rcParams.update({'font.size': 15})
plt.clf()
fig, ax1 = plt.subplots()
color1 = 'k'
ax1.set_xlabel('Time (min)')
ax1.set_ylabel(r'Tip velocity ($10^{-2}\mu m /s$)', color=color1)
ax1.plot(timepoints/60, 100*velocities, color=color1, linewidth=2)
ax1.tick_params(axis='y', labelcolor=color1, color=color1)
ax1.set_ylim(bottom=ylim_bottom_velo, top=ylim_top_velo)
ax2 = ax1.twinx()
color2 = 'tab:red'
ax2.set_ylabel(r'Tip displacement ($\mu m$)', color=color2)
ax2.plot(timepoints/60, displacements, color=color2, linewidth=2)
ax2.tick_params(axis='y', labelcolor=color2, color=color2)
ax2.spines['left'].set_color(color1)
ax2.spines['right'].set_color(color2)
ax2.set_ylim(bottom=ylim_bottom_disp, top=ylim_top_disp)
fig.tight_layout()
plt.show()
def plot_transmitted_and_dissipated_power(timepoints, transmitted_powers, dissipated_powers, ylim_bottom=0.1,
ylim_top=1.e3):
"""
    Plots the dynamics of the transmitted and dissipated power on a logarithmic y-axis.
Parameters:
timepoints (list(float)):
list of the considered timepoints in seconds.
transmitted_powers (list(float)):
list of the calculated transmitted powers in atto Watts.
dissipated_powers (list(float)):
list of the calculated dissipated powers atto Watts.
ylim_bottom (float):
the lower limit of the y-axis. Default value is 0.1.
ylim_top (float):
the upper limit of the y-axis. Default value is 1.e3.
"""
plt.rcParams.update({'font.size': 15})
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time (min)')
ax1.set_ylabel(r'Power ($aW$)')
ax1.semilogy(timepoints/60, transmitted_powers, 'k-', linewidth=2, alpha=1, label='Transmitted power')
ax1.semilogy(timepoints/60, dissipated_powers, 'k:', linewidth=2, alpha=1, label='Dissipated power')
ax1.legend(frameon=False, loc=(0.5, 0.75), fontsize=13)
ax1.set_ylim(bottom=ylim_bottom, top=ylim_top)
fig.tight_layout()
plt.show()
def plot_peak_velocities(pillar_stiffnesses, peak_velocities, ylim_bottom=0., ylim_top=10.):
"""
    Plots the peak contraction velocities as a function of pillar stiffness.
Parameters:
pillar_stiffnesses (list(float)):
            list of the considered pillar stiffnesses in units of pN/um.
peak_velocities (list(float)):
list of the calculated peak velocities in um/s.
ylim_bottom (float):
the lower limit of the y-axis. Default value is 0.
ylim_top (float):
the upper limit of the y-axis. Default value is 10.
"""
plt.plot(pillar_stiffnesses, 100 * peak_velocities, '-', linewidth=2)
plt.xlabel(r"Pillar stiffness $k_p$ ($pN/\mu m$)")
plt.ylabel(r"Peak velocity ($10^{-2}\mu m/s $)")
plt.ylim(bottom=ylim_bottom, top=ylim_top)
plt.tight_layout()
plt.show()
def plot_final_forces(pillar_stiffnesses, final_force, ylim_bottom=0., ylim_top=500.):
"""
    Plots the final force per pillar as a function of pillar stiffness.
Parameters:
pillar_stiffnesses (list(float)):
            list of the considered pillar stiffnesses in units of pN/um.
final_force (list(float)):
list of the calculated final forces per pillar in pN.
ylim_bottom (float):
the lower limit of the y-axis. Default value is 0.
ylim_top (float):
the upper limit of the y-axis. Default value is 500.
"""
plt.clf()
plt.rcParams.update({'font.size': 15})
plt.plot(pillar_stiffnesses, final_force)
plt.xticks([50, 100, 150], ["0.05", "0.10", "0.15"])
plt.xlim(left=0, right=190)
plt.ylim(ylim_bottom, ylim_top)
plt.xlabel(r"Pillar stiffness $k_p$ ($nN/\mu m$)")
plt.ylabel(r"Force per pillar ($pN$)")
plt.tight_layout()
plt.show()
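# Hedged usage sketch, wiring these helpers to calculate_contraction_dynamics
# (argument values are assumptions; the tip displacement is force / pillar stiffness):
#
#   k_p = 65.0
#   t, y, v, p_t, p_d, w_t, w_d = calculate_contraction_dynamics(
#       'full model', 'parameters_full_model.txt', 3600, k_p)
#   plot_tip_displacement_and_velocity(t, y[0] / k_p, v)
#   plot_transmitted_and_dissipated_power(t, p_t, p_d)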
```
|
{
"source": "jflot/HapPy",
"score": 2
}
|
#### File: jflot/HapPy/commands.py
```python
from docopt import docopt
import sys, os, shutil
import tempfile
from os.path import join, dirname
import coverage as happyc
import estimate as happye
class AbstractCommand:
"""Base class for the commands"""
def __init__(self, command_args, global_args):
"""Initialize the commands."""
self.args = docopt(self.__doc__, argv=command_args)
self.global_args = global_args
def execute(self):
"""Execute the commands"""
raise NotImplementedError
def check_output_path(self, path, force=False):
"""Throws error if the output file exists. Create required file tree otherwise."""
# Get complete output filename and prevent overwriting unless force is enabled
if not force and os.path.exists(path):
raise IOError("Output file already exists. Use --force to overwrite")
if dirname(path):
os.makedirs(dirname(path), exist_ok=True)
class Coverage(AbstractCommand):
"""Coverage histogram command
Compute coverage histogram for mapping file.
usage:
coverage [--threads=1] --outdir=DIR <mapping.bam>
arguments:
mapping.bam Sorted BAM file after mapping reads to the assembly.
options:
-t, --threads=INT Number of parallel threads allocated for
sambamba [default: 1].
-d, --outdir=DIR Path where the .cov and .hist files are written.
"""
def execute(self):
print("Running coverage module.")
happyc.get_cov_hist(
self.args["<mapping.bam>"], self.args["--threads"], self.args["--outdir"]
)
class Estimate(AbstractCommand):
"""Estimate command
Compute AUC ratio and TSS from coverage histogram.
usage:
estimate [--max-contaminant=INT] [--max-diploid=INT] [--min-peak=INT] --size=INT --outstats=FILE [--plot] <coverage.hist>
arguments:
coverage.hist Coverage histogram.
options:
-C, --max-contaminant=INT Maximum coverage of contaminants.
-D, --max-diploid=INT Maximum coverage of the diploid peak.
-M, --min-peak=INT Minimum peak height.
-S, --size=INT Estimated haploid genome size.
-O, --outstats=FILE Path where the AUC ratio and TSS values are written.
-p, --plot Generate histogram plot.
"""
def execute(self):
happye.estimate_haploidy(
self.args["<coverage.hist>"],
self.args["--max-contaminant"],
self.args["--max-diploid"],
self.args["--min-peak"],
self.args["--size"],
self.args["--outstats"],
)
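# Hedged invocation sketch based on the docopt usage strings above; the CLI
# entry-point name and the file names are assumptions:
#
#   happy coverage --threads 4 --outdir out/ mapping.bam
#   happy estimate --size 120000000 --outstats out/happy.stats out/coverage.hist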
```
|
{
"source": "jflournoy/ACE",
"score": 3
}
|
#### File: ACE/ace/sources.py
```python
from __future__ import unicode_literals # use unicode everywhere
from bs4 import BeautifulSoup
import re
import os
import json
import abc
import importlib
from glob import glob
import datatable
import tableparser
import scrape
import config
import database
import logging
logger = logging.getLogger(__name__)
class SourceManager:
''' Loads all the available Source subclasses from this module and the
associated directory of JSON config files and uses them to determine which parser
to call when a new HTML file is passed. '''
def __init__(self, database, table_dir=None):
''' SourceManager constructor.
Args:
database: A Database instance to use with all Sources.
table_dir: An optional directory name to save any downloaded tables to.
When table_dir is None, nothing will be saved (requiring new scraping
each time the article is processed).
'''
module = importlib.import_module('ace.sources')
self.sources = {}
source_dir = os.path.join(os.path.dirname(__file__), 'sources')
for config_file in glob('%s/*json' % source_dir):
class_name = config_file.split('/')[-1].split('.')[0]
cls = getattr(module, class_name + 'Source')(config_file, database, table_dir)
self.sources[class_name] = cls
def identify_source(self, html):
''' Identify the source of the article and return the corresponding Source object. '''
for source in self.sources.values():
for patt in source.identifiers:
if re.search(patt, html):
logger.debug('Matched article to Source: %s' % source.__class__.__name__)
return source
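# Usage sketch (hypothetical paths and constructor; a Database instance comes
# from ace.database, and the HTML is read as bytes because parse_article()
# decodes it itself):
#
#   db = database.Database()
#   manager = SourceManager(db, table_dir='/tmp/ace_tables')
#   html = open('article.html', 'rb').read()
#   source = manager.identify_source(html)
#   if source is not None:
#       article = source.parse_article(html)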
# A single source of articles--i.e., a publisher or journal
class Source:
__metaclass__ = abc.ABCMeta
# Core set of HTML entities and unicode characters to replace.
# BeautifulSoup converts HTML entities to unicode, so we could
# potentially do the replacement only for unicode chars after
# soupifying the HTML. But this way we only have to do one pass
# through the entire file, so it should be faster to do it up front.
ENTITIES = {
' ': ' ',
'−': '-',
# 'κ': 'kappa',
'\xa0': ' ', # Unicode non-breaking space
# '\x3e': ' ',
'\u2212': '-', # Various unicode dashes
'\u2012': '-',
'\u2013': '-',
'\u2014': '-',
'\u2015': '-',
'\u8211': '-',
'\u0150': '-',
'\u0177': '',
'\u0160': '',
'\u0145': "'",
'\u0146': "'",
}
def __init__(self, config, database, table_dir=None):
config = json.load(open(config, 'rb'))
self.database = database
self.table_dir = table_dir
valid_keys = ['name', 'identifiers', 'entities', 'delay']
for k, v in config.items():
if k in valid_keys:
setattr(self, k, v)
# Append any source-specific entities found in the config file to
# the standard list
if self.entities is None:
self.entities = Source.ENTITIES
else:
self.entities.update(Source.ENTITIES)
@abc.abstractmethod
def parse_article(self, html, pmid=None, metadata_dir=None):
        ''' Takes HTML article as input and returns an Article. PMID can also be
passed, which prevents having to scrape it from the article and/or look it
up in PubMed. '''
# Skip rest of processing if this record already exists
if pmid is not None and self.database.article_exists(pmid) and not config.OVERWRITE_EXISTING_ROWS:
return False
html = html.decode('utf-8') # Make sure we're working with unicode
html = self.decode_html_entities(html)
soup = BeautifulSoup(html)
doi = self.extract_doi(soup)
pmid = self.extract_pmid(soup) if pmid is None else pmid
metadata = scrape.get_pubmed_metadata(pmid, store=metadata_dir, save=True)
# TODO: add Source-specific delimiting of salient text boundaries--e.g., exclude References
text = soup.get_text()
if self.database.article_exists(pmid):
if config.OVERWRITE_EXISTING_ROWS:
self.database.delete_article(pmid)
else:
return False
self.article = database.Article(text, pmid=pmid, doi=doi, metadata=metadata)
return soup
@abc.abstractmethod
def parse_table(self, table):
''' Takes HTML for a single table and returns a Table. '''
# Formatting issues sometimes prevent table extraction, so just return
if table is None:
return False
logger.debug("\t\tFound a table...")
# Count columns. Check either just one row, or all of them.
def n_cols_in_row(row):
return sum([int(td['colspan']) if td.has_attr('colspan') else 1 for td in row.find_all('td')])
if config.CAREFUL_PARSING:
n_cols = max([n_cols_in_row(
row) for row in table.find('tbody').find_all('tr')])
else:
n_cols = n_cols_in_row(table.find('tbody').find('tr'))
# Initialize grid and populate
data = datatable.DataTable(0, n_cols)
rows = table.find_all('tr')
for (j, r) in enumerate(rows):
try:
cols = r.find_all(['td', 'th'])
cols_found_in_row = 0
n_cells = len(cols)
# Assign number of rows and columns this cell fills. We use these rules:
# * If a rowspan/colspan is explicitly provided, use it
# * If not, initially assume span == 1 for both rows and columns.
for (i, c) in enumerate(cols):
r_num = int(c['rowspan']) if c.has_attr('rowspan') else 1
c_num = int(c['colspan']) if c.has_attr('colspan') else 1
cols_found_in_row += c_num
# * Check to make sure that we don't have unaccounted-for columns in the
# row after including the current cell. If we do, adjust the colspan
# to take up all of the remaining columns. This is necessary because
# some tables have malformed HTML, and BeautifulSoup can also
# cause problems in its efforts to fix bad tables. The most common
# problem is deletion or omission of enough <td> tags to fill all
# columns, hence our adjustment. Note that in some cases the order of
# filling is not sequential--e.g., when a previous row has cells with
# rowspan > 1. So we have to check if there are None values left over
# in the DataTable's current row after we finish filling
# it.
if i + 1 == n_cells and cols_found_in_row < n_cols and data[j].count(None) > c_num:
c_num += n_cols - cols_found_in_row
data.add_val(c.get_text(), r_num, c_num)
except Exception as e:
if not config.SILENT_ERRORS:
logger.error(e.message)
if not config.IGNORE_BAD_ROWS:
raise
logger.debug("\t\tTrying to parse table...")
return tableparser.parse_table(data)
@abc.abstractmethod
def extract_doi(self, soup):
''' Every Source subclass must be able to extract its doi. '''
return
@abc.abstractmethod
def extract_pmid(self, soup):
''' Every Source subclass must be able to extract its PMID. '''
return
def decode_html_entities(self, html):
''' Re-encode HTML entities as innocuous little Unicode characters. '''
        # Any entities BeautifulSoup passes through that we don't like, e.g.,
#  /x0a
patterns = re.compile('(' + '|'.join(re.escape(
k) for k in self.entities.iterkeys()) + ')')
replacements = lambda m: self.entities[m.group(0)]
return patterns.sub(replacements, html)
# return html
def _download_table(self, url):
''' For Sources that have tables in separate files, a helper for
downloading and extracting the table data. Also saves to file if desired.
'''
delay = self.delay if hasattr(self, 'delay') else 0
if self.table_dir is not None:
nice_url = url.replace('/', '_')
nice_url = nice_url.replace(':', '_')
nice_url = nice_url.replace('?', '_')
filename = '%s/%s' % (self.table_dir, nice_url)
if os.path.exists(filename):
table_html = open(filename).read().decode('utf-8')
else:
table_html = scrape.get_url(url, delay=delay)
open(filename, 'w').write(table_html.encode('utf-8'))
else:
table_html = scrape.get_url(url, delay=delay)
table_html = self.decode_html_entities(table_html)
return(BeautifulSoup(table_html))
class HighWireSource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(HighWireSource, self).parse_article(html, pmid, **kwargs)
if not soup:
return False
# To download tables, we need the content URL and the number of tables
content_url = soup.find('meta', {
'name': 'citation_public_url'})['content']
n_tables = len(soup.find_all('span', class_='table-label'))
# Now download each table and parse it
tables = []
for i in range(n_tables):
t_num = i + 1
url = '%s/T%d.expansion.html' % (content_url, t_num)
table_soup = self._download_table(url)
tc = table_soup.find(class_='table-expansion')
t = tc.find('table', {'id': 'table-%d' % (t_num)})
t = self.parse_table(t)
if t:
t.position = t_num
t.label = tc.find(class_='table-label').text
t.number = t.label.split(' ')[-1].strip()
try:
t.caption = tc.find(class_='table-caption').get_text()
except:
pass
try:
t.notes = tc.find(class_='table-footnotes').get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(HighWireSource, self).parse_table(table)
def extract_doi(self, soup):
try:
return soup.find('meta', {'name': 'citation_doi'})['content']
except:
return ''
def extract_pmid(self, soup):
return soup.find('meta', {'name': 'citation_pmid'})['content']
class ScienceDirectSource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(ScienceDirectSource, self).parse_article(html, pmid, **kwargs)
if not soup:
return False
# Extract tables
tables = []
for (i, tc) in enumerate(soup.find_all('dl', {'class': 'table '})):
table_html = tc.find('table')
t = self.parse_table(table_html)
if t:
t.position = i + 1
t.number = tc['data-label'].split(' ')[-1].strip()
t.label = tc.find('span', class_='label').text.strip()
try:
t.caption = tc.find('p', class_='caption').get_text()
except:
pass
try:
t.notes = tc.find(class_='tblFootnote').get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(ScienceDirectSource, self).parse_table(table)
def extract_doi(self, soup):
try:
found = soup.find('a', {'id': 'ddDoi'})['href'].replace('http://dx.doi.org/', '')
except TypeError:
found = ''
if found == '':
try:
someText = soup.find(text=re.compile('SDM\.doi = '))
found = re.search('SDM.doi = \'(.+?)\'', someText).group(1)
except AttributeError:
found = ''
return found
def extract_pmid(self, soup):
return scrape.get_pmid_from_doi(self.extract_doi(soup))
class PlosSource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(PlosSource, self).parse_article(html, pmid, **kwargs) # Do some preprocessing
if not soup:
return False
# Extract tables
tables = []
for (i, tc) in enumerate(soup.find_all('table-wrap')):
table_html = tc.find('table')
t = self.parse_table(table_html)
if t:
t.position = i + 1
t.label = tc.find('label').text
t.number = t.label.split(' ')[-1].strip()
try:
t.caption = tc.find('title').get_text()
except:
pass
try:
t.notes = tc.find('table-wrap-foot').get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(PlosSource, self).parse_table(table)
def extract_doi(self, soup):
return soup.find('article-id', {'pub-id-type': 'doi'}).text
def extract_pmid(self, soup):
return scrape.get_pmid_from_doi(self.extract_doi(soup))
class FrontiersSource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(FrontiersSource, self).parse_article(html, pmid, **kwargs)
if not soup:
return False
# Extract tables
tables = []
table_containers = soup.findAll(
'table-wrap', {'id': re.compile('^T\d+$')})
for (i, tc) in enumerate(table_containers):
table_html = tc.find('table')
t = self.parse_table(table_html)
# If Table instance is returned, add other properties
if t:
t.position = i + 1
t.number = tc['id'][1::].strip()
t.label = tc.find('label').get_text()
try:
t.caption = tc.find('caption').get_text()
except:
pass
try:
t.notes = tc.find('table-wrap-foot').get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(FrontiersSource, self).parse_table(table)
def extract_doi(self, soup):
return soup.find('article-id', {'pub-id-type': 'doi'}).text
def extract_pmid(self, soup):
return scrape.get_pmid_from_doi(self.extract_doi(soup))
class JournalOfCognitiveNeuroscienceSource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(
JournalOfCognitiveNeuroscienceSource, self).parse_article(html, pmid, **kwargs)
if not soup:
return False
# To download tables, we need the DOI and the number of tables
doi = self.extract_doi(soup)
pattern = re.compile('^T\d+$')
n_tables = len(soup.find_all('table', {'id': pattern}))
logger.debug("Found %d tables!" % n_tables)
tables = []
# Now download each table and parse it
for i in range(n_tables):
num = i + 1
url = 'http://www.mitpressjournals.org/action/showPopup?citid=citart1&id=T%d&doi=%s' % (
num, doi)
table_soup = self._download_table(url)
tc = table_soup.find('table').find('table') # JCogNeuro nests tables 2-deep
t = self.parse_table(tc)
if t:
t.position = num
t.number = num
cap = tc.caption.find('span', class_='title')
t.label = cap.b.get_text()
t.caption = cap.get_text()
try:
t.notes = table_soup.find('div', class_="footnote").p.get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(JournalOfCognitiveNeuroscienceSource, self).parse_table(table)
def extract_doi(self, soup):
return soup.find('meta', {'name': 'dc.Identifier', 'scheme': 'doi'})['content']
def extract_pmid(self, soup):
return scrape.get_pmid_from_doi(self.extract_doi(soup))
class WileySource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(WileySource, self).parse_article(html, pmid, **kwargs) # Do some preprocessing
if not soup:
return False
# Extract tables
tables = []
table_containers = soup.findAll('div', {
'class': 'table', 'id': re.compile('^(.*?)\-tbl\-\d+$|^t(bl)*\d+$')})
print "Found %d tables." % len(table_containers)
for (i, tc) in enumerate(table_containers):
table_html = tc.find('table')
try:
# Remove footer, which appears inside table
footer = table_html.tfoot.extract()
except:
pass
t = self.parse_table(table_html)
# If Table instance is returned, add other properties
if t:
t.position = i + 1
# t.number = tc['id'][3::].strip()
t.number = re.search('t[bl0\-]*(\d+)$', tc['id']).group(1)
t.label = tc.find('span', class_='label').get_text()
t.caption = tc.find('caption').get_text()
try:
t.notes = footer.get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(WileySource, self).parse_table(table)
def extract_doi(self, soup):
return soup.find('meta', {'name': 'citation_doi'})['content']
def extract_pmid(self, soup):
return scrape.get_pmid_from_doi(self.extract_doi(soup))
# Note: the SageSource is largely useless and untested because Sage renders tables
# as images.
class SageSource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(SageSource, self).parse_article(html, pmid, **kwargs)
if not soup:
return False
# To download tables, we need the content URL and the number of tables
content_url = soup.find('meta', {
'name': 'citation_public_url'})['content']
n_tables = len(soup.find_all('span', class_='table-label'))
# Now download each table and parse it
tables = []
for i in range(n_tables):
t_num = i + 1
url = '%s/T%d.expansion.html' % (content_url, t_num)
table_soup = self._download_table(url)
tc = table_soup.find(class_='table-expansion')
t = tc.find('table', {'id': 'table-%d' % (t_num)})
t = self.parse_table(t)
if t:
t.position = t_num
t.label = tc.find(class_='table-label').text
t.number = t.label.split(' ')[-1].strip()
try:
t.caption = tc.find(class_='table-caption').get_text()
except:
pass
try:
t.notes = tc.find(class_='table-footnotes').get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(SageSource, self).parse_table(table)
def extract_doi(self, soup):
return soup.find('meta', {'name': 'citation_doi'})['content']
def extract_pmid(self, soup):
return soup.find('meta', {'name': 'citation_pmid'})['content']
class SpringerSource(Source):
def parse_article(self, html, pmid=None, **kwargs):
soup = super(SpringerSource, self).parse_article(html, pmid, **kwargs)
if not soup:
return False
# Extract tables
tables = []
table_containers = soup.findAll(
'figure', {'id': re.compile('^Tab\d+$')})
for (i, tc) in enumerate(table_containers):
table_html = tc.find('table')
t = self.parse_table(table_html)
# If Table instance is returned, add other properties
if t:
t.position = i + 1
t.number = tc['id'][3::].strip()
t.label = tc.find('span', class_='CaptionNumber').get_text()
try:
t.caption = tc.find(class_='CaptionContent').p.get_text()
except:
pass
try:
t.notes = tc.find(class_='TableFooter').p.get_text()
except:
pass
tables.append(t)
self.article.tables = tables
return self.article
def parse_table(self, table):
return super(SpringerSource, self).parse_table(table)
def extract_doi(self, soup):
content = soup.find('p', class_='ArticleDOI').get_text()
print content
return content.split(' ')[1]
def extract_pmid(self, soup):
return scrape.get_pmid_from_doi(self.extract_doi(soup))
```
|
{
"source": "jflower154/magnum",
"score": 2
}
|
#### File: controllers/v1/magnum_services.py
```python
import pecan
import wsme
from wsme import types as wtypes
from magnum.api.controllers import base
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api import expose
from magnum.api import servicegroup as svcgrp_api
from magnum.common import policy
from magnum import objects
from magnum.objects import fields
class MagnumService(base.APIBase):
host = wtypes.StringType(min_length=1, max_length=255)
"""Name of the host """
binary = wtypes.Enum(str, *fields.MagnumServiceBinary.ALL)
"""Name of the binary"""
state = wtypes.Enum(str, *fields.MagnumServiceState.ALL)
"""State of the binary"""
id = wsme.wsattr(wtypes.IntegerType(minimum=1))
"""The id for the healthcheck record """
report_count = wsme.wsattr(wtypes.IntegerType(minimum=0))
"""The number of times the heartbeat was reported """
disabled = wsme.wsattr(types.boolean, default=False)
"""If the service is 'disabled' administratively """
disabled_reason = wtypes.StringType(min_length=0, max_length=255)
"""Reason for disabling """
def __init__(self, state, **kwargs):
super(MagnumService, self).__init__()
self.fields = ['state']
setattr(self, 'state', state)
for field in objects.MagnumService.fields:
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
class MagnumServiceCollection(collection.Collection):
mservices = [MagnumService]
"""A list containing service objects"""
def __init__(self, **kwargs):
super(MagnumServiceCollection, self).__init__()
self._type = 'mservices'
@staticmethod
def convert_db_rec_list_to_collection(servicegroup_api,
rpc_msvcs, **kwargs):
collection = MagnumServiceCollection()
collection.mservices = []
for p in rpc_msvcs:
alive = servicegroup_api.service_is_up(p)
state = 'up' if alive else 'down'
msvc = MagnumService(state, **p.as_dict())
collection.mservices.append(msvc)
collection.next = collection.get_next(limit=None, url=None, **kwargs)
return collection
class MagnumServiceController(base.Controller):
"""REST controller for magnum-services."""
def __init__(self, **kwargs):
super(MagnumServiceController, self).__init__()
self.servicegroup_api = svcgrp_api.ServiceGroup()
@expose.expose(MagnumServiceCollection)
@policy.enforce_wsgi("magnum-service")
def get_all(self):
"""Retrieve a list of magnum-services.
"""
msvcs = objects.MagnumService.list(pecan.request.context,
limit=None,
marker=None,
sort_key='id',
sort_dir='asc')
return MagnumServiceCollection.convert_db_rec_list_to_collection(
self.servicegroup_api, msvcs)
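# Rough shape of the JSON returned for a GET on this controller (hypothetical
# field values; the exact payload depends on the deployment):
#
#   {"mservices": [{"id": 1, "host": "controller-0", "binary": "magnum-conductor",
#                   "state": "up", "report_count": 42, "disabled": false,
#                   "disabled_reason": null}]}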
```
#### File: unit/objects/test_objects.py
```python
import datetime
import gettext
import mock
from oslo_versionedobjects import exception as object_exception
from oslo_versionedobjects import fields
from oslo_versionedobjects import fixture
from magnum.common import context as magnum_context
from magnum.objects import base
from magnum.tests import base as test_base
gettext.install('magnum')
@base.MagnumObjectRegistry.register
class MyObj(base.MagnumPersistentObject, base.MagnumObject):
VERSION = '1.0'
fields = {'foo': fields.IntegerField(),
'bar': fields.StringField(),
'missing': fields.StringField(),
}
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context)
obj.foo = 1
obj.bar = 'bar'
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self, context):
return 'polo'
@base.remotable
def update_test(self, context):
if context.project_id == 'alternate':
self.bar = 'alternate-context'
else:
self.bar = 'updated'
@base.remotable
def save(self, context):
self.obj_reset_changes()
@base.remotable
def refresh(self, context):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self, context):
self.bar = 'meow'
self.save(context)
self.foo = 42
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def get(cls, *args, **kwargs):
pass
@base.MagnumObjectRegistry.register_if(False)
class TestSubclassedObject(MyObj):
fields = {'new_field': fields.StringField()}
class _TestObject(object):
def test_hydration_type_error(self):
primitive = {'magnum_object.name': 'MyObj',
'magnum_object.namespace': 'magnum',
'magnum_object.version': '1.0',
'magnum_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'magnum_object.name': 'MyObj',
'magnum_object.namespace': 'magnum',
'magnum_object.version': '1.0',
'magnum_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
def test_hydration_bad_ns(self):
primitive = {'magnum_object.name': 'MyObj',
'magnum_object.namespace': 'foo',
'magnum_object.version': '1.0',
'magnum_object.data': {'foo': 1}}
self.assertRaises(object_exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_dehydration(self):
expected = {'magnum_object.name': 'MyObj',
'magnum_object.namespace': 'magnum',
'magnum_object.version': '1.0',
'magnum_object.data': {'foo': 1}}
obj = MyObj(self.context)
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual(expected, obj.obj_to_primitive())
def test_get_updates(self):
obj = MyObj(self.context)
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_object_property(self):
obj = MyObj(self.context, foo=1)
self.assertEqual(1, obj.foo)
def test_object_property_type_error(self):
obj = MyObj(self.context)
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj(self.context)
self.assertEqual('loaded!', obj.bar)
def test_load_in_base(self):
@base.MagnumObjectRegistry.register_if(False)
class Foo(base.MagnumPersistentObject, base.MagnumObject):
fields = {'foobar': fields.IntegerField()}
obj = Foo(self.context)
# NOTE(danms): Can't use assertRaisesRegexp() because of py26
raised = False
ex = None
try:
obj.foobar
except NotImplementedError as e:
raised = True
ex = e
self.assertTrue(raised)
self.assertIn('foobar', str(ex))
def test_loaded_in_primitive(self):
obj = MyObj(self.context)
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual('loaded!', obj.bar)
expected = {'magnum_object.name': 'MyObj',
'magnum_object.namespace': 'magnum',
'magnum_object.version': '1.0',
'magnum_object.changes': ['bar'],
'magnum_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(expected, obj.obj_to_primitive())
def test_changes_in_primitive(self):
obj = MyObj(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
primitive = obj.obj_to_primitive()
self.assertIn('magnum_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(set(['foo']), obj2.obj_what_changed())
obj2.obj_reset_changes()
self.assertEqual(set(), obj2.obj_what_changed())
def test_unknown_objtype(self):
self.assertRaises(object_exception.UnsupportedObjectError,
base.MagnumObject.obj_class_from_name, 'foo', '1.0')
def test_with_alternate_context(self):
context1 = magnum_context.RequestContext('foo', 'foo')
context2 = magnum_context.RequestContext('bar', project_id='alternate')
obj = MyObj.query(context1)
obj.update_test(context2)
self.assertEqual('alternate-context', obj.bar)
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(object_exception.OrphanedObjectError,
obj.update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.update_test(self.context)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
self.assertEqual(123, obj.foo)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.save(self.context)
self.assertEqual(set([]), obj.obj_what_changed())
self.assertEqual(123, obj.foo)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.refresh(self.context)
self.assertEqual(set([]), obj.obj_what_changed())
self.assertEqual(321, obj.foo)
self.assertEqual('refreshed', obj.bar)
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(set(['bar']), obj.obj_what_changed())
obj.modify_save_modify(self.context)
self.assertEqual(set(['foo']), obj.obj_what_changed())
self.assertEqual(42, obj.foo)
self.assertEqual('meow', obj.bar)
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual('bar', obj.bar)
result = obj.marco(self.context)
self.assertEqual('polo', result)
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(1, obj.foo)
obj.update_test(self.context)
self.assertEqual('updated', obj.bar)
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
datatime = fields.DateTimeField()
obj = MyObj(self.context)
obj.created_at = dt
obj.updated_at = dt
expected = {'magnum_object.name': 'MyObj',
'magnum_object.namespace': 'magnum',
'magnum_object.version': '1.0',
'magnum_object.changes':
['created_at', 'updated_at'],
'magnum_object.data':
{'created_at': datatime.stringify(dt),
'updated_at': datatime.stringify(dt)}
}
actual = obj.obj_to_primitive()
# magnum_object.changes is built from a set and order is undefined
self.assertEqual(sorted(expected['magnum_object.changes']),
sorted(actual['magnum_object.changes']))
del expected['magnum_object.changes'], actual['magnum_object.changes']
self.assertEqual(expected, actual)
def test_contains(self):
obj = MyObj(self.context)
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(self.context, foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj(self.context, foo=1)
# Foo has value, should not get the default
self.assertEqual(1, getattr(obj, 'foo', 2))
# Foo has value, should return the value without error
self.assertEqual(1, getattr(obj, 'foo'))
# Bar without a default should lazy-load
self.assertEqual('loaded!', getattr(obj, 'bar'))
# Bar now has a default, but loaded value should be returned
self.assertEqual('loaded!', getattr(obj, 'bar', 'not-loaded'))
# Invalid attribute should raise AttributeError
self.assertFalse(hasattr(obj, 'nothing'))
def test_object_inheritance(self):
base_fields = list(base.MagnumPersistentObject.fields.keys())
myobj_fields = ['foo', 'bar', 'missing'] + base_fields
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(MyObj.fields), len(myobj_fields))
self.assertEqual(set(MyObj.fields.keys()), set(myobj_fields))
self.assertEqual(len(TestSubclassedObject.fields),
len(myobj_fields) + len(myobj3_fields))
self.assertEqual(set(TestSubclassedObject.fields.keys()),
set(myobj_fields) | set(myobj3_fields))
def test_get_changes(self):
obj = MyObj(self.context)
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
@base.MagnumObjectRegistry.register_if(False)
class TestObj(base.MagnumPersistentObject, base.MagnumObject):
fields = {'foo': fields.IntegerField()}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj(self.context)
self.assertEqual(set(['created_at', 'updated_at', 'foo', 'bar']),
set(obj.obj_fields))
def test_obj_constructor(self):
obj = MyObj(self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
class TestObject(test_base.TestCase, _TestObject):
pass
# This is a static dictionary that holds all fingerprints of the versioned
# objects registered with the MagnumRegistry. Each fingerprint contains
# the version of the object and an md5 hash of RPC-critical parts of the
# object (fields and remotable methods). If either the version or hash
# change, the static tree needs to be updated.
# For more information on object version testing, read
# https://docs.openstack.org/magnum/latest/contributor/objects.html
object_data = {
'Cluster': '1.18-9f0dfcc3e898eef2b9a09647b612adb6',
'ClusterTemplate': '1.18-7fa94f4fdd027acfb4f022f202afdfb5',
'Certificate': '1.1-1924dc077daa844f0f9076332ef96815',
'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',
'X509KeyPair': '1.2-d81950af36c59a71365e33ce539d24f9',
'MagnumService': '1.0-2d397ec59b0046bd5ec35cd3e06efeca',
'Stats': '1.0-73a1cd6e3c0294c932a66547faba216c',
'Quota': '1.0-94e100aebfa88f7d8428e007f2049c18',
'Federation': '1.0-166da281432b083f0e4b851336e12e20'
}
class TestObjectVersions(test_base.TestCase):
def test_versions(self):
# Test the versions of current objects with the static tree above.
# This ensures that any incompatible object changes require a version
# bump.
classes = base.MagnumObjectRegistry.obj_classes()
checker = fixture.ObjectVersionChecker(obj_classes=classes)
expected, actual = checker.test_hashes(object_data)
self.assertEqual(expected, actual,
"Fields or remotable methods in some objects have "
"changed. Make sure the versions of the objects has "
"been bumped, and update the hashes in the static "
"fingerprints tree (object_data). For more "
"information, read https://docs.openstack.org/"
"magnum/latest/contributor/objects.html")
class TestObjectSerializer(test_base.TestCase):
def test_object_serialization(self):
ser = base.MagnumObjectSerializer()
obj = MyObj(self.context)
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('magnum_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.MagnumObjectSerializer()
obj = MyObj(self.context)
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertFalse(isinstance(item, base.MagnumObject))
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
@mock.patch('magnum.objects.base.MagnumObject.indirection_api')
def _test_deserialize_entity_newer(self, obj_version, backported_to,
mock_indirection_api,
my_version='1.6'):
ser = base.MagnumObjectSerializer()
mock_indirection_api.object_backport_versions.side_effect \
= NotImplementedError()
mock_indirection_api.object_backport.return_value = 'backported'
@base.MagnumObjectRegistry.register
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertEqual(
False,
mock_indirection_api.object_backport.called)
else:
self.assertEqual('backported', result)
mock_indirection_api.object_backport.assert_called_with(
self.context, primitive, backported_to)
def test_deserialize_entity_newer_version_backports_level1(self):
"Test object with unsupported (newer) version"
self._test_deserialize_entity_newer('11.5', '1.6')
def test_deserialize_entity_newer_version_backports_level2(self):
"Test object with unsupported (newer) version"
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_same_revision_does_not_backport(self):
"Test object with supported revision"
self._test_deserialize_entity_newer('1.6', None)
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
"Test object with supported revision"
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
"Test object with supported (newer) revision"
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
"Test object with unsupported (newer) version and revision"
self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1')
```
|
{
"source": "jfluke1414/Ar_crypto_python_django",
"score": 3
}
|
#### File: jfluke1414/Ar_crypto_python_django/db_insert.py
```python
from . import model_connector
def insert_user():
cursor, mydb = model_connector.mysql_db_info()
cursor.execute("use cryptohard")
mysql = "insert into user(user_name, user_id, user_pw, regdate) values('inzaghi33', '<EMAIL>', '7b52009b64fd0a2a49e6d8a939753077792b0554', NOW())";
cursor.execute(mysql)
mydb.commit()
cursor.close()
mydb.close()
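# A parameterized variant of the insert above (hypothetical placeholder values;
# mysql.connector substitutes %s markers via cursor.execute, which avoids
# building SQL from string literals):
#
#   cursor.execute(
#       "insert into user(user_name, user_id, user_pw, regdate) "
#       "values(%s, %s, %s, NOW())",
#       ("some_user", "some_email", "some_password_hash"))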
insert_user()
```
#### File: jfluke1414/Ar_crypto_python_django/model_connector.py
```python
import mysql.connector
# class mysql_connect_info:
def mysql_db_info():
mydb = mysql.connector.connect(host="localhost", user="root", passwd="<PASSWORD>", database="cryptohard")
cursor = mydb.cursor()
return cursor, mydb
def sqlite3_db_info():
db = ""
return db
```
#### File: jfluke1414/Ar_crypto_python_django/test.py
```python
import requests
from django.http import HttpResponse
from django.shortcuts import render
def test():
r = requests.get('http://192.168.238.129/index/')
print(r.text)
def __init__(self):
print('in_self')
self.username = self
def chk_session():
p_username = 'jfluke'
p_password = '<PASSWORD>'
p_email = '<EMAIL>'
encrypted_password = '<PASSWORD>'
# sha1 = hashlib.new('sha1')
# sha1.update(p_password.encode('UTF-8'))
# encrypted_password = <PASSWORD>()
header = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
s_data = {
'is_user': True,
'username': p_username,
'password': <PASSWORD>,
'email': p_email,
'headers': header
}
s = requests.session()
requests.session.__init__(p_username)
# url = 'http://192.168.238.129/'
# r = s.post(url=url, headers=header)
# print(r.status_code)
# print(r.text)
return s_data
# session.post('login.py', s_data)
def response_test():
r = requests.get('http://192.168.238.129')
print(r.text)
chk_session()
response_test()
```
|
{
"source": "JFlynnXYZ/LichtensteinGenerator",
"score": 4
}
|
#### File: LichtensteinGenerator/bin/quantize.py
```python
r'''
Module containing Colour Quantization for reducing an image down to a
selection of set colours.
The idea behind this module is to replace the colours of an image with
the most likely matches from a set of colours specified by the user. This
module works in conjunction with PIL as its means for editing image
colours.
Currently, the module only works with RGB colours, but the
use of other PIL colour types will be added in the future, such as HSV,
RGBA, HEX and PIL worded colours. The functions in the module consist of
calculating the closeness of a colour to another and switching them using
colour switch, and the Colour Quantization function itself.
Here are some examples of how the code works:
>>> curCols = [(98,186,25), (50,0,69), (245,89,12), (69,156,102), (89,56,71), (89,58, 205), (5,5,20)]
>>> newCols = [(0,0,0), (255,255,255), (255,0,0), (0,0,255), (255,255,0)]
>>> colour_switch(curCols, newCols)
[(255, 255, 0), (0, 0, 255), (255, 0, 0), (69, 156, 102), (89, 56, 71), (255, 255, 255), (0, 0, 0)]
>>> f = 'lena.png'
>>> try:
... img = Image.open(f)
... except IOError:
... img = Image.new('RGB', (500,500))
... drw = ImageDraw.Draw(img)
... for i,x in enumerate(xrange(40, img.size[0], 70)):
... for y in xrange(40, img.size[1], 70):
... drw.rectangle((x-30,y-30,x,y), fill=curCols[i])
...
>>> quantImg = quantize(img, newCols, nCols=len(curCols)+1, sigma=1)
>>> img.show(command='display')
>>> quantImg.show(command='display')
>>>
To test/execute the examples in the module documentation make sure that
you have imported the quantize module and do the following:
import doctest
nfail, ntests = doctest.testmod(quantize)
'''
from PIL import Image, ImageFilter, ImageDraw
import colour as c
def colour_switch(curC, newC):
'''Switches the closest matching colours from one list of colours to
another, creating a combined palette of colours.
Parameters:
curC [list] : a list of 3-tuple RGB colours. These are colours that
are from the current image and will be switched out for
the closest matching colour from 'newC'.
newC [list] : a list of 3-tuple RGB colours. These are the colours
                      which will replace their closest matching counterpart colour
in the list 'curC'
On Exit:
Returns a new combined list of colours with all the 'newC' colours
        replacing their closest matching colours from 'curC'. If there are fewer
new colours compared to current colours, then any colours which have
no close new colour counterpart will be left in the colours.
'''
if len(newC) > len(curC):
raise ValueError, "more values are in new colours over current colours"
cCloseness = {}
for cCol in curC:
cCloseness[cCol] = {}
for nCol in newC:
cCloseness[cCol][nCol] = sum([abs(cCol[i]-nCol[i]) for i in range(3)])
finalPalette = curC[:]
for newColour in newC:
curCol, close = None, 766
for currentColour, comparisons in cCloseness.items():
if comparisons[newColour] < close:
curCol, close = currentColour, comparisons[newColour]
finalPalette[finalPalette.index(curCol)] = newColour
del cCloseness[curCol]
return finalPalette
def quantize(img, newCols, nCols=8, sigma=4, aalias=4):
'''Creates a colour quantize image from a PIL Image with new colours.
Parameters:
img [PIL Image] : A PIL image object. Any colour mode can be used but
RGB is preferred.
        newCols [list] : A list of 3-tuple RGB colours to replace their
closest matching colour on the image.
nCols [int] : The number of colours that the image will be reduced
                      to. This number must be higher than the length of
'newCols'.
sigma [float] : The magnitude of the gaussian blur used on the image
to de-noise the image for a smoother result.
aalias [int] : The anti-alias amount for the edges of the pixels.
On Exit:
Returns an RGB PIL image with the number of colours 'nCols', with the
colours from 'newCols' replacing their closest matches from the image,
as well as a noise reduction of 'sigma' and anti alias of 'aalias'.
'''
aaliasImg = img.resize((img.size[0]*aalias, img.size[1]*aalias),
resample=Image.ANTIALIAS)
#aaliasImg = aaliasImg.filter(ImageFilter.GaussianBlur(sigma))
    # The above line allows for varying Gaussian blur levels. However,
    # the version of PIL in the labs does not have this implemented, except
    # in newer versions. See the documentation for more details.
aaliasImg = aaliasImg.filter(ImageFilter.BLUR)
finImg = aaliasImg.convert("P", palette=Image.ADAPTIVE, colors=nCols)
curCols = c.rgb_unflatten(finImg.getpalette()[:3*nCols])
finalPalette = colour_switch(curCols, newCols)
finImg.putpalette(c.rgb_flatten(finalPalette))
finImg = finImg.resize(img.size, resample=Image.ANTIALIAS)
return finImg.convert('RGB')
if __name__ == '__main__':
curCols = [(98,186,25), (50,0,69), (245,89,12), (69,156,102), (89,56,71),
(89,58, 205), (5,5,20)]
newCols = [(0,0,0), (255,255,255), (190,0,0), (0,16,115), (248,196,0)]
print colour_switch(curCols, newCols)
f = 'lena.png'
try:
img = Image.open(f)
except IOError:
img = Image.new('RGB', (512,512))
drw = ImageDraw.Draw(img)
for i,x in enumerate(xrange(40, img.size[0], 70)):
for y in xrange(40, img.size[1], 70):
drw.rectangle((x-30,y-30,x,y), fill=curCols[i])
quantImg = quantize(img, newCols, nCols=len(curCols)+1, sigma=1)
img.show(command='display')
quantImg.show(command='display')
```
|
{
"source": "JFlynnXYZ/pymel",
"score": 2
}
|
#### File: maintenance/templates/commandfunc.py
```python
@_factories.addCmdDocs
def {{ funcName }}(*args, **kwargs):
{% if uiWidget %}
from . import uitypes
{% endif %}
{% if timeRangeFlags %}
for flag in {{ timeRangeFlags }}:
try:
rawVal = kwargs[flag]
except KeyError:
continue
else:
kwargs[flag] = _factories.convertTimeValues(rawVal)
{% endif %}
{% if callbackFlags %}
if len(args):
doPassSelf = kwargs.pop('passSelf', False)
else:
doPassSelf = False
for key in {{ callbackFlags }}:
try:
cb = kwargs[key]
if callable(cb):
kwargs[key] = _factories.makeUICallback(cb, args, doPassSelf)
except KeyError:
pass
{% endif %}
res = {{ sourceFuncName }}(*args, **kwargs)
{% if returnFunc %}
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, {{ returnFunc }})
{% endif %}
{% if resultNeedsUnpacking and unpackFlags %}
if isinstance(res, list) and len(res) == 1:
if kwargs.get('query', kwargs.get('q', False)):
# unpack for specific query flags
unpackFlags = {{ unpackFlags }}
if not unpackFlags.isdisjoint(kwargs):
res = res[0]
else:
# unpack create/edit result
res = res[0]
{% elif unpackFlags %}
if isinstance(res, list) and len(res) == 1:
# unpack for specific query flags
unpackFlags = {{ unpackFlags }}
if kwargs.get('query', kwargs.get('q', False)) and not unpackFlags.isdisjoint(kwargs):
res = res[0]
{% elif resultNeedsUnpacking %}
# unpack create/edit list result
if isinstance(res, list) and len(res) == 1 and not kwargs.get('query', kwargs.get('q', False)):
res = res[0]
{% endif %}
{% if simpleWraps %}
wraps = _factories.simpleCommandWraps['{{ commandName }}']
for func, wrapCondition in wraps:
if wrapCondition.eval(kwargs):
res = func(res)
break
{% endif %}
return res
```
#### File: maintenance/templates/getattribute.py
```python
def __getattribute__(self, name):
if name in {{ method.removeAttrs }} and name not in _f.EXCLUDE_METHODS: # tmp fix
raise AttributeError("'{{ classname }}' object has no attribute '" + name + "'")
return super({{ classname }}, self).__getattribute__(name)
```
#### File: maintenance/templates/querymethod.py
```python
@_f.addMelDocs('{{ method.command }}', '{{ method.flag }}')
def {{ method.name }}(self, **kwargs):
res = _f.asQuery(self, {{ method.func }}, kwargs, '{{ method.flag }}')
{% if method.returnFunc %}
res = {{ method.returnFunc }}(res)
{% endif %}
return res
```
#### File: pymel/internal/pwarnings.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.utils import PY2
from past.builtins import basestring
import warnings
def formatwarning(message, category, filename, lineno, line=None):
"""Redefined format warning for maya."""
if issubclass(category, ExecutionWarning):
s = u"%s: %s\n" % (category.__name__, message)
else:
s = u'%s: %s, at line %s, in "%s"\n' % (category.__name__, message, lineno, filename)
# name, ext = os.path.splitext(filename)
# line = ""
# if ext == ".py" :
# line = unicode(linecache.getline(filename, lineno)).strip()
# if line:
# s += (u"#\t %s" % line)
return s
warnings.formatwarning = formatwarning
# def showwarning(message, category, filename, lineno, file=None, line=None):
# msg = warnings.formatwarning(message, category, filename, lineno, line)
# if file:
# msg += " >> %r" % file
# _logger.warning(msg)
#
#warnings.showwarning = showwarning
# Subclass just to allow users to configure filtering of pymel-specific
# deprecations
class PymelBaseWarning(Warning):
pass
class ExecutionWarning (UserWarning, PymelBaseWarning):
""" Simple Warning class that doesn't print any information besides warning message """
class PymelBaseDeprecationWarning(PymelBaseWarning):
pass
# Subclass from FutureWarning so it's displayed by default
class PymelFutureWarning(FutureWarning, PymelBaseDeprecationWarning):
pass
# Subclass from DeprecationWarning so it's not displayed by default
class MayaDeprecationWarning(DeprecationWarning, PymelBaseDeprecationWarning):
pass
def warn(*args, **kwargs):
""" Default Maya warn which uses ExecutionWarning as the default warning class. """
if len(args) == 1 and not isinstance(args[0], Warning):
args = args + (ExecutionWarning,)
stacklevel = kwargs.pop("stacklevel", 1) + 1 # add to the stack-level so that this wrapper func is skipped
return warnings.warn(stacklevel=stacklevel, *args, **kwargs)
def deprecated(funcOrMessage=None, className=None,
baseMessage="The function '{objName}' is deprecated and will"
" become unavailable in future pymel versions",
warningType=PymelFutureWarning):
"""Decorates a function so that it prints a deprecation warning when called.
The decorator can either receive parameters or the function directly.
Parameters
----------
funcOrMessage : Union[str, Callable[..., Any], None]
If passed a message, the message will be appended to the standard
deprecation warning and should serve to further clarify why the function
is being deprecated and/or suggest an alternative function. In this
case, the return result of this function is another decorator (with the
        amended message), which then needs to be fed the function to be
        decorated. Otherwise, funcOrMessage should be the func to be decorated,
        and the return result is the decorated version of funcOrMessage
className : Union[str, False, None]
        If given as a str, then the decorated function is assumed to be a method,
and the name is printed as "module.className.funcName". If False, it
is assumed to NOT be a method, and the name is printed as
"module.funcName". If None, then the decorator will try to
automatically determine whether the passed function is a method, and if
        so, what its className is.
baseMessage : Optional[str]
Message which will be combined with the optional message (in
        funcOrMessage) to form the final message. May be set to None to ensure
only the message (in funcOrMessage) is printed.
warningType : Type[Warning]
Warning class to raise. Note that DeprecationWarning is ignored by
default.
"""
import inspect
def isClassMethodOrMethod(test_func):
isClassMethod = False
isMethod = False
args = list(inspect.signature(test_func).parameters)
if args:
if args[0] == 'cls':
isClassMethod = True
elif args[0] == 'self':
isMethod = True
return isClassMethod, isMethod
if PY2:
def isClassMethodOrMethod(test_func):
isClassMethod = False
isMethod = False
args = inspect.getargspec(test_func).args
if args:
if args[0] == 'cls':
isClassMethod = True
elif args[0] == 'self':
isMethod = True
return isClassMethod, isMethod
#@decorator
def deprecated2(func):
useClassName = False
info = dict(
name=func.__name__,
module=func.__module__)
if className is None:
            isClassMethod, isMethod = isClassMethodOrMethod(func)
if isClassMethod or isMethod:
useClassName = True
elif className is not False:
useClassName = True
info['className'] = className
if useClassName:
objName = '%(module)s.%(className)s.%(name)s'
else:
objName = '%(module)s.%(name)s'
message2 = message.format(objName=objName)
def deprecationLoggedFunc(*args, **kwargs):
if useClassName and className is None:
if isClassMethod:
info['className'] = args[0].__name__
else:
info['className'] = type(args[0]).__name__
# add to the stack-level so that this wrapper func is skipped
warnings.warn(message2 % info, warningType, stacklevel=2)
return func(*args, **kwargs)
deprecationLoggedFunc.__name__ = func.__name__
deprecationLoggedFunc.__module__ = func.__module__
deprecationLoggedFunc.__doc__ = message % info
deprecationLoggedFunc._func_before_deprecation = func
if func.__doc__:
deprecationLoggedFunc.__doc__ += '\n\n' + func.__doc__
return deprecationLoggedFunc
# check if the decorator got a 'message' parameter
if funcOrMessage is None:
message = baseMessage
return deprecated2
elif isinstance(funcOrMessage, basestring):
if baseMessage is None:
message = funcOrMessage
else:
message = baseMessage + '. ' + funcOrMessage
return deprecated2
else:
message = baseMessage
return deprecated2(funcOrMessage)
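# Usage sketch (hypothetical function names; both decorator forms are accepted):
#
#   @deprecated
#   def oldHelper(x):
#       return x
#
#   @deprecated("use newHelper() instead")
#   def otherOldHelper(x):
#       return x
#
# Calling either function emits a PymelFutureWarning (a FutureWarning subclass,
# so it is shown by default) built from baseMessage plus the optional message.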
def maya_deprecated(
funcOrMessage=None, className=None,
baseMessage="The function '{objName}' has been deprecated by maya and"
" may become unavailable in future maya versions",
warningType=MayaDeprecationWarning):
return deprecated(funcOrMessage=funcOrMessage, className=className,
baseMessage=baseMessage, warningType=warningType)
if __name__ == '__main__':
import doctest
doctest.testmod()
```
#### File: pymel/util/py2to3.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import re
from future.utils import PY2
def trystr(input):
'''If the input is unicode, but just holds normal ascii, convert to str'''
if PY2:
if isinstance(input, unicode):
try:
return str(input)
except UnicodeEncodeError:
pass
return input
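# Example behaviour (a no-op under Python 3, since PY2 is False there):
#
#   trystr(u'plain ascii')  ->  'plain ascii'   # converted to str under Python 2
#   trystr(u'caf\xe9')      ->  u'caf\xe9'      # not ascii-encodable, left as unicode
#   trystr(42)              ->  42              # non-unicode input passes through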
if PY2:
RePattern = re._pattern_type
else:
RePattern = re.Pattern
```
#### File: pymel/tests/test_api_plugins.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# this is segregated into its own, separate test file from test_api both
# because I'd like to start standardizing on one test-file per python module,
# and because this test requires maya.standalone to be initialized
import os
import re
import unittest
import pymel.api.plugins
class Test_plugins(unittest.TestCase):
def test_mayaPlugins(self):
# this test needs to initialize maya, to ensure MAYA_PLUG_IN_PATH
# is set correctly
import pymel.core
pluginPath = os.environ.get('MAYA_PLUG_IN_PATH')
print(pluginPath)
self.assertTrue(pluginPath)
knownPlugins = ['mayaHIK', 'objExport', 'tiffFloatReader']
# to deal with os-differences, strip any extensions
def mayaPlugins(*args, **kwargs):
mayaPlugins = pymel.api.plugins.mayaPlugins(*args, **kwargs)
return [os.path.splitext(x)[0] for x in mayaPlugins]
allPlugs = mayaPlugins()
for plug in knownPlugins:
self.assertIn(plug, allPlugs)
unknownPlugs = set(allPlugs) - set(knownPlugins)
filtered1 = mayaPlugins(filters=knownPlugins)
self.assertEqual(set(filtered1), unknownPlugs)
regexs = [re.compile(r'^{}.*$'.format(x)) for x in knownPlugins]
filtered2 = mayaPlugins(filters=regexs)
self.assertEqual(filtered2, filtered1)
funcs = []
for known in knownPlugins:
# use kwarg to "freeze" the value of known in the function
def filterFunc(testPlug, knownPlug=known):
return os.path.splitext(testPlug)[0] == knownPlug
funcs.append(filterFunc)
filtered3 = mayaPlugins(filters=funcs)
self.assertEqual(filtered3, filtered1)
```
#### File: pymel/tests/test_mayaBugs.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import object
import sys
import os
import unittest
import maya.cmds as cmds
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
import maya.OpenMayaFX as omfx
import pymel.versions
from pymel.util.testing import TestCaseExtended
if not hasattr(cmds, 'about'):
import maya.standalone
maya.standalone.initialize()
#===============================================================================
# Current Bugs
#===============================================================================
# For CURRENT bugs, we PASS if the bug is still present, and FAIL if it goes
# away... this may be counter-intuitive, but it acts as an alert if a bug is
# fixed (so we can possibly get rid of yucky work-around code...)
# Bug report 378211
class TestConstraintAngleOffsetQuery(TestCaseExtended):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
for cmdName in ('aimConstraint', 'orientConstraint'):
cube1 = cmds.polyCube()[0]
cube2 = cmds.polyCube()[0]
cmd = getattr(cmds, cmdName)
constraint = cmd(cube1, cube2)[0]
setVals = (12, 8, 7)
cmd(constraint, e=1, offset=setVals)
getVals = tuple(cmd(constraint, q=1, offset=1))
# self.assertVectorsEqual(setVals, getVals)
# check that things are BAD!
try:
self.assertVectorsEqual(setVals, getVals)
except AssertionError:
pass
else:
self.fail("TestConstraintAngleOffsetQuery was fixed! Huzzah!")
# Bug report 378192
class TestEmptyMFnNurbsCurve(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
shapeStr = cmds.createNode('nurbsCurve', n="RigWorldShape")
selList = om.MSelectionList()
selList.add(shapeStr)
node = om.MObject()
selList.getDependNode(0, node)
mnc = om.MFnNurbsCurve()
self.assertTrue(mnc.hasObj(node))
# try:
# mnc.setObject(node)
# except Exception:
# self.fail("MFnNurbs curve doesn't work with empty curve object")
# check that things are BAD!
try:
mnc.setObject(node)
except Exception:
pass
else:
self.fail("MFnNurbs curve now works with empty curve objects! Yay!")
# Bug report 344037
class TestSurfaceRangeDomain(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
try:
# create a nurbs sphere
mySphere = cmds.sphere()[0]
# a default sphere should have u/v
# parameter ranges of 0:4/0:8
# The following selections should
# result in one of these:
desiredResults = ('nurbsSphere1.u[2:3][0:8]',
'nurbsSphere1.u[2:3][*]',
'nurbsSphere1.u[2:3]',
'nurbsSphere1.uv[2:3][0:8]',
'nurbsSphere1.uv[2:3][*]',
'nurbsSphere1.uv[2:3]',
'nurbsSphere1.v[0:8][2:3]',
'nurbsSphere1.v[*][2:3]')
# Passes
cmds.select('nurbsSphere1.u[2:3][*]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Passes
cmds.select('nurbsSphere1.v[*][2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphere1.u[2:3][0:1]'
cmds.select('nurbsSphere1.u[2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphere1.u[2:3][0:1]'
cmds.select('nurbsSphere1.uv[2:3][*]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# The following selections should
# result in one of these:
desiredResults = ('nurbsSphere1.u[0:4][2:3]',
'nurbsSphere1.u[*][2:3]',
'nurbsSphere1.uv[0:4][2:3]',
'nurbsSphere1.uv[*][2:3]',
'nurbsSphere1.v[2:3][0:4]',
'nurbsSphere1.v[2:3][*]',
'nurbsSphere1.v[2:3]')
# Passes
cmds.select('nurbsSphere1.u[*][2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Passes
cmds.select('nurbsSphere1.v[2:3][*]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphereShape1.u[0:1][2:3]'
cmds.select('nurbsSphere1.v[2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphereShape1.u[0:4][0:1]'
cmds.select('nurbsSphere1.uv[*][2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
except AssertionError:
pass
else:
# check that things are BAD!
self.fail("Nurbs surface range domain bug fixed!")
# Bug report 345384
# This bug only seems to affect windows (or at least, Win x64 -
# haven't tried on 32-bit).
class TestMMatrixMEulerRotationSetAttr(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
# This bug was apparently fixed in maya 2020 for all OSes
# For pre-2020:
# We expect it to fail on windows, and pass on other operating systems...
shouldPass = os.name != 'nt' or pymel.versions.current() >= pymel.versions.v2020
try:
class InfoBaseClass(object):
# These two are just so we can trace what's going on...
def __getattribute__(self, name):
# don't just use 'normal' repr, as that will
# call __getattribute__!
print("__getattribute__(%s, %r)" % (object.__repr__(self), name))
return super(InfoBaseClass, self).__getattribute__(name)
def __setattr__(self, name, val):
print("__setattr__(%r, %r, %r)" % (self, name, val))
return super(InfoBaseClass, self).__setattr__(name, val)
class MyClass1(InfoBaseClass):
def __init__(self):
self._bar = 'not set'
def _setBar(self, val):
print("setting bar to:", val)
self._bar = val
def _getBar(self):
print("getting bar...")
return self._bar
bar = property(_getBar, _setBar)
foo1 = MyClass1()
# works like we expect...
foo1.bar = 7
print("foo1.bar:", foo1.bar)
self.assertTrue(foo1.bar == 7)
class MyClass2(MyClass1, om.MMatrix): pass
foo2 = MyClass2()
foo2.bar = 7
# Here, on windows, MMatrix's __setattr__ takes over, and
            # (after presumably determining it didn't need to do
# whatever special case thing it was designed to do)
# instead of calling the super's __setattr__, which would
# use the property, inserts it into the object's __dict__
# manually
print("foo2.bar:", foo2.bar)
self.assertTrue(foo2.bar == 7)
# Starting in Maya2018 (at least on windows?), many wrapped datatypes
# define a __setattr__ which will work in the "general" case tested
# above, but will still take precedence if a "_swig_property" is
# defined - ie, MEulerRotation.order. Check to see if the apicls has
# any properties, and ensure that our property still overrides theirs...
class MyEulerClass1(InfoBaseClass):
def _setOrder(self, val):
print("setting order to:", val)
self._order = val
def _getOrder(self):
print("getting order...")
return self._order
order = property(_getOrder, _setOrder)
er1 = MyEulerClass1()
# works like we expect...
er1.order = "new order"
print("er1.order:", er1.order)
self.assertTrue(er1.order == "new order")
class MyEulerClass2(MyEulerClass1, om.MEulerRotation): pass
er2 = MyEulerClass2()
er2.order = "does it work?"
print("er2.order:", er2.order)
self.assertTrue(er2.order == "does it work?")
except Exception:
if shouldPass:
raise
else:
print("MMatrix/MEulerRotation setattr bug is still around...")
else:
if not shouldPass:
self.fail("MMatrix/MEulerRotation setattr bug seems to have"
" been fixed!")
else:
print("MMatrix/MEulerRotation still functions properly on {},"
" as expected".format(os.name))
# Introduced in maya 2014
# Change request #: BSPR-12597
class TestShapeParentInstance(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
try:
import maya.cmds as cmds
def getShape(trans):
return cmds.listRelatives(trans, children=True, shapes=True)[0]
cmds.file(new=1, f=1)
shapeTransform = cmds.polyCube(name='singleShapePoly')[0]
origShape = getShape(shapeTransform)
dupeTransform1 = cmds.duplicate(origShape, parentOnly=1)[0]
cmds.parent(origShape, dupeTransform1, shape=True, addObject=True, relative=True)
dupeTransform2 = cmds.duplicate(dupeTransform1)[0]
cmds.delete(dupeTransform1)
dupeShape = getShape(dupeTransform2)
# In maya 2014, this raises:
# Error: Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Source is not connected.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable.
# Traceback (most recent call last):
# File "<maya console>", line 13, in <module>
# RuntimeError: Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Source is not connected.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable. #
cmds.parent(dupeShape, shapeTransform, shape=True, addObject=True, relative=True)
except Exception:
pass
else:
self.fail("ShapeParentInstance bug fixed!")
# This test gives inconsistent results - the bug will show up (meaning the
# unittest "passes") if the test is run by itself (or just this module is run),
# but the bug will not show up (meaning the unittest "fails") if the entire test
# suite is run
@unittest.skip("inconsistent results")
class TestUndoRedoConditionNewFile(unittest.TestCase):
CONDITION = '_pymel_test_UndoRedoAvailable'
def setUp(self):
self.origUndoState = cmds.undoInfo(q=1, state=1)
# flush the undo queue
cmds.undoInfo(state=0)
cmds.undoInfo(state=1)
cmds.file(new=1, f=1)
# there seems to be a bug with cmds.scriptJob(listConditions=1) where
# it returns none from a non-gui session
import maya.api.OpenMaya as om2
if self.CONDITION in om2.MConditionMessage.getConditionNames():
cmds.condition(self.CONDITION, delete=True)
om.MGlobal.executeCommand('''
global proc int _test_UndoOrRedoAvailable_proc()
{
return (isTrue("UndoAvailable") || isTrue("RedoAvailable"));
}
''', False, False)
cmds.condition(self.CONDITION, initialize=True,
d=['UndoAvailable', 'RedoAvailable'],
s='_test_UndoOrRedoAvailable_proc')
def tearDown(self):
try:
cmds.condition(self.CONDITION, delete=True)
finally:
if self.origUndoState != cmds.undoInfo(q=1, state=1):
cmds.undoInfo(state=self.origUndoState)
def _doTest(self):
self.assertFalse(cmds.isTrue('UndoAvailable'))
self.assertFalse(cmds.isTrue('RedoAvailable'))
self.assertFalse(cmds.isTrue(self.CONDITION))
cmds.setAttr('persp.tx', 10)
cmds.setAttr('top.tx', 10)
self.assertTrue(cmds.isTrue('UndoAvailable'))
self.assertFalse(cmds.isTrue('RedoAvailable'))
self.assertTrue(cmds.isTrue(self.CONDITION))
cmds.undo()
self.assertTrue(cmds.isTrue('UndoAvailable'))
self.assertTrue(cmds.isTrue('RedoAvailable'))
self.assertTrue(cmds.isTrue(self.CONDITION))
# after doing a new file, does UndoOrRedoAvailable reset properly?
cmds.file(new=1, force=1)
self.assertFalse(cmds.isTrue('UndoAvailable'))
self.assertFalse(cmds.isTrue('RedoAvailable'))
self.assertFalse(cmds.isTrue(self.CONDITION),
'expected failure here')
def runTest(self):
try:
self._doTest()
except AssertionError as e:
if e.args[0] != 'expected failure here':
raise
else:
# check that things are BAD!
self.fail("UndoRedoCondition with newFile bug fixed!")
class TestScriptJobListConditions(unittest.TestCase):
def _doTest(self):
# this seems to return None in non-gui mayapy sessions
conditions = cmds.scriptJob(listConditions=1)
self.assertIsNot(conditions, None, 'expected failure here')
self.assertIn('MayaInitialized', conditions)
self.assertIn('UndoAvailable', conditions)
def runTest(self):
# we only get failures in non-gui
expectFailure = om.MGlobal.mayaState() not in \
(om.MGlobal.kInteractive, om.MGlobal.kBaseUIMode)
try:
self._doTest()
except Exception as e:
if not expectFailure:
raise
if not isinstance(e, AssertionError) \
or 'expected failure here' not in e.args[0]:
raise
else:
if expectFailure:
# check that things are BAD!
self.fail("scriptJob(listConditions=1) bug fixed!")
#===============================================================================
# Current bugs that will cause Maya to CRASH (and so are commented out!)
#===============================================================================
# This is commented out as it will cause a CRASH - uncomment out (or just
# copy/ paste the relevant code into the script editor) to test if it's still
# causing a crash...
# If you're copy / pasting into a script editor, in order for a crash to occur,
# all lines must be executed at once - if you execute one at a time, there will
# be no crash
# Also, I'm making the code in each of the test functions self-contained (ie,
# has all imports, etc) for easy copy-paste testing...
# class TestSubdivSelectCrash(unittest.TestCase):
# def testCmds(self):
# import maya.cmds as cmds
# cmds.file(new=1, f=1)
# polyCube = cmds.polyCube()[0]
# subd = cmds.polyToSubdiv(polyCube)[0]
# cmds.select(subd + '.sme[*][*]')
#
# def testApi(self):
# import maya.cmds as cmds
# import maya.OpenMaya as om
#
# polyCube = cmds.polyCube()[0]
# subd = cmds.polyToSubdiv(polyCube)[0]
# selList = om.MSelectionList()
# selList.add(subd + '.sme[*][*]')
#===============================================================================
# FIXED (Former) Bugs
#===============================================================================
# Fixed in Maya 2009! yay!
class TestConstraintVectorQuery(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def _doTestForConstraintType(self, constraintType):
cmd = getattr(cmds, constraintType)
if constraintType == 'tangentConstraint':
target = cmds.circle()[0]
else:
target = cmds.polyCube()[0]
constrained = cmds.polyCube()[0]
constr = cmd(target, constrained)[0]
self.assertEqual(cmd(constr, q=1, worldUpVector=1), [0,1,0])
self.assertEqual(cmd(constr, q=1, upVector=1), [0,1,0])
self.assertEqual(cmd(constr, q=1, aimVector=1), [1,0,0])
def test_aimConstraint(self):
self._doTestForConstraintType('aimConstraint')
def test_normalConstraint(self):
self._doTestForConstraintType('normalConstraint')
def test_tangentConstraint(self):
self._doTestForConstraintType('tangentConstraint')
# Fixed ! Yay! (...though I've only check on win64...)
# (not sure when... was fixed by time of 2011 Hotfix 1 - api 201101,
# and still broken in 2009 SP1a - api 200906)
class TestMatrixSetAttr(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
res = cmds.sphere(n='node')
cmds.addAttr(ln='matrixAttr',dt="matrix")
def runTest(self):
cmds.setAttr( 'node.matrixAttr', 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, type='matrix' )
# Bug report 345382
# Fixed ! Yay! (...though I've only check on win64...)
# (not sure when... was fixed by time of 2011 Hotfix 1 - api 201101,
# and still broken in 2009 SP1a - api 200906)
class TestFluidMFnCreation(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
fluid = cmds.createNode('fluidShape')
selList = om.MSelectionList()
selList.add(fluid)
dag = om.MDagPath()
selList.getDagPath(0, dag)
omfx.MFnFluid(dag)
# nucleus node fixed in 2014
# symmetryConstraint fixed in 2015
# transferAttributes fixed <= 2016.5
# jointFFd fixed in 2021
class TestMFnCompatibility(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def assertInheritMFn(self, nodeType, parentNodeType, mfnEnumName, mfnType):
if parentNodeType:
if isinstance(parentNodeType, tuple):
parentNodeType, concreteParentType = parentNodeType
else:
concreteParentType = parentNodeType
self.assertTrue(
parentNodeType in cmds.nodeType(nodeType, isTypeName=1,
inherited=True),
"{} did not have parent {}".format(nodeType, parentNodeType))
self.assertInheritMFn(concreteParentType, None, mfnEnumName,
mfnType)
mfnEnum = getattr(om.MFn, mfnEnumName)
nodeInstName = cmds.createNode(nodeType)
selList = om.MSelectionList()
selList.add(nodeInstName)
mobj = om.MObject()
selList.getDependNode(0, mobj)
self.assertTrue(mobj.hasFn(mfnEnum),
"{} did not have {}".format(nodeType, mfnEnumName))
try:
mfnType(mobj)
except Exception as e:
self.fail("{} did not support {}".format(nodeType,
mfnType.__name__))
def assertNotInheritMFn(self, nodeType, parentNodeType, mfnEnumName,
mfnType):
try:
self.assertInheritMFn(nodeType, parentNodeType, mfnEnumName,
mfnType)
except AssertionError as e:
# this is expected... swallow it
pass
else:
self.fail("{} passed inheritance test (for {} / {}), when it was"
" expected to fail".format(nodeType, mfnEnumName,
mfnType.__name__))
def test_nucleus_MFnDagNode(self):
self.assertInheritMFn('nucleus', ('dagNode', 'transform'), 'kDagNode',
om.MFnDagNode)
def test_nucleus_MFnTransform(self):
self.assertInheritMFn('nucleus', 'transform', 'kTransform',
om.MFnTransform)
def test_symmetryConstraint_MFnDagNode(self):
self.assertInheritMFn('symmetryConstraint', ('dagNode', 'transform'),
'kDagNode', om.MFnDagNode)
def test_symmetryConstraint_MFnTransform(self):
self.assertInheritMFn('symmetryConstraint', 'transform', 'kTransform',
om.MFnTransform)
def test_jointFfd_ffd(self):
self.assertInheritMFn('jointFfd', 'ffd', 'kFFD', oma.MFnLatticeDeformer)
def test_jointFfd_geometryFilter(self):
# fixed in 2021!
if pymel.versions.current() >= pymel.versions.v2021:
self.assertInheritMFn(
'jointFfd', ('geometryFilter', 'softMod'),
'kGeometryFilt', oma.MFnGeometryFilter)
else:
self.assertNotInheritMFn(
'jointFfd', ('geometryFilter', 'softMod'),
'kGeometryFilt', oma.MFnGeometryFilter)
def test_transferAttributes_weightGeometryFilter(self):
self.assertInheritMFn(
'transferAttributes', ('weightGeometryFilter', 'softMod'),
'kWeightGeometryFilt', oma.MFnWeightGeometryFilter)
def test_transferAttributes_geometryFilter(self):
self.assertInheritMFn(
'transferAttributes', ('geometryFilter', 'softMod'),
'kGeometryFilt', oma.MFnGeometryFilter)
# These probably aren't strictly considered "bugs" by autodesk, though I
# think they should be...
def test_hikHandle_ikHandle(self):
self.assertNotInheritMFn('hikHandle', 'ikHandle', 'kIkHandle',
oma.MFnIkHandle)
# Fixed in 2014! yay!
class TestGroupUniqueness(unittest.TestCase):
'''Test to check whether cmds.group returns a unique name
'''
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
cmds.select(cl=1)
cmds.group(n='foo', empty=1)
cmds.group(n='bar')
cmds.select(cl=1)
res = cmds.group(n='foo', empty=1)
sameNames = cmds.ls(res)
if len(sameNames) < 1:
self.fail('cmds.group did not return a valid name')
elif len(sameNames) > 1:
self.fail('cmds.group did not return a unique name')
```
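The regression tests above share an inverted idiom: the body that exercises a known Maya bug runs inside `try`/`except`, the `except` branch swallows the expected failure, and the `else` branch calls `self.fail(...)` so the suite flags the moment the bug stops reproducing. Below is a minimal, Maya-free sketch of that idiom; the class name and the checked condition are stand-ins (the condition just relies on binary floating-point rounding so the sketch runs), not pymel code.

```python
# Minimal sketch of the "fail when the bug is fixed" idiom used above.
# The checked condition is a stand-in chosen only so the sketch runs:
# round(2.675, 2) yields 2.67 under binary floating point.
import unittest


class TestKnownIssueStillPresent(unittest.TestCase):
    def runTest(self):
        try:
            # The assertion encodes the behaviour we *want*, which the
            # known issue currently violates.
            self.assertEqual(round(2.675, 2), 2.68)
        except AssertionError:
            pass  # expected while the issue exists
        else:
            # Reaching here means the issue disappeared upstream - flag it
            # so the regression test and any workarounds can be retired.
            self.fail("Known issue no longer reproduces!")


if __name__ == "__main__":
    unittest.main()
```

The standard library's `@unittest.expectedFailure` decorator expresses a similar intent, reporting an "unexpected success" once the wrapped test starts passing.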
#### File: pymel/tests/test_startup.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import shutil
import tempfile
import unittest
import pymel.internal.startup as startup
class TestCacheFormats(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_dumpLoad(self):
smiley = u'\U0001F600'
DATAS = {
'unicode': {'foo': smiley},
'ascii': {'foo': 7}
}
filebase = os.path.join(self.tmpdir, 'test_bin')
for name, data in DATAS.items():
for fmt in startup.PymelCache.FORMATS:
filename = '{}_{}{}'.format(filebase, name, fmt.ext)
#print("testing: {} - {} - {}".format(name, data, fmt[0]))
fmt.writer(data, filename)
read_data = fmt.reader(filename)
assert read_data == data
assert type(read_data) is type(data)
```
#### File: pymel/tests/test_trees.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from pymel.util.testing import TestCase, setupUnittestModule
import pymel.util.trees as trees
class testCase_typeTrees(TestCase):
def setUp(self):
self.types = ('dependNode', ('FurAttractors', ('FurCurveAttractors', 'FurDescription', 'FurGlobals'), 'abstractBaseCreate'))
self.tree = trees.Tree( *(self.types) )
def test01_parentMethod(self):
""" Test the parent method on type tree """
pass
def tearDown(self):
pass
# to be organised in nice unit tests :
#print dir(FrozenTree)
#print dir(Tree)
##print dir(IndexedFrozenTree)
##print dir(IndexedTree)
#a = Tree ('a', ('aa', 'ab'), 'b', ('ba', 'bb'))
#print a
#print list(a)
#print list(a.preorder())
#print str(a)
#print repr(a)
#print unicode(a)
#print a.formatted()
#print a.debug()
#t = Tree ('a', ('aa', 'ab'))
#print id(t)
#print t.debug()
#t.graft('b')
#print id(t)
#print t.debug()
#b = Tree ('a')
#print id(b)
#print b.debug()
#b.graft('b')
#print b.debug()
#b.graft('ab', 'a')
#print b.debug()
#aa = Tree ('aa', ('aaa', 'aab'))
#print id(aa)
#print aa.debug()
## FIXME : next doesn't work
#b.graft(aa, 'a', 'ab')
#print id(b)
#print id(aa), id(b['aa'])
#print b.debug()
#b.remove('ab')
#ab = FrozenTree('ab', ('aba', 'abb'))
#print id(ab)
#print ab.debug()
#b.graft(ab, 'a')
#print id(b)
#print id(ab), id(b['ab'])
#print b.debug()
#b.graft('c')
#print b.debug()
#b.remove('c')
#print b.debug()
#b.graft('c', 'b')
#print b.debug()
#b.graft(('ba', 'bb'), 'c')
#print b.debug()
## FIXME : pop not working yet
## b.pop('c')
#print b.debug()
#b.prune('a')
#print b.debug()
#b.graft(('a', ('aa', 'ab')), None, 'b')
#print b.debug()
#print list(b.tops())
#print b.top(0)
#print b.top(1)
##print isinstance(a, list)
##print issubclass(a.__class__, list)
#print id(a)
#print a.root()
#print id(a)
#print a.next
#print a.child(0)
#print a.child(0).next
#print a.formatted()
#print a.debug()
#b = a
#print b.debug()
#c = a.copy()
#print c.debug()
#print c.formatted()
#print a == b
#print a is b
#print a == c
#print a is c
#for k in a.breadth() :
# print k.value
#for k in a :
# print k.value
#for k in a.postorder() :
# print k.value
#
#A = Tree ('a', ('aa', ('aaa', 'aab', 'aac'), 'ab', 'ac', ('aca', 'acb')), 'b', ('ba', 'bb'), 'c', ('ca', ('caa', 'cab', 'cac'), 'cb', ('cba', 'cbb'), 'cc', ('cca', 'ccb', 'ccc')))
#print id(A)
#for k in A :
# print k.value
#for k in A.preorder() :
# print k.value
#for k in A.postorder() :
# print k.value
#for k in A.breadth() :
# print k.value
#print b in a
#print c in a
#print a.child(0) in a
#print c.child(0) in a
#print c.child(0).value in a
#for k in A :
# parentValues = [j.value for j in k.parents()]
# root = k.root()
# if root :
# rootValue = root.value
# else :
# rootValue = None
# print "%s: %s, %s" % (k.value, rootValue, parentValues)
#
#
#temp = Tree ('a', ('aa', 'ab'), 'b', ('ba', 'bb'))
#suba = temp['aa']
#print suba
#print suba.root()
#print temp
#print id(temp)
#print suba.root().parent
#print id(suba.root().parent)
##print a[a.child(0)]
##print a
##l = a['a']
##print l
##print a[('a', 'aa')]
#del (temp)
## print a
#print suba
#print suba.root()
#print suba.root().parent
#print id(suba.root().parent)
#d = Tree ('a', ('aa', 'ab'), 'b', ('aa', 'ab'))
#def getAsList(tree, value):
# msg = ""
# try :
# tree[value]
# print "Found exactly one match"
# except :
# msg = "Not exactly one match"
# f = tree.get(value, [])
# if msg :
# print msg+": %i found" % len(f)
# for k in f:
# print k, k.parent
# return f
#getAsList(d, 'aa')
#getAsList(d,('b', 'ab'))
#getAsList(d,'xyz')
#getAsList(d,(None, 'aa'))
#getAsList(d,(None, d.child(0).child(0)))
#getAsList(d,(None, 'a', 'aa'))
#getAsList(d,('a', 'aa'))
#A = Tree ('a', ('aa', ('aaa', 'aab', 'aac'), 'ab', 'ac', ('aca', 'acb')), 'b', ('ba', 'bb'), 'c', ('ca', ('caa', 'cab', 'cac'), 'cb', ('cba', 'cbb'), 'cc', ('cca', 'ccb', 'ccc')))
#print list(A.path('aca'))
#for k in A.path('aca') :
# print k.value
#for k in A['aca'].path(A) :
# if k.value :
# print k.value
#
#def getParent(c) :
# res = cmds.listRelatives(c, parent=True)
# if res :
# return res[0]
#
#def isExactChildFn(c, p) :
# """ a function to check if c is a direct child of p """
# if (c is not None) and (p is not None) :
# #print "checking if "+c+" is child of "+p
# prt = getParent(c)
# if prt is not None and p is not None :
# return prt == p
# elif prt is None and p is None :
# return True
# else :
# return False
# else :
# return False
#
#def asOldHierarchy (*args) :
# """returns a Tree containing the PyMel objects representing Maya nodes that were passed
# as argument, or the current seleciton if no arguments are provided,
# in a way that mimics the Maya scene hierarchy existing on these nodes.
# Note that:
# >>> cmds.file ("~/pymel/examples/skel.ma", f=True, typ="mayaAscii",o=True)
# >>> File read in 0 seconds.
# >>> u'~/pymel/examples/skel.ma'
# >>> select ('FBX_Hips', replace=True, hierarchy=True)
# >>> sel=ls(selection=True)
# >>> skel=asHierarchy (sel)
# >>> skel.find('FBX_Head')
# >>> Tree(Joint('FBX_Head'), Tree(Joint('FBX_LeftEye')), Tree(Joint('FBX_RightEye')))
# >>> skel.parent('FBX_Head')
# >>> Joint('FBX_Neck1')
# >>> util.expandArgs( skel ) == tuple(sel) and sel == [k for k in skel]
# >>> True """
#
# if len(args) == 0 :
# nargs = cmds.ls( selection=True)
# else :
# args = util.expandArgs (*args)
# # nargs = map(PyNode, args)
# nargs = args
# # print "Arguments: %s"+str(nargs)
# result = oldTreeFromChildLink (isExactChildFn, *nargs)
# # print "Result: %s"+str(result)
# return result
#
#def asHierarchy (*args) :
# """returns a Tree containing the PyMel objects representing Maya nodes that were passed
# as argument, or the current seleciton if no arguments are provided,
# in a way that mimics the Maya scene hierarchy existing on these nodes.
# Note that:
# >>> cmds.file ("~/pymel/examples/skel.ma", f=True, typ="mayaAscii",o=True)
# >>> File read in 0 seconds.
# >>> u'~/pymel/examples/skel.ma'
# >>> select ('FBX_Hips', replace=True, hierarchy=True)
# >>> sel=ls(selection=True)
# >>> skel=asHierarchy (sel)
# >>> skel.find('FBX_Head')
# >>> Tree(Joint('FBX_Head'), Tree(Joint('FBX_LeftEye')), Tree(Joint('FBX_RightEye')))
# >>> skel.parent('FBX_Head')
# >>> Joint('FBX_Neck1')
# >>> util.expandArgs( skel ) == tuple(sel) and sel == [k for k in skel]
# >>> True """
#
# if len(args) == 0 :
# nargs = cmds.ls( selection=True)
# else :
# args = util.expandArgs (*args)
# # nargs = map(PyNode, args)
# nargs = args
# # print "Arguments: %s"+str(nargs)
# result = treeFromChildLink (isExactChildFn, *nargs)
# # print "Result: %s"+str(result)
# return result
#
#def asIndexedHierarchy (*args) :
# """returns a Tree containing the PyMel objects representing Maya nodes that were passed
# as argument, or the current seleciton if no arguments are provided,
# in a way that mimics the Maya scene hierarchy existing on these nodes.
# Note that:
# >>> cmds.file ("~/pymel/examples/skel.ma", f=True, typ="mayaAscii",o=True)
# >>> File read in 0 seconds.
# >>> u'~/pymel/examples/skel.ma'
# >>> select ('FBX_Hips', replace=True, hierarchy=True)
# >>> sel=ls(selection=True)
# >>> skel=asHierarchy (sel)
# >>> skel.find('FBX_Head')
# >>> Tree(Joint('FBX_Head'), Tree(Joint('FBX_LeftEye')), Tree(Joint('FBX_RightEye')))
# >>> skel.parent('FBX_Head')
# >>> Joint('FBX_Neck1')
# >>> util.expandArgs( skel ) == tuple(sel) and sel == [k for k in skel]
# >>> True """
#
# if len(args) == 0 :
# nargs = cmds.ls( selection=True)
# else :
# args = util.expandArgs (*args)
# # nargs = map(PyNode, args)
# nargs = args
# # print "Arguments: %s"+str(nargs)
# result = indexedTreeFromChildLink (isExactChildFn, *nargs)
# # print "Result: %s"+str(result)
# return result
#
#def asNetworkXHierarchy (*args) :
# """returns a Tree containing the PyMel objects representing Maya nodes that were passed
# as argument, or the current seleciton if no arguments are provided,
# in a way that mimics the Maya scene hierarchy existing on these nodes.
# Note that:
# >>> cmds.file ("~/pymel/examples/skel.ma", f=True, typ="mayaAscii",o=True)
# >>> File read in 0 seconds.
# >>> u'~/pymel/examples/skel.ma'
# >>> select ('FBX_Hips', replace=True, hierarchy=True)
# >>> sel=ls(selection=True)
# >>> skel=asHierarchy (sel)
# >>> skel.find('FBX_Head')
# >>> Tree(Joint('FBX_Head'), Tree(Joint('FBX_LeftEye')), Tree(Joint('FBX_RightEye')))
# >>> skel.parent('FBX_Head')
# >>> Joint('FBX_Neck1')
# >>> util.expandArgs( skel ) == tuple(sel) and sel == [k for k in skel]
# >>> True """
#
# if len(args) == 0 :
# nargs = cmds.ls( selection=True)
# else :
# args = util.expandArgs (*args)
# # nargs = map(PyNode, args)
# nargs = args
# # print "Arguments: "+str(nargs)
# result = networkXTreeFromChildLink (isExactChildFn, *nargs)
# # print "Result: "+str(result)
# return result
#
#
#
#def networkXTreeFromChildLink (isExactChildFn, *args):
# """
# This function will build a tree from the provided sequence and a comparison function in the form:
# cmp(a,b): returns True if a is a direct child of b, False else
# >>> lst = ['aab', 'aba', 'aa', 'bbb', 'ba', 'a', 'b', 'bb', 'ab', 'bab', 'bba']
# >>> def isChild(s1, s2) :
# >>> return s1.startswith(s2) and len(s1)==len(s2)+1
# >>> forest = treeFromChildLink (isChild, lst)
# >>> for tree in forest :
# >>> print tree
# A child cannot have more than one parent, if the isChild is ambiguous an exception will be raised
# >>> def isChild(s1, s2) :
# >>> return s1.startswith(s2)
# >>> forest = treeFromChildLink (isChild, lst)
# """
# deq = deque()
# for arg in args :
# t = nt.Tree()
# t.add_node(arg)
# t.root = arg
# deq.append(t)
# lst = []
# it = 0
# while deq:
# it+=1
# # print "iteration %i" % it
# c = deq.popleft()
# r = c.root
# hasParent = False
# fulllist = list(deq)+lst
# sd = len(deq)
# nextlist = []
# for p in fulllist :
# plist = []
# for n in p.nodes_iter() :
# # print "Is %s child of %s?" % (r, n)
# if isExactChildFn(r, n) :
# plist.append(n)
# # print "%s is child of %s!" % (r, n)
# for pr in plist :
# if not hasParent :
# # print "graft %s on %s, under %s" % (r, p.root, pr)
# np = p.union_sub(c, v_from=p.root, v_to=c.root)
# np.root = p.root
# p = np
# hasParent = True
# else :
# # should only be one parent, break on first encountered
# raise ValueError, "A child in Tree cannot have multiple parents, check the provided isChild(c, p) function: '%s'" % isExactChildFn.__name__
# nextlist.append(p)
# deq = deque(nextlist[:sd])
# lst = nextlist[sd:]
# # If it's a root we move it to final list
# if not hasParent :
# # print "%s has no parent, it goes to the list as root" % str(c.root)
# lst.append(c)
#
# # print "final list %s" % str(lst)
# if len(lst) == 1 :
# return lst[0]
# else :
# return tuple(lst)
setupUnittestModule(__name__)
```
#### File: pymel/tests/test_util_common.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# used to have tests for util.isClassRunningStack, but it turned out
# inspect.stack() could cause crashes in some cases...
import unittest
from pymel.util.scanf import (sscanf, fscanf, CharacterBufferFromIterable,
makeCharBuffer, handleWhitespace, handleChar,
handleDecimalInt, handleString, CappedBuffer,
IncompleteCaptureError, FormatError, compile)
class ScanfTests(unittest.TestCase):
def bufferFromString(self, s):
return CharacterBufferFromIterable(s)
def testBufferFromString(self):
b = self.bufferFromString("hello")
for letter in list('hello'):
self.assertEqual(letter, b.getch())
self.assertEqual('', b.getch())
def testCharacterSetScanning(self):
b = makeCharBuffer("+++-+++++1234")
self.assertEqual("+++", b.scanCharacterSet(set("+")))
self.assertEqual("", b.scanCharacterSet(set("+")))
self.assertEqual("-", b.scanCharacterSet(set("-")))
self.assertEqual("+", b.scanCharacterSet(set("+"), 1))
def testPredicateScanning(self):
b = makeCharBuffer("+++-+++++1234")
self.assertEqual("+++", b.scanPredicate(lambda ch: ch == '+'))
def testUngetch(self):
b = self.bufferFromString("ong")
b.ungetch('y')
self.assertEqual('y', b.getch())
self.assertEqual('o', b.getch())
b.ungetch('u')
self.assertEqual('u', b.getch())
self.assertEqual('n', b.getch())
self.assertEqual('g', b.getch())
self.assertEqual('', b.getch())
def testRepeatedGetchOnEmptyStreamIsOk(self):
b = self.bufferFromString("")
self.assertEqual('', b.getch())
self.assertEqual('', b.getch())
def testCappedBuffer(self):
b = CappedBuffer(self.bufferFromString("supercalifragilisticexpialidocious"), 5)
self.assertEqual("s", b.getch())
self.assertEqual("u", b.getch())
self.assertEqual("p", b.getch())
self.assertEqual("e", b.getch())
self.assertEqual("r", b.getch())
self.assertEqual('', b.getch())
self.assertEqual('', b.getch())
b.ungetch('r')
self.assertEqual("r", b.getch())
self.assertEqual('', b.getch())
def testWhitespaceScanning(self):
b = self.bufferFromString(" 42\n43")
self.assertEqual(" ", handleWhitespace(b))
self.assertEqual("", handleWhitespace(b))
self.assertEqual("4", b.getch())
def testDecimalDigitScanning(self):
b = self.bufferFromString("42 43!44")
self.assertEqual(42, handleDecimalInt(b))
self.assertEqual(" ", handleWhitespace(b))
self.assertEqual(43, handleDecimalInt(b))
b2 = self.bufferFromString("-1-2+3-4")
self.assertEqual(-1, handleDecimalInt(b2))
self.assertEqual(-2, handleDecimalInt(b2))
self.assertEqual(3, handleDecimalInt(b2))
self.assertEqual(-4, handleDecimalInt(b2))
self.assertRaises(FormatError, handleDecimalInt, b2)
def testCharacter(self):
b = self.bufferFromString("hi!")
self.assertEqual("h", handleChar(b))
self.assertEqual("i", handleChar(b))
self.assertEqual("!", handleChar(b))
self.assertRaises(FormatError, handleChar, b)
def testString(self):
b = self.bufferFromString("-42 + 1 equals -41")
self.assertEqual("-42", handleString(b))
handleWhitespace(b)
self.assertEqual("+", handleString(b))
handleWhitespace(b)
self.assertEqual("1", handleString(b))
handleWhitespace(b)
self.assertEqual("equals", handleString(b))
handleWhitespace(b)
self.assertEqual("-41", handleString(b))
def testIntegerScanning(self):
self.assertEqual((42, 43),
sscanf(" 42\n 43 ", "%d %d"))
self.assertEqual((8,), sscanf("10", "%o"))
self.assertEqual((8,), sscanf("010", "%o"))
self.assertEqual((15,), sscanf("F", "%x"))
self.assertEqual((15,), sscanf("f", "%x"))
self.assertEqual((15,), sscanf("0xF", "%x"))
self.assertEqual((15,), sscanf("0XF", "%x"))
self.assertEqual((15,), sscanf("0Xf", "%x"))
self.assertEqual((-1, -2, 3, -4), sscanf("-1-2+3-4", "%d%d%d%d"))
def testWordScanning(self):
self.assertEqual(("hello", "world"),
sscanf(" hello world", "%s %s"))
def testSuppression(self):
self.assertEqual((), sscanf(" hello world", "%*s %*s"))
self.assertEqual(("happy",),
sscanf("hello happy world", "%*s %s %*s"))
self.assertEqual((), sscanf("h", "%*c"))
def testWidth(self):
self.assertEqual(("00010",), sscanf("00010101010111", "%5c"))
self.assertEqual(("xy",), sscanf("xyz", "%2s"))
self.assertEqual(("xy",), sscanf(" xyz", "%2s"))
self.assertEqual((" ",), sscanf(" xyz", "%2c"))
def testFscanf(self):
import io
b = io.StringIO(u"hello world")
self.assertEqual(("hello", " ", "world"), fscanf(b, "%s%c%s"))
# Check that calling fscanf() twice doesn't
# drop the last character
b2 = io.StringIO(u"hello world")
self.assertEqual(("hello",), fscanf(b2, "%s"))
self.assertEqual((" ",), fscanf(b2, "%c"))
self.assertEqual(("world",), fscanf(b2, "%s"))
def testSkipLeadingSpaceOnScanning(self):
"""<NAME> reported a bug where floats weren't being
parsed properly if there was leading whitespace for %f.
This case checks that"""
self.assertEqual((42.0,),
sscanf(" 42.0", "%f"))
def testFloats(self):
self.assertEqual((3.14,
10.,
.001,
1e100,
3.14e-10,
0e0,), sscanf("""3.14
10.
.001
1e100
3.14e-10
0e0""", "%f %f %f %f %f %f"))
def testMoreSimpleScanningExamples(self):
self.assertEqual((192, 168, 1, 1),
sscanf("192.168.1.1", "%d.%d.%d.%d"))
self.assertEqual(("a", "b", "c"),
sscanf(" ab c ", "%1s%1s%s"))
self.assertEqual(("hello", " ", "world"),
sscanf("hello world", "%s%c%s"))
self.assertRaises(IncompleteCaptureError,
sscanf, "192.168.1.1", "%d %d %d %d")
self.assertEqual(("danny",),
sscanf("hi danny", "hi %s"))
self.assertEqual(("danny",),
sscanf(" hi danny", " hi %s"))
self.assertEqual(("a", "b", 3),
sscanf("ab3", "%c%c%d"))
# this case is weird, but it happens in C too!
self.assertRaises(IncompleteCaptureError,
sscanf, " hi danny", "hi %s")
# The example that's used in
# 'http://docs.python.org/lib/node109.html'
self.assertEqual(("/usr/bin/sendmail", 0, 4),
sscanf("/usr/bin/sendmail - 0 errors, 4 warnings",
"%s - %d errors, %d warnings"))
def testErroneousFormats(self):
self.assertRaises(FormatError, compile, "%")
self.assertRaises(FormatError, compile, "% ")
self.assertRaises(FormatError, compile, "%*")
self.assertRaises(FormatError, compile, "%*z")
self.assertRaises(FormatError, compile, "% d")
self.assertRaises(FormatError, compile, "%* d")
```
|
{
"source": "jfly/pyright",
"score": 2
}
|
#### File: tests/samples/paramSpec20.py
```python
from typing import Callable, Concatenate, Generic, ParamSpec, TypeVar
T = TypeVar("T")
P1 = ParamSpec("P1")
P2 = ParamSpec("P2")
class X(Generic[T, P1]):
f: Callable[P1, int]
x: T
def x1(x: X[int, P2]) -> str:
...
def x2(x: X[int, Concatenate[int, P2]]) -> str:
...
def X3(x: X[int, [int, bool]]) -> str:
...
def x4(x: X[int, ...]) -> str:
...
# This should generate an error because "int" can't be bound to a ParamSpec.
def x5(x: X[int, int]) -> str:
...
# This should generate an error.
def x6(x: X[..., ...]) -> str:
...
# This should generate an error.
def x7(x: X[[int], [int, int]]) -> str:
...
class Z(Generic[P1]):
f: Callable[P1, int]
def z1(x: Z[[int, str, bool]]) -> str:
...
def z2(x: Z[int, str, bool]) -> str:
...
# This should generate an error.
def z3(x: Z[[int, [str], bool]]) -> str:
...
# This should generate an error.
def z4(x: Z[[[int, str, bool]]]) -> str:
...
# This should generate an error.
def z5(x: Z[[...]]) -> str:
...
```
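The sample above is a pyright test for specializing a ParamSpec-parameterized generic: bare parameter lists, `Concatenate[...]`, and `...` are accepted, while plain types, nested lists, and an ellipsis wrapped in a list are flagged as errors. For background, here is a minimal PEP 612 usage sketch; it needs Python 3.10+ and all names are hypothetical, not part of the test suite.

```python
# Background sketch for PEP 612 (not part of the pyright sample): a ParamSpec
# captures an entire parameter list, and Concatenate prepends extra positional
# parameters to it. All names below are hypothetical.
from typing import Callable, Concatenate, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")


def with_retries(func: Callable[Concatenate[int, P], R]) -> Callable[P, R]:
    """Return a wrapper that supplies the leading 'attempts' argument itself."""
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        return func(3, *args, **kwargs)
    return wrapper


@with_retries
def fetch(attempts: int, url: str) -> str:
    return f"{attempts} attempts for {url}"


print(fetch("https://example.com"))  # the int parameter is consumed by the wrapper
```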
|
{
"source": "jfly/python-opencage-geocoder",
"score": 3
}
|
#### File: python-opencage-geocoder/test/test_async.py
```python
import pytest
from opencage.geocoder import ForbiddenError, OpenCageGeocode, AioHttpError
# NOTE: Testing keys https://opencagedata.com/api#testingkeys
async def test_success():
async with OpenCageGeocode('<KEY>') as geocoder:
results = await geocoder.geocode_async("EC1M 5RF")
assert any(
abs(result['geometry']['lat'] - 51.952659 < 0.05 and
abs(result['geometry']['lng'] - 7.632473) < 0.05)
for result in results
)
async def test_failure():
async with OpenCageGeocode('6c79ee8e1ca44ad58ad1fc493ba9542f') as geocoder:
with pytest.raises(ForbiddenError) as excinfo:
await geocoder.geocode_async("Atlantis")
assert str(excinfo.value) == 'Your API key has been blocked or suspended.'
async def test_without_async_session():
geocoder = OpenCageGeocode('4372eff77b8343cebfc843eb4da4ddc4')
with pytest.raises(AioHttpError) as excinfo:
await geocoder.geocode_async("Atlantis")
assert str(excinfo.value) == 'Async methods must be used inside an async context.'
async def test_using_non_async_method():
async with OpenCageGeocode('6d0e711d72d74daeb2b0bfd2a5cdfdba') as geocoder:
with pytest.raises(AioHttpError) as excinfo:
await geocoder.geocode("Atlantis")
assert str(excinfo.value) == 'Cannot use `geocode` in an async context, use `gecode_async`.'
```
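The tests above pin down the usage contract of the async API: `geocode_async` must be awaited inside an `async with OpenCageGeocode(...)` block, calling it without that context raises `AioHttpError`, and the blocking `geocode` is rejected inside it. A minimal sketch of the intended pattern; the API key is a placeholder and error handling is omitted.

```python
# Minimal usage sketch of the pattern the tests above encode; the key is a
# placeholder and error handling is omitted for brevity.
import asyncio

from opencage.geocoder import OpenCageGeocode


async def lookup(query):
    # The underlying aiohttp session only exists inside the context manager,
    # so geocode_async() must be awaited within this block.
    async with OpenCageGeocode("YOUR-API-KEY") as geocoder:
        return await geocoder.geocode_async(query)


if __name__ == "__main__":
    results = asyncio.run(lookup("EC1M 5RF"))
    print(results[0]["geometry"])
```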
|
{
"source": "jfmaes/transformationsuite",
"score": 3
}
|
#### File: jfmaes/transformationsuite/main.py
```python
import argparse
from transformer import Transformer
from format import Formatter
from Crypto.Hash import MD5
parser = argparse.ArgumentParser(description="Transformer next generation by jfmaes")
#DONT FORGET TO PUT REQUIRED TRUE
parser.add_argument("-f", "--file", help="the payload file", required=True)
parser.add_argument("-x", "--xor", help="use xor encryption", action="store_true")
parser.add_argument("-key", help="the xor key")
parser.add_argument("-c", "--caesar", help="use caesar cipher", action="store_true")
parser.add_argument("-rotation", help="the rotation to follow, can be + or - ")
parser.add_argument("-b64","-base64","--base64", help= "base 64 encode payload", action="store_true")
parser.add_argument("-rev","--reverse", help= "reverse payload", action="store_true")
parser.add_argument("-o", "--output-file", help="the output file")
parser.add_argument("-vba", help="format to vba", action="store_true")
parser.add_argument("-csharp", help="format to csharp", action="store_true")
parser.add_argument("-cpp", help="format to cpp", action="store_true")
parser.add_argument("-raw", help="format to raw payload", action="store_true")
parser.add_argument("-v", "--verbose", help="print shellcode to terminal", action="store_true")
parser.add_argument("--no-transform", help="doesnt transform payload, just formats.", action="store_true")
def check_args(args):
if args.xor and not args.key:
print(f"[!] XOR encryption needs a key")
quit()
if args.caesar and not args.rotation:
print(f"[!] Caesar encryption needs a rotation")
quit()
if not args.verbose and not args.output_file:
print(f"[!] Your payload needs to go somewhere. Use either verbose or outfile params, or both.")
quit()
def get_shellcode_from_file(inFile):
try:
with open(inFile, "rb") as shellcodeFileHandle:
shellcodeBytes = bytearray(shellcodeFileHandle.read())
shellcodeFileHandle.close()
print (f"[*] Payload file [{inFile}] successfully loaded")
except IOError:
print(f"[!] Could not open or read file [{inFile}]")
quit()
print("[*] MD5 hash of the initial payload: [{}]".format(MD5.new(shellcodeBytes).hexdigest()))
print("[*] Payload size: [{}] bytes".format(len(shellcodeBytes)))
return shellcodeBytes
def main(args):
transformer = Transformer()
formatter = Formatter()
data = get_shellcode_from_file(args.file)
transform_blob = transformer.transform(args, data)
if not args.no_transform:
formatter.format(args, transform_blob)
if args.no_transform:
formatter.format(args, data)
if __name__ == '__main__':
args = parser.parse_args()
check_args(args)
main(args)
```
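`check_args` enforces the flag dependencies before any payload is read: `-x/--xor` requires `-key`, `-c/--caesar` requires `-rotation`, and at least one of `-v/--verbose` or `-o/--output-file` must be set. A small sketch of that validation follows; it assumes this script and its sibling modules (`transformer`, `format`) are importable as `main`, and the Namespace fields simply mirror the argparse destinations defined above.

```python
# Sketch of the flag validation above (assumes this script is importable as
# `main` together with its transformer/format dependencies); the Namespace
# fields mirror the argparse destinations defined in the script.
import argparse

from main import check_args

args = argparse.Namespace(
    file="payload.bin", xor=True, key=None, caesar=False, rotation=None,
    base64=False, reverse=False, output_file="out.txt", vba=False,
    csharp=False, cpp=False, raw=True, verbose=False, no_transform=False,
)
check_args(args)  # prints "[!] XOR encryption needs a key" and exits
```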
#### File: jfmaes/transformationsuite/transformer.py
```python
import base64
import random
class Transformer:
def __init__(self):
pass
def transform(self, args, data):
if args.xor:
return self.xor(data, args.key)
elif args.caesar:
return self.caesar(data, str(args.rotation))
elif args.reverse:
return self.reverse(data)
elif args.base64:
return self.base64(data)
def caesar(self, payload, rotation):
caesarstuff = bytearray(len(payload))
length = len(payload)
positiverotation = False
if "-" not in rotation:
positiverotation = True
rotation = rotation.strip("+")
else:
rotation = rotation.strip("-")
for i in range(length):
if positiverotation:
caesarstuff[i] = ((payload[i] + int(rotation)) & 0xFF)
else:
caesarstuff[i] = ((payload[i] - int(rotation)) & 0xFF)
print("[+] Successfully ciphered the payload!")
return caesarstuff
def xor(self,payload,key):
xorStuff = bytearray(len(payload))
keyEncoded = bytearray(str(key).encode('ascii'))
length = len(payload)
for i in range(length):
xorStuff[i] = payload[i] ^ keyEncoded[i % len(keyEncoded)]
print("[+] Successfully XOR'd the payload!")
return xorStuff
def reverse(self,payload):
data = payload[::-1]
return data
def base64(self,payload):
return base64.b64encode(payload)
#https://en.wikipedia.org/wiki/List_of_file_signatures
def prepend_magic_bytes(self,args,payload):
allowed_formats = {"ico":b"\x00\x00\x01\x00",
"gif":b"\x47\x49\x46\x38\x37\x61",
"jpeg":b"\xFF\xD8\xFF\xDB",
"jpg":b"\xFF\xD8\xFF\xEE",
"zip":b"\x50\x4B\x03\x04",
"rar":b"\x52\x61\x72\x21\x1A\x07\x01\x00",
"png":b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A",
"mp3":b"\xFF\xF3",
"iso":b"\x43\x44\x30\x30\x31",
"7z":b"\x37\x7A\xBC\xAF\x27\x1C"}
magic = b''
if args not in allowed_args:
raise ValueError
elif args.lower() == "random":
key = random.choice(allowed_formats.keys())
print("random format chosen was {0), magic bytes are {1}",key,allowed_formats[key])
magic = allowed_formats[key]
else:
magic = allowed_formats[args]
```
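Both transforms above are trivially reversible: XOR with the same repeating key is its own inverse, and the byte-wise Caesar shift is undone by the opposite rotation. A round-trip sketch, assuming this module is importable as `transformer` and using dummy payload bytes:

```python
# Round-trip sketch for the Transformer above (assumes this module is
# importable as `transformer`); the payload bytes are dummy data.
from transformer import Transformer

t = Transformer()
payload = bytearray(b"\x90\x90\xcc\xc3")

encrypted = t.xor(payload, "s3cret")
assert t.xor(encrypted, "s3cret") == payload   # XOR with the same key is its own inverse

shifted = t.caesar(payload, "+13")
assert t.caesar(shifted, "-13") == payload     # the opposite rotation undoes the shift

print("round trips OK")
```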
|
{
"source": "jfmalloy1/BlackCanyonUltra",
"score": 3
}
|
#### File: jfmalloy1/BlackCanyonUltra/BC_analysis.py
```python
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
import os
import time
import pandas as pd
import pickle
def main():
urls = [("https://ultrasignup.com/results_event.aspx?did=77199", 2021),
("https://ultrasignup.com/results_event.aspx?did=67039", 2020),
("https://ultrasignup.com/results_event.aspx?did=57827", 2019),
("https://ultrasignup.com/results_event.aspx?did=48278", 2018),
("https://ultrasignup.com/results_event.aspx?did=38965", 2017),
("https://ultrasignup.com/results_event.aspx?did=34087", 2016),
("https://ultrasignup.com/results_event.aspx?did=29244", 2015),
("https://ultrasignup.com/results_event.aspx?did=24355", 2014)]
#NOTE: Chromedriver is in /Lab/CitationNetworks
#driver = webdriver.Chrome("../../chromedriver", options=options)
#NOTE: driver setup from: https://stackoverflow.com/questions/60296873/sessionnotcreatedexception-message-session-not-created-this-version-of-chrome
driver = webdriver.Chrome(ChromeDriverManager().install())
for url, year in urls:
driver.get(url)
#Results from: https://medium.com/@elizabeth.guy86/gender-differences-in-ultra-running-f0880047b9ed
sel = "gbox_list"
results = driver.find_element_by_id(sel)
rows = results.text.split('\n')
runner_rows = [row.split() for row in rows]
cols = [c[0] for c in runner_rows[0:9]]
cols.insert(0, "Description")
cols.insert(5, "State")
# #10th element is number of finishers"['Finishers', '-', '363']"
#
content = runner_rows[10:]
content = [c[-6:] for c in content]
df = pd.DataFrame(content, columns = cols[-6:])
print(year)
print(df.head())
print()
pickle.dump(df, open("BC" + str(year) + "_100k.p", "wb"))
if __name__ == "__main__":
main()
```
|
{
"source": "jfmalloy1/ChemAsLanguage",
"score": 2
}
|
#### File: ChemAsLanguage/Fragments/common_fragments.py
```python
import sys
from rdkit import Chem
from itertools import combinations
from random import random, shuffle
from collections import defaultdict
from rdkit.Chem import MCS
import operator
import datetime
#from timeout import timeout
from math import factorial
""" Adopted from Cadaddeu 2014 """
def binomial(n,m):
return factorial(n) / (factorial(m) * factorial(n-m))
def time(): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
def findmcs(p,q):
#@timeout(2)
def fmcstimeout(p,q):
return MCS.FindMCS([p,q]).smarts
try:
return fmcstimeout(p,q)
except:
print("MCS of", p, "and", q, "timed out.")
pass
def fragments(mols):
s=[]
count = float(binomial(len(mols),2))
i = 0
percent = -1
for p,q in combinations(mols,2):
try:
s.append(findmcs(p,q))
except:
pass
p = int(i/count * 100)
if p > percent:
percent = p
print(percent, time())
i += 1
return set(s)
def loadSmarts(fn):
with open(fn,'r') as smartfile:
return set(smart.strip() for smart in smartfile)
def sameMolecule(a,b):
def same_or_timeout(a,b):
if a[1] == b[1] : return True #string compare
if a[0].GetNumAtoms() != b[0].GetNumAtoms() : return False
if a[0].GetNumBonds() != b[0].GetNumBonds() : return False
return a[0].HasSubstructMatch(b[0]) and b[0].HasSubstructMatch(a[0])
try :
res = same_or_timeout(a,b)
except:
res = False
return res
def UniqSmarts(frags):
result = set()
frags = set((g,t) for (g,t) in frags if g != None) # was successfully converted to smarts
while frags:
f,s = frags.pop()
result.add((f,s))
frags = set((g,t) for (g,t) in frags
if not (sameMolecule((f,s),(g,t)))) # check if they are the same or not. check also on the string to speed up
return result
if __name__ == "__main__":
#Input file - list of random molecules in SMILES format
print("Getting random molecules from", sys.argv[1])
with open(sys.argv[1],'r') as smiles:
mols = [Chem.MolFromSmiles(smi.strip()) for smi in smiles]
mols = [m for m in mols if m != None]
shuffle(mols)
print("Retieved",len(mols),"random molecules")
print("Aquiring random molecule fragments (and combining with molecules from", sys.argv[2])
#Input file 2 - list of molecules in SMART format (still not sure what this adds...)
frags = []
#Fragments - determines different fragments within the molecules; only the first 100 random molecules are used to keep the pairwise MCS computation tractable
for s in (fragments(mols[:100]) | loadSmarts(sys.argv[2])):
try:
frags.append((Chem.MolFromSmarts(s),s))
except:
print("AAAGGGHHH",s,"failed")
print("uniquifying")
#Make sure they are unique
frags=UniqSmarts(frags)
print("Found", len(frags), "many fragments")
h = defaultdict(int)
percent = -1
i = 0
count = float(len(mols))
print("Constructing histogram of fragments")
for m in mols:
for (f,s) in frags:
if m.HasSubstructMatch(f):
h[s] += 1
p = int(i / count * 100)
if p > percent:
percent = p
print(percent, time())
i += 1
print("Writing out histogram")
#Output file
with open(sys.argv[3],'w') as out:
for k,v in sorted(h.items(), key=operator.itemgetter(1)):
print(k, file=out)
```
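The fragment vocabulary above is built from pairwise maximum common substructures (MCS): every pair of molecules contributes the largest substructure they share, expressed as a SMARTS pattern, and the histogram then counts how many molecules match each pattern. One pairwise step looks roughly like the sketch below; it uses the same legacy `rdkit.Chem.MCS` helper the script imports (newer RDKit releases expose the equivalent `rdFMCS` module), and the two SMILES are arbitrary examples.

```python
# Single pairwise step of the pipeline above: find the maximum common
# substructure of two molecules and confirm both match it. The two SMILES
# strings are arbitrary examples.
from rdkit import Chem
from rdkit.Chem import MCS

ethanol = Chem.MolFromSmiles("CCO")
propanol = Chem.MolFromSmiles("CCCO")

smarts = MCS.FindMCS([ethanol, propanol]).smarts   # e.g. '[#6]-[#6]-[#8]'
fragment = Chem.MolFromSmarts(smarts)

for mol in (ethanol, propanol):
    assert mol.HasSubstructMatch(fragment)
print("shared fragment:", smarts)
```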
#### File: ChemAsLanguage/Fragments/distributions.py
```python
from rdkit import Chem
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
import numpy as np
from scipy import stats
import pickle
import random
from multiprocessing import Pool
import time
""" Create a list of mol representations (from rdkit) from a list of smarts strings
Note: already unique (common_fragments.py takes care of removing duplicates)
Input: a list of smarts representations
Output: list of sets: (mol representations, smarts string)
"""
def mol_from_smarts(smarts):
frags = []
for s in smarts:
try:
frags.append((Chem.MolFromSmarts(s),s))
except:
pass
return frags
""" Creates a histogram of occurances of fragments within the overall cpd distribution
Input: mols - mol representations of overall distribution
frags - fragments found within KEGG
Output: dictionary containing number of time a fragment appears in the overall distribution {smarts:occurances}
"""
def mol_count(mols, frags):
print(frags)
h = {}
for (f,s) in tqdm(frags):
h[s] = 0
for m in mols:
if m.HasSubstructMatch(f):
h[s] += 1
return h
""" Parallel version of mol_count
Inputs: list of mol representations of overall distribution, individual fragment to be counted
Output: list of smarts string & count (will be converted to dictionary later)
"""
def mol_count_parallel(mols, frag):
try:
f = Chem.MolFromSmarts(frag)
except:
return [frag, 0]
count = 0
#f,s = frag
for m in mols:
if m.HasSubstructMatch(f):
count += 1
return [frag, count]
""" Find the base fragments - those that appear in all splits
Input: list of lists of fragments. Overarching list - all splits, each sublist - fragments within that split
Output: fragments which are common to all fragment splits
"""
def base_frags(frags):
# #Initial test: how many fragments do split 0 & 1 have?
# set0 = set(frags[0])
# print(frags[0])
# set1 = set(frags[1])
# print("\n\n\n")
# print(frags[1])
# base_frags = set0.intersection(set1)
# print(base_frags)
# print(len(base_frags))
#Initial step - base fragments are the initial split
base_frags = frags[0]
#Total number of fragments - starting with the initial fragment set
total_frags = frags[0]
#Statistics over number of frags
frag_stats = [len(frags[0])]
#Find the intersection of all further splits
for i in range(1, len(frags)):
print("Length of split", i, "is:", len(frags[i]))
frag_stats.append(len(frags[i]))
base_frags = set(frags[i]).intersection(set(base_frags))
total_frags = list(set(total_frags + frags[i]))
print("Number of base fragments:", len(base_frags))
# print(base_frags)
#Goal - find total number of different fragments
print("Number of total fragments:", len(total_frags))
print("Fragment mean:", np.mean(frag_stats))
print("Fragment std:", np.std(frag_stats))
""" Graphs basic disributions
Input: h - a dictionary of smarts strings and the number of occurances within KEGG, i: iteration of particular dictionary (e.g., 1-10)
Output: pretty graphs :)
"""
def distribution_graph(h, i):
#Calculate AUC to distinguish splits
yvals = list(sorted(h.values(), reverse=True))
xvals = np.linspace(0, 1, num=len(yvals))
#area = np.trapz(yvals, dx=xvals[1])
plt.plot(xvals, yvals, label = "100 Random KEGG Compounds", color="darkgreen", linewidth=3)#"Split " + str(i) + " AUC=" + str(round(area, 2))) #Note: AUC label
plt.yscale("log")
plt.xscale("log")
plt.xlabel("Rank-ordered Compounds")
plt.ylabel("Occurances in KEGG")
plt.legend
plt.show()
""" Convert a pickled distribution to csv format
Input: File path to a pickled dictionary
Output: csv file (same location) of data
"""
def convert_dist_toCSV(fp):
# for label in ["5000cpds"]:#["1000cpds", "2000cpds", "3000cpds", "4000cpds"]:#, "5000cpds"]:
# print(label)
sample_frags = pickle.load(open(fp, "rb"))
df = pd.DataFrame.from_dict(sample_frags, orient="index", columns=["Occurances"])
df["Fragments"] = df.index
df = df.reset_index(drop=True)
df.to_csv(fp[:-2] + ".csv")
def main():
# # ## Read in mol objects of KEGG ##
# with open("Biology/Data/kegg_smiles.txt",'r') as smiles:
# mols = [Chem.MolFromSmiles(smi.strip()) for smi in smiles]
# mols = [m for m in mols if m != None]
## Read in mol objects from KEGG sampling
for fp in os.listdir("Technology/Data/"):
mols = pickle.load(open("Technology/Data/" + fp, "rb"))
# Parallel Occurances calculations
pool = Pool(processes=6)
# dirpath = "Biology/Data/Tests/Timeout/"
# for file in os.listdir(dirpath): #For reading in all fragments
# if file.endswith("_unique.p"): #ensure only unique fragment sets are counted
start = time.time()
# fp = dirpath + file
print("Analyzing:", fp)
frags = pickle.load(open("Technology/Data/" + fp, "rb")) #Load in fragments
print("Analyzing", len(frags), "fragments")
frag_occurances = []
frag_occurances = pool.starmap(mol_count_parallel, tqdm(zip([mols]*len(frags), frags), total=len(frags))) #Parallel occurances calculations
h = {f[0]: f[1] for f in frag_occurances} #Convert list of lists into dictionary
pickle.dump(h, open("Technology/Data/" + fp[:-2] + "unique.p", "wb")) #Save dictionary to new pickle file
print("Time:", time.time() - start)
print()
# with open("Tests/Hundred_cpds/dup_100cpds_0.txt") as f:
# frags.append([line.rstrip('\n') for line in f])
#
# # ## Find repeatability ##
# # base_frags(frags)
# ## Find distribution of a fragment sample over full database ##
# for label in ["1000cpds", "2000cpds", "3000cpds", "4000cpds", "5000cpds"]:
# print(label)
# frags = pickle.load(open("Biology/Data/KEGG_fragments_full.p", "rb"))
# h = mol_count(mols, frags)
#
# pickle.dump(h, open("Biology/Data/KEGG_fragments_full_occurances.p", "wb"))
# for f in os.listdir("Biology/Data/Tests/Timeout/"):
# if f.endswith("_occurances.p"):
convert_dist_toCSV("Technology/Data/" + fp[:-2] + "unique.p")
# ## Find pre-made distribution over random molecule set ##
# h = pd.read_csv("Tests/Hundred_cpds_random_subsampleOccurances/dup_0_occurances.csv", header=None, skiprows=1, index_col=0, squeeze=True).to_dict()
# print(h)
# distribution_graph(h, 0)
#
# plt.show()
if __name__ == "__main__":
main()
```
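The occurrence counting above is parallelized by broadcasting the same molecule list to every fragment through `Pool.starmap` and folding the returned `[smarts, count]` pairs back into a dictionary. A toy, self-contained sketch of that broadcast-and-collect pattern follows; the function and variable names are illustrative, not from the script.

```python
# Toy version of the starmap broadcast used above: the same reference list is
# paired with every query, and the [query, count] results are folded back into
# a dict. Names are illustrative only.
from multiprocessing import Pool


def count_occurrences(reference, query):
    return [query, sum(1 for item in reference if query in item)]


if __name__ == "__main__":
    reference = ["CCO", "CCCO", "c1ccccc1O", "CCN"]
    queries = ["CC", "O", "N"]
    with Pool(processes=2) as pool:
        results = pool.starmap(count_occurrences,
                               zip([reference] * len(queries), queries))
    histogram = {query: count for query, count in results}
    print(histogram)   # {'CC': 3, 'O': 3, 'N': 1}
```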
#### File: ChemAsLanguage/Word2Vect/build_KEGG_gensim.py
```python
import pandas as pd
from rdkit import Chem
import numpy as np
import re
import itertools
import random
import os
from gensim.models import Word2Vec
from gensim.test.utils import get_tmpfile
from gensim.models import KeyedVectors
""" Return a dataframe of all KEGG data """
""" Input: the filepath (fp) to a csv file containing canonical smiles strings """
""" Output: dataframe of KEGG data, including canoncial smiles strings """
def get_KEGG_smiles(fp):
df = pd.read_csv(fp)
return df
""" Return a list of all chemical fragments in SMARTS form """
""" Input: filepath (fp) to txt file """
""" Output: list of SMARTS fragments """
def get_chem_fragments(fp):
fragments = []
with open(fp, "r") as f:
for line in f:
fragments.append(line.strip())
return fragments
""" Return the mol representation of a given Smiles string
Input: Smiles string of a compound
Output: mol representation
"""
def get_mol_representation(smiles):
return Chem.MolFromSmiles(smiles)
""" Return an ordered list of all fragments present in the given compounds
Input: Dataframe containing smiles strings of compounds
Output: ordered list of chemical fragments
"""
def get_fragment_list(df, fragments):
#Master list of words
words = []
#Remove old line-sentence format
os.remove("frags_linesentence.txt")
#Loop through all molecules
for index, row in df.iterrows():
print(index)
#store fragments present in each molecule
mol_words = []
#Turn smiles into mol format
mol = get_mol_representation(row["Original SMILES"])
#find fragments present
for f in fragments:
try:
if mol.HasSubstructMatch(Chem.MolFromSmarts(f)):
mol_words.append(f)
except:
continue
#Randomize order of fragments (because what is order in a compound?)
random.shuffle(mol_words)
words.append(mol_words)
#Make a file of the fragements within each molecule - easier to train gensim model on
for w in mol_words:
print(w, end=" ", file=open("frags_linesentence.txt", "a"))
print(file=open("frags_linesentence.txt", "a"))
def main():
#Read in KEGG data
df = get_KEGG_smiles("kegg_data.csv")
#Read in chem fragments
fragments = get_chem_fragments("frags.txt")
### TEST on a sample of KEGG ###
ordered_frags = get_fragment_list(df, fragments)
print(ordered_frags)
# for frag in ordered_frags:
# print(frag)
# #Build Word2Vec model on ordered fragments
word2vec = Word2Vec(corpus_file = "frags_linesentence.txt", min_count=2)
## MODEL TESTING ##
v1 = word2vec.wv["[#6]-[#6]"]
print(v1)
sim_frags = word2vec.wv.most_similar("[#6]-[#6]")
print(sim_frags)
#Save trained model
word_vectors = word2vec.wv
#fname = get_tmpfile("vectors_01.kv")
word_vectors.save("vectors_fullKEGG.kv")
if __name__ == "__main__":
main()
```
#### File: Word2Vect/DEGREES/find_degrees.py
```python
import pandas as pd
from rdkit import Chem
import numpy as np
import json
from gensim.models import Word2Vec
from gensim.test.utils import get_tmpfile
from gensim.models import KeyedVectors
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import re
""" Load trained Word2Vec model - Gensim on full KEGG
Input: None
Output: trained word2vec model
"""
def load_w2v():
return KeyedVectors.load("../vectors_fullKEGG.kv", mmap="r")
""" Return a list of all chemical fragments in SMARTS form """
""" Input: filepath (fp) to txt file """
""" Output: list of SMARTS fragments """
def get_chem_fragments(fp):
fragments = []
with open(fp, "r") as f:
for line in f:
fragments.append(line.strip())
return fragments
""" Find the fragments within a list of smiles strings
Input: list of compound IDs, list of SMILES strings, and the fragment SMARTS list
Output: List of lists of fragments within smiles strings, plus the compound IDs that could be parsed
"""
def find_frags_within_SMILES(cpd_list, smiles, frags):
cpd_frags = []
removed_cpds = []
i = 0
for smi in smiles:
#Turn AA into mol file
mol = Chem.MolFromSmiles(smi)
#Loop through all fragments to find occurances within AAs
individual_frags = []
for f in frags:
try:
#If a fragment is found in an AA, add it to the individual frags list
if mol.HasSubstructMatch(Chem.MolFromSmarts(f)):
individual_frags.append(f)
except:
removed_cpds.append(cpd_list[i])
pass
#Add each individual AA to AA frags - remove
cpd_frags.append(list(set(individual_frags)))
i += 1
return cpd_frags, [x for x in cpd_list if x not in removed_cpds]
""" Find all SMILES sequences for a random subset of KEGG
Input: kegg dataframe, number of samples to be collected
Output: a list of smiles strings from the random sample
"""
def find_random_SMILES(kegg_df, n_samples, cpds_to_ignore):
#Remove all compounds which are being classified
kegg_df = kegg_df[~kegg_df["MOL file"].isin(cpds_to_ignore)]
#Remove all empty SMILES values
kegg_df = kegg_df.dropna(subset=["Original SMILES"])
kegg_df = kegg_df[kegg_df["Original SMILES"] != ""]
#Randomly sample KEGG
sub_df = kegg_df.sample(n_samples)
#Return smiles strings
return sub_df["Original SMILES"].tolist(), sub_df["MOL file"].tolist()
""" Find & add all fragment vectors within a compound
Goal is to have a single vector for each compound
Inputs: trained word2vec model, list of lists of fragments within amino acids
Outputs: one vector (sum of all fragment vectors) per amino acid
"""
def add_frag_vectors(cpd_list, word2vec, frags):
vectors = []
removed_cpds = []
i = 0
#loop through amino acids
for cpd in frags:
vs = []
#Loop through fragments within each amino acid, add vectors to a list
for f in cpd:
try:
vs.append(word2vec[f])
except:
pass
#Only sum vectors if vectors were present in the compound
if vs:
vectors.append(np.sum(vs, axis=0).astype("float64"))
else:
removed_cpds.append(cpd_list[i])
#Ensure the correct compound gets removed
i+=1
return vectors, [x for x in cpd_list if x not in removed_cpds]
""" Run TSNE visualization
Input: dataframe of compoud vectors (df["label"] is the compound label)
Output: Visualization of the trained vectors
"""
def TSNE_visual(df, n_categories):
#find values to pass to TSNE
data_values = df[list(range(0,100))].values
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(data_values)
df["tsne-2d-one"] = tsne_results[:,0]
df["tsne-2d-two"] = tsne_results[:,1]
pal = sns.color_palette("hls", n_categories)
plt.figure(figsize=(16,10))
sns.scatterplot(
x="tsne-2d-one", y="tsne-2d-two",
hue="label",
palette=sns.color_palette(palette=pal),
data=df,
legend="full"
)
plt.show()
""" Find all compounds associated with a particular class of compounds within KEGG
Input: dataframe of KEGG data, trained W2V model, fragment list, label of class to search for, dict mapping compound IDs to class labels
Output: dataframe of vectors associated with a particular class
"""
def get_class_dataframe(kegg_df, word2vec, frags, class_label, cpd_classes):
#Find all compound IDs associated with a particular label
cpd_ids = [k for k,v in cpd_classes.items() if v == class_label]
cpd_smiles = kegg_df[kegg_df["MOL file"].isin(cpd_ids)]["Original SMILES"].tolist()
class_frags, cpd_ids = find_frags_within_SMILES(cpd_ids, cpd_smiles, frags)
vectors, cpd_ids = add_frag_vectors(cpd_ids, word2vec, class_frags)
class_df = pd.DataFrame(vectors)
class_df["label"] = [class_label] * len(class_df)
print("Number of", class_label, "compounds:", len(class_df))
return class_df
""" Builds a KEGG network, finds the central nodes, calculates distances between all nodes
Input: None (assumes newKEGG_reactionEdges.json exists within the current directory)
Output: networkx graph of KEGG (unipartite, consisting only of compounds), the central node of the network, distances between all cpds
"""
def KEGG_network():
#Load KEGG json file
f = open("newKEGG_reactionEdges.json", "r")
kegg = json.load(f)
#Find all reaction-compound pairs
rxn_list = []
cpd_list = []
cpd_rxn_pairs = []
#loop through both products and substrates
for option in ["products", "substrates"]:
for rxn in kegg[option]:
#add each reaction to a master list
rxn_list.append(rxn)
for cpd in kegg["products"][rxn]:
#add each compound to a master list
cpd_list.append(cpd)
#create a tuple of each cpd_rxn pair, add them to a master list
cpd_rxn_pairs.append(tuple([cpd, rxn]))
#remove duplicates of reactions and compounds
rxn_list = list(set(rxn_list))
cpd_list = list(set(cpd_list))
#Create a bipartite graph using reactions, compounds, and the cpd/rxn pair
KEGG_graph = nx.Graph()
KEGG_graph.add_nodes_from(rxn_list, bipartite=0)
KEGG_graph.add_nodes_from(cpd_list, bipartite=1)
KEGG_graph.add_edges_from(cpd_rxn_pairs)
#Create a project of only compounds
KEGG_cpd_graph = nx.bipartite.projected_graph(KEGG_graph, cpd_list)
#Find the central node of the largest connected component
lcc = max(nx.connected_components(KEGG_cpd_graph), key=len)
lcc_graph = KEGG_cpd_graph.subgraph(lcc)
## CENTER(s) ##
centers = ['C00006', 'C00014', 'C00025', 'C00001', 'C00011']
#Calculate distances between all nodes
distances = dict(nx.all_pairs_shortest_path_length(KEGG_cpd_graph))
return KEGG_cpd_graph, centers, distances
""" Find the maximum distance between a given compound and the centers of the graph
Input: Compound, centers of the largest connected component within the graph
Output: Distance (int), or "NC" if not connected
"""
def find_distance(cpd, centers, distances):
d = []
#Find the distance between the "centers" of the largest connected component
for c in centers:
try:
d.append(distances[c][cpd])
except:
pass
#If the random compound is not in the largest connected component, label it "NC" (not connected)
if not d:
return "NC"
#Otherwise, label with the max distance from the center
else:
return str(max(d))
def main():
#Load w2v model, kegg dataframe, and all fragments
word2vec = load_w2v()
kegg_df = pd.read_csv("../kegg_data.csv")
frags = get_chem_fragments("../frags.txt")
KEGG_cpd_graph, centers, distances = KEGG_network()
## RANDOM CPDS ##
#Sample 1000 random compounds, excluding none (initially)
rand_cpds, cpd_list = find_random_SMILES(kegg_df, 1000, [])
rand_frags, cpd_list = find_frags_within_SMILES(cpd_list, rand_cpds, frags)
rand_vectors, cpd_list = add_frag_vectors(cpd_list, word2vec, rand_frags)
rand_df = pd.DataFrame(rand_vectors)
rand_df["Cpds"] = cpd_list
#Label by max distance from central compound
cpd_distance = []
for index, row in rand_df.iterrows():
cpd_distance.append(find_distance(re.sub(r"\.mol", "", row["Cpds"]), centers, distances))
rand_df["label"] = cpd_distance
#Remove all "NC" labels for clearer interpretation
sub_df = rand_df[rand_df["label"] != "NC"]
#print("Number of random vectors:", len(rand_df))
#Run TSNE
#TSNE_visual(rand_df, len(rand_df["label"].unique()))
TSNE_visual(sub_df, len(sub_df["label"].unique()))
if __name__ == "__main__":
main()
```
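The `TSNE_visual` helper above reduces the 100-d fragment-sum vectors to two dimensions before plotting. Below is a minimal, self-contained sketch of that reduction step, using random data in place of the word2vec sums; sklearn, seaborn, pandas, and matplotlib are assumed to be installed, and the column and label names are illustrative only.
```python
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

# Toy stand-in for the 100-d compound vectors built above.
rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(200, 100)))
df["label"] = rng.choice(["3", "4", "5"], size=len(df))

# Same reduction as TSNE_visual(): fit on the numeric columns only.
embedding = TSNE(n_components=2, perplexity=40, n_iter=300).fit_transform(
    df[list(range(100))].values)
df["tsne-2d-one"], df["tsne-2d-two"] = embedding[:, 0], embedding[:, 1]

sns.scatterplot(x="tsne-2d-one", y="tsne-2d-two", hue="label", data=df)
plt.show()
```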
#### File: Word2Vect/DISTRIBUTIONS/common_fragments.py
```python
import sys
from rdkit import Chem
from itertools import combinations
from random import random, shuffle, sample
from collections import defaultdict
from rdkit.Chem import MCS
import operator
import datetime
#from timeout import timeout
from math import factorial
from tqdm import tqdm
import json
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
import numpy as np
from scipy import stats
""" Adopted from Cadaddeu 2014 """
def binomial(n,m):
return factorial(n) / (factorial(m) * factorial(n-m))
def time(): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
def findmcs(p,q, t):
#@timeout(2)
def fmcstimeout(p,q):
return MCS.FindMCS([p,q], timeout=1).smarts
try:
return fmcstimeout(p,q)
except:
print("MCS of", p, "and", q, "timed out.")
pass
def fragments(mols, t):
s=[]
count = float(binomial(len(mols),2))
i = 0
percent = -1
for p,q in tqdm(combinations(mols,2)):
try:
s.append(findmcs(p,q, t))
except:
pass
return set(s)
def loadSmarts(fn):
with open(fn,'r') as smartfile:
return set(smart.strip() for smart in smartfile)
def sameMolecule(a,b):
def same_or_timeout(a,b):
if a[1] == b[1] : return True #string compare
if a[0].GetNumAtoms() != b[0].GetNumAtoms() : return False
if a[0].GetNumBonds() != b[0].GetNumBonds() : return False
return a[0].HasSubstructMatch(b[0]) and b[0].HasSubstructMatch(a[0])
try :
res = same_or_timeout(a,b)
except:
res = False
return res
def UniqSmarts(frags):
result = set()
frags = set((g,t) for (g,t) in frags if g != None) # was successfully converted to smarts
while frags:
f,s = frags.pop()
result.add((f,s))
frags = set((g,t) for (g,t) in frags
if not (sameMolecule((f,s),(g,t)))) # check if they are the same or not. check also on the string to speed up
return result
""" Find specific compound classes given the KEGG Brite json file, given a specific labl
Input: filepath to the KEGG BRITE classification, KEGG compound list (including smiles), classification label to find
Output: SMILES strings associated with each compound class
"""
def cpd_classes(brite_fp, cpd_fp, label):
with open(brite_fp) as json_file:
br_json = json.load(json_file)
#Create a dictionary with each compound having the appropriate label
classes = []
cpd_class = {}
for key1 in br_json["children"]:
classes.append(key1["name"])
for key2 in key1["children"]:
for key3 in key2["children"]:
for cpd in key3["children"]:
cpd_class[cpd["name"][:6]] = key1["name"]
#print(cpd["name"][:6] + " " + key1["name"])
#Read in all KEGG compound labels
df = pd.read_csv(cpd_fp)
#Find & return all smiles strings associated with a specific label
cpds = [k for k, v in cpd_class.items() if v == label]
return df[df["C"].isin(cpds)]["S"].tolist()
""" Read in a specific percentile of universal compounds (5 - top 5%, 10 - top 10%, etc...)
Input: percentile of compound (must be 5, 10, or 15)
Output: list of smiles corresponding to universal compounds
"""
def percentiles(p):
#Read in compound labels
cpds = []
if p == 5:
with open("95percentile.txt") as f:
cpds = [line.rstrip("\n") for line in f]
elif p == 10:
with open("90percentile.txt") as f:
cpds = [line.rstrip("\n") for line in f]
else:
with open("85percentile.txt") as f:
cpds = [line.rstrip("\n") for line in f]
#Read in all KEGG compound smiles data
df = pd.read_csv("../chiral_molweight_formula_labels.csv")
#Return list of smiles
smiles = list(map(str, df[df["C"].isin(cpds)]["S"].tolist()))
return smiles
""" Create a list of mol representations (from rdkit) from a list of smarts strings
Note: already unique (common_fragments.py takes care of removing duplicates)
Input: a list of (mol, smarts) representations
Output: list of sets: (mol representations, smarts string)
"""
def mol_from_smarts(smarts):
mols = []
for m, s in smarts:
mols.append(m)
return mols
""" Creates a histogram of occurances of fragments within the overall cpd distribution
Input: mols - mol representations of overall distribution
frags - fragments found within KEGG
Output: dictionary containing number of time a fragment appears in the overall distribution {smarts:occurances}
"""
def mol_count(mols, frags):
h = {}
for (f,s) in tqdm(frags):
h[s] = 0
for m in mols:
if m.HasSubstructMatch(f):
h[s] += 1
return h
""" Find the base fragments - those that appear in all splits
Input: list of lists of fragments. Overarching list - all splits, each sublist - fragments within that split
Output: fragments which are common to all fragment splits
"""
def base_frags(frags):
#Initial step - base fragments are the initial split
base_frags = frags[0]
#Total number of fragments - starting with the initial fragment set
total_frags = frags[0]
#Statistics over number of frags
frag_stats = [len(frags[0])]
#Find the intersection of all further splits
for i in range(1, len(frags)):
print("Length of split", i, "is:", len(frags[i]))
frag_stats.append(len(frags[i]))
base_frags = set(frags[i]).intersection(set(base_frags))
total_frags = list(set(total_frags + frags[i]))
print("Number of base fragments:", len(base_frags))
# print(base_frags)
#Goal - find total number of different fragments
print("Number of total fragments:", len(total_frags))
print("Fragment mean:", np.mean(frag_stats))
print("Fragment std:", np.std(frag_stats))
""" Find all smiles strings associated with a particular domain
Input: Domain from ["Eukarya", "Bacteria", "Archaea"]
Output: list of all smiles strings associated with that particular domain
"""
def domain_smiles(domain):
fp = ""
if domain == "Eukarya":
fp = "eukarya_cpds.csv"
elif domain == "Bacteria":
fp = "bacteria_cpds.csv"
elif domain == "Archaea":
fp = "archaea_cpds.csv"
domain_df = pd.read_csv(fp)
#kegg_data_curated is the same as chiral_molweight_formula_labels, containing smiles in the "S" column
kegg_df = pd.read_csv("kegg_data_curated.csv") #Assumes kegg_data_curated.csv is in above directory
#Return smiles strings (in list object) of that particular domain
return kegg_df[kegg_df["C"].isin(domain_df["compounds"])].dropna(subset=["S"])["S"].tolist()
""" Find all smiles compounds associated with minerals (COD database subset)
Input: filepath to csv file containing a subset of the COD database
Output: List of smiles
"""
def mineral_smiles(fp):
df = pd.read_csv(fp)
return df["SMI"].tolist()
""" Graphs basic disributions
Input: h - a dictionary of smarts strings and the number of occurances within KEGG, i: iteration of particular dictionary (e.g., 1-10);
l - label of graph, rev - True/False distinction for reversability of sorting, fp - filepath for savefig, title - title for plot
Output: pretty graphs :)
"""
def distribution_graph(h, i, l, rev, fp, title):
#Calculate AUC to distinguish splits
yvals = list(sorted(h.values(), reverse=rev))
xvals = np.linspace(0, 1, num=len(yvals))
area = np.trapz(yvals, dx=xvals[1])
plt.plot(xvals, yvals, linewidth=3, label=str(l) + " AUC=" + str(round(area, 2))) #Note: AUC label
plt.yscale("log")
plt.xscale("log")
plt.xlabel("Rank-ordered Compounds")
plt.ylabel("Occurances")
plt.title(title)
plt.legend()
""" RUNTIME NOTES:
argv[1] = random molecues in SMILES format
argv[2] = output filepath (ex: "Tests/Hundred_cpds/")
argv[3] = figure label
argv[2] = more molecules in SMART format (optional?) - REMOVED FOR NOW
"""
if __name__ == "__main__":
#Input file - list of random molecules in SMILES format
print("Getting (database) compounds from", sys.argv[1])
#Make directory (if it does not already exist)
if not os.path.isdir(sys.argv[2]):
os.mkdir(sys.argv[2])
# ## Compound Classes ##
# smiles = cpd_classes("../br08001.json", "../chiral_molweight_formula_labels.csv", "Hormones and transmitters")
# mols = [Chem.MolFromSmiles(smi.strip()) for smi in smiles]
# mols = [m for m in mols if m != None]
# print("Retieved",len(mols),"random molecules")
# ## Percentiles ##
# #Input should be 5, 10, or 15 to refer to top 5%, 10%, and 15% of universal compounds
# smiles = percentiles(5)
# mols = [Chem.MolFromSmiles(smi.strip()) for smi in smiles]
# mols = [m for m in mols if m != None]
# print("Retieved",len(mols),"classified compounds")
# ## Domains ##
# #Goal - have either Eukarya, Bacteria, or Archaea compounds (general compounds) be used for fragments
# smiles = domain_smiles("Eukarya")
# mols = [Chem.MolFromSmiles(smi.strip()) for smi in smiles]
# mols = [m for m in mols if m != None]
# print("Retieved",len(mols),"random molecules")
## Minerals
#Goal - see mineral occurrence, both within minerals themselves and across KEGG (for now)
smiles = mineral_smiles("COD_SMI_IMA_subset218.csv")
mols = [Chem.MolFromSmiles(smi.strip()) for smi in smiles]
mols = [m for m in mols if m != None]
print("Retieved",len(mols),"random molecules")
#Get all of KEGG
with open(sys.argv[1],'r') as db_smiles:
db_mols = [Chem.MolFromSmiles(smi.strip()) for smi in db_smiles]
db_mols = [m for m in db_mols if m != None]
print("Retieved",len(db_mols),"base molecules")
#print("Aquiring random molecule fragments") # (and combining with molecules from", sys.argv[2])
## DUPLICATION ##
for i in range(1):
#shuffle(mols)
#Input file 2 - list of molecules in SMART format (still not sure what this adds...)
frags = []
#Fragments - determines different fragments within the molecules.
# # NOTE: use a subset of full random mols (mols) or class_mols, as needed
#mols = sample(db, 100)
#mols = db_mols #Note: for full database test
# for t in [0.01, 0.1, 1, 10, 100]: #Note: testing timeout for MCS algorithm
t = 0.1 #timeout time for MCS - 0.1
for s in (fragments(mols, t)): # either mols or a sample, depending on if a subsample is taken or not
try:
frags.append((Chem.MolFromSmarts(s),s))
except:
print("AAAGGGHHH",s,"failed")
print("uniquifying")
#Make sure they are unique
frags=UniqSmarts(frags)
print("Found", len(frags), "many fragments")
h = defaultdict(int)
count = float(len(mols))
#Construct histogram over full random molecule set
print("Constructing histogram of fragments")
for m in tqdm(mols): #mol_sample or mols, depending on if a subsample is taken or not
for (f,s) in frags:
if m.HasSubstructMatch(f):
h[s] += 1
print("Writing out histogram V" + str(i))
#Output file - only smiles fragments
with open(sys.argv[2] + str(i) + ".txt",'w') as out: #NOTE: CHANGED THIS TO argv[2]
for k,v in sorted(h.items(), key=operator.itemgetter(1)):
print(k, file=out)
#Output file 2 - csv containing smiles fragments & occurances within random molecule set
with open(sys.argv[2] + str(i) + "_sampleOccurances.csv",'a') as out: #NOTE: CHANGED THIS TO argv[2]
print("Frags,Occurances", file=out)
for k,v in sorted(h.items(), key=operator.itemgetter(1)):
print(str(k) + "," + str(v), file=out)
## DISTRIBUTION ANALYSIS ##
# ## Find repeatability ## #Note: if more than one loop is done, this (and everything below) should be outside
# base_frags(frags)
## Find distribution over full database ##
#Find histogram of frag occurrences over entire database
h = mol_count(db_mols, frags)
#Graph things (values, split, label, reverse T/F, plot filepath, plot title)
distribution_graph(h, i, sys.argv[3], True, sys.argv[2], sys.argv[3].replace("_", " "))
with open(sys.argv[2] + str(i) + "_fullOccurances.csv",'a') as out: #NOTE: CHANGED THIS TO argv[2]
print("Frags,Occurances", file=out)
for k,v in sorted(h.items(), key=operator.itemgetter(1)):
print(str(k) + "," + str(v), file=out)
## Find pre-made distribution over random molecule set ##
h = pd.read_csv(sys.argv[2] + str(i) + "_fullOccurances.csv", header=None, skiprows=1, index_col=0, squeeze=True).to_dict()
distribution_graph(h, 0, sys.argv[3], True, sys.argv[2] + "subset_only", sys.argv[3].replace("_", " ") + " Subset Only")
plt.savefig(sys.argv[2] + "distribution_graph")
plt.close()
```
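common_fragments.py builds its fragment set from pairwise maximum common substructures (MCS). The sketch below shows that core step on two toy molecules; it uses RDKit's newer rdFMCS module rather than the deprecated `from rdkit.Chem import MCS` import above, and assumes RDKit is installed.
```python
from rdkit import Chem
from rdkit.Chem import rdFMCS

# Two toy molecules that share a benzene ring.
mols = [Chem.MolFromSmiles("c1ccccc1O"), Chem.MolFromSmiles("c1ccccc1N")]

# Pairwise MCS, as in fragments(); the timeout keeps hard pairs from stalling.
result = rdFMCS.FindMCS(mols, timeout=1)
frag = Chem.MolFromSmarts(result.smartsString)

# The histogram step then counts which molecules contain each fragment.
print(result.smartsString, [m.HasSubstructMatch(frag) for m in mols])
```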
#### File: Word2Vect/DISTRIBUTIONS/mineral_parse.py
```python
from tqdm import tqdm
def main():
#Open mineral database
for line in tqdm(open("all_cod_smiles.txt")):
print(line.split("\t")[0], file=open("mineral_smiles.txt", "a"))
if __name__ == "__main__":
main()
```
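The loop above re-opens mineral_smiles.txt once per input line. A slightly more idiomatic sketch of the same split-and-write step, with both files handled by context managers (the tab-separated layout of all_cod_smiles.txt is assumed from the original):
```python
from tqdm import tqdm

# Keep only the first tab-separated field (the SMILES string) of each line.
with open("all_cod_smiles.txt") as src, open("mineral_smiles.txt", "w") as dst:
    for line in tqdm(src):
        dst.write(line.split("\t")[0] + "\n")
```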
#### File: ChemAsLanguage/Word2Vect/transpose_matrix.py
```python
import pandas as pd
import numpy as np
def main():
#read in V1 coocurrance
df = pd.read_csv("frags_coocurrance_V1.csv")
#convert df to numpy array (drop first column, convert)
sub_df = df.drop(df.columns[0], axis=1)
X = sub_df.to_numpy()
print(X)
#transpose matrix
X = X + X.T - np.diag(np.diag(X))
#print(X)
#Write back out to csv file
df_transposed = pd.DataFrame(data = X, index = df.columns[1:], columns = df.columns[1:])
#print(df_transposed)
df_transposed.to_csv("frags_coocurrance_full.csv")
if __name__ == "__main__":
main()
```
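The `X + X.T - np.diag(np.diag(X))` step above mirrors an upper-triangular co-occurrence matrix into a full symmetric one while counting the diagonal only once. A toy illustration of the same identity:
```python
import numpy as np

# Upper-triangular co-occurrence counts (diagonal = self co-occurrence).
X = np.array([[4, 2, 1],
              [0, 3, 5],
              [0, 0, 2]])

# Mirror the upper triangle; subtracting diag(diag(X)) avoids doubling it.
full = X + X.T - np.diag(np.diag(X))
print(full)
# [[4 2 1]
#  [2 3 5]
#  [1 5 2]]
```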
|
{
"source": "jfmalloy1/Patents",
"score": 3
}
|
#### File: jfmalloy1/Patents/assemblyCalcs_percentiles.py
```python
import assemblycalculator as ac
import multiprocessing as mp
import pickle
import pandas as pd
import os
def calculate_assembly_MC(inchi):
""" Calculate the assembly value of an inchi string using the monteCarlo assembly method
Args:
inchi (string): inchi representation of the SureChemBL compound
Returns:
dict: values of inchi and the assembly index
"""
ai = ac.calculate_ma(inchi,
120,
"monte-carlo",
num_frags_hist=10000,
path_samples=20000)
return {"inchi": inchi, "ai": ai}
def calculate_assembly_fragment(inchi):
""" Calculate the assembly value of an inchi string using the fragment assembly method
Args:
inchi (string): inchi representation of the SureChemBL compound
Returns:
dict: values of inchi and the assembly index
"""
ai = ac.calculate_ma(inchi, method="fragment", timeout=300)
return {"inchi": inchi, "ai": ai}
def read_cpds(fp):
""" Read inchi compounds from csv files
Args:
fp (string): Relative file path to csv file with SureChemBL cpd data
Returns:
list: list of all inchis contained in the csv file
"""
data = pd.read_csv(fp)
return data["InChI"].tolist()
def get_changing_percentileFiles(side, precision):
""" Find the files which correspond to change percentiles
Args:
side (string): min/max, referring to the largest negative/positive changes, respectively
precision (string): string representation of percentile precision
Returns:
list: list of file names which fit the appropriate criteria
"""
files = []
for f in os.listdir("Data/Cpd_Data/"):
if f.startswith("ids_change_" + side + "Percentile_" +
precision) and f.endswith(".csv"):
files.append(f)
return files
def get_top_percentileFiles():
""" Returns the csv files corresponding to compounds above the 99.99th percentile
of total attachment values
Returns:
list: list of file names which fit the appropriate criteria
"""
files = []
for f in os.listdir("Data/Cpd_Data/"):
if f.startswith("ids_above99_99percentile"):
files.append(f)
return files
def calculate_MAs(files):
""" Wrapper function for MA calculation
Args:
files (list): list of all files which contain relevant data
Returns:
Writes a file containing inchis linked with assembly values
"""
for f in files:
cpds = read_cpds("Data/Cpd_Data/" + f)
#Set up parallelization - a bit of overhead for setting it up, but that's fine
pool = mp.Pool(64)
#Calculate assembly values using MC method
assemblies = pool.map(calculate_assembly_MC, cpds)
pool.close()
pickle.dump(assemblies,
file=open("Data/Cpd_Data/" + f[:-4] + "_assembly.p", "wb"))
def calculate_largeMAs(f):
""" Calculates the MA of compounds with a Monte Carlo values >= 40 using
the "fragment" method - also a rough approximation, but is better for large values
Args:
f (string): file containing inchi strings &
"""
data = pickle.load(file=open("Data/Cpd_Data/" + f, "rb"))
large_MA_cpds = []
for cpd in data:
if cpd["ai"] >= 40:
large_MA_cpds.append(cpd["inchi"])
print("----- " + f + " -----")
print(large_MA_cpds)
print()
pool = mp.Pool(64)
assemblies = pool.map(calculate_assembly_fragment, large_MA_cpds)
pool.close()
pickle.dump(assemblies,
file=open("Data/Cpd_Data/" + f[:-2] + "_large.p", "wb"))
def main():
""" Steps
1. Read in compounds from a specific file
2. MC assembly algorithm
3. Save calculations using name + "assembly" - link inchi & MA
4. Link assembly values to csv file (eventually, probably do this in a separate script)
"""
# ### SMALLEST & LARGEST CHANGE VALUES ###
# #Options: min/max, 0.1/0.01
# for option in [("min", "0.1"), ("min", "0.01"), ("max", "0.1"),
# ("max", "0.01")]:
# files = get_changing_percentileFiles(option[0], option[1])
# calculate_MAs(files)
# ### TOP ATTACHMENT VALUES ###
# files = get_top_percentileFiles()
# calculate_MAs(files)
for pair in [(1980, 1984), (1985, 1989), (1990, 1994), (1995, 1999),
(2000, 2004), (2005, 2009), (2010, 2014), (2015, 2019)]:
f = "ids_above99_99percentile" + str(pair[0]) + "_" + str(
pair[1]) + "cpdData_assembly.p"
calculate_largeMAs(f)
if __name__ == "__main__":
main()
```
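Both calculate_MAs and calculate_largeMAs follow the same pool → map → pickle pattern. The sketch below shows that pattern with a stand-in worker in place of the assemblycalculator call; the worker body, pool size, and file name are illustrative only.
```python
import multiprocessing as mp
import pickle

def worker(inchi):
    # Stand-in for ac.calculate_ma(...); returns the same {"inchi", "ai"} shape.
    return {"inchi": inchi, "ai": len(inchi)}

if __name__ == "__main__":
    cpds = ["InChI=1S/CH4O/c1-2/h2H,1H3", "InChI=1S/H2O/h1H2"]
    with mp.Pool(4) as pool:
        results = pool.map(worker, cpds)
    with open("toy_assembly.p", "wb") as f:
        pickle.dump(results, f)
    print(results)
```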
#### File: jfmalloy1/Patents/cpd_analysis.py
```python
import pickle
import pandas as pd
from tqdm import tqdm
import os
import heapq
import scipy.stats as stats
from random import sample
def build_cpd_df(fp):
""" Takes 29 separate compound data files and combines them into a single pandas dataframe for ease of access
Args:
fp (string): Filepath to SureChemBL data files (assuming G drive goes to jmalloy3 Google Account)
Returns:
None - but does write a pickled dataframe to SureChemBL_Patents/Cpd_Data/ directory
"""
dfs = []
for f in tqdm(os.listdir(fp)):
if f.endswith(".txt"):
dfs.append(pd.read_csv(fp + f, sep="\t", header=0))
df = pd.concat(dfs, ignore_index=True)
print(df)
pickle.dump(df, file=open(fp + "SureChemBL_allCpds.p", "wb"))
del df
def find_highest_degrees(df, n, start, stop):
""" Finds the n highest-degree compounds within a specific date range
Saves various data associated with those n compounds - smiles, inchi,
inchikey, degree, preferential attachment value
Args:
df (pandas dataframe): dataframe containing all SureChemBL compounds
n (int): the number of highest-degree compounds to select
start (int): 1st year of the range
stop (int): last year of the range
"""
print("----------", start, stop, "----------")
#Load degree and preferential attachment data for the given year range
full_id_degrees = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_" +
str(start) + "_" + str(stop) + ".p", "rb"))
pref_attach_dict = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_" +
str(start) + "_" + str(stop) + ".p", "rb"))
#Find n compounds with largest degree
highest_degree_cpds = heapq.nlargest(n,
full_id_degrees,
key=full_id_degrees.get)
highest_degree_cpds_df = df[df["SureChEMBL_ID"].isin(highest_degree_cpds)]
pref_attach_values = list(pref_attach_dict.values())
#Extra information to be added to the csv output file
degrees = []
pref_attach_highestCpd_values = []
pref_attach_percentiles = []
for cpd in tqdm(highest_degree_cpds_df["SureChEMBL_ID"]):
#Degree of compound
degrees.append(full_id_degrees[cpd][-1])
#Preferential attachment value
pref_attach_highestCpd_values.append(pref_attach_dict[cpd])
#Percentile of preferential attachment value
pref_attach_percentiles.append(
stats.percentileofscore(pref_attach_values, pref_attach_dict[cpd]))
highest_degree_cpds_df["degree"] = degrees
highest_degree_cpds_df["pref_attach_value"] = pref_attach_highestCpd_values
highest_degree_cpds_df["pref_attach_percentile"] = pref_attach_percentiles
highest_degree_cpds_df.to_csv(
"G:\\Shared drives\\SureChemBL_Patents\\Cpd_Data/highest_degree_data_" +
str(start) + "_" + str(stop) + "_1000.csv")
print()
def find_llanos_cpds(fp, df):
""" Tests various compounds found in Llanos et al (2019) in SureChemBL data
Llanos et al used Reaxys data to find the most popular compounds. This checks
where those compounds appear, if at all, in SureChemBL patent data
Args:
fp (string): directory where llanos_cpds.csv is written
df (pandas dataframe): dataframe of all SureChemBL chemistry
"""
cpds_1980_2015_inchi = {
"acetic anhydride":
"InChI=1S/C4H6O3/c1-3(5)7-4(2)6/h1-2H3",
"methanol":
"InChI=1S/CH4O/c1-2/h2H,1H3",
"methyl iodide":
"InChI=1S/CH3I/c1-2/h1H3",
"diazomethane":
"InChI=1S/CH2N2/c1-3-2/h1H2",
"formaldehyde":
"InChI=1S/CH2O/c1-2/h1H2",
"benzaldehyde":
"InChI=1S/C7H6O/c8-6-7-4-2-1-3-5-7/h1-6H",
"copper(II) oxide":
"InChI=1S/Cu.O",
"ethanol":
"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3",
"benzoyl chloride":
"InChI=1S/C7H5ClO/c8-7(9)6-4-2-1-3-5-6/h1-5H",
"carbon monoxide":
"InChI=1S/CO/c1-2",
"water (2000)":
"InChI=1S/H2O/h1H2",
"Trifluoroacetic acid (2000)":
"InChI=1S/C2HF3O2/c3-2(4,5)1(6)7/h(H,6,7)",
"Phenylacetylene (2000)":
"InChI=1S/C8H6/c1-2-8-6-4-3-5-7-8/h1,3-7H",
"benzyl bromide (2000)":
"InChI=1S/C7H7Br/c8-6-7-4-2-1-3-5-7/h1-5H,6H2"
}
#Find stats for Llanos compounds - use 2015 data for stats (I really need to make a consensus graph)
full_id_degrees = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_2015_2019.p",
"rb"))
pref_attach_dict = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_2015_2019.p",
"rb"))
pref_attach_values = list(pref_attach_dict.values())
#Loop through Llanos compounds
with open(fp + "llanos_cpds.csv", "a") as f:
f.write(
"name,inchi,SureChemBL_ID,degree,pref_attach_value,pref_attach_percentile\n"
)
for name, inchi in cpds_1980_2015_inchi.items():
s = df[df["InChI"] == inchi]
if not s.empty: #if SureChemBL holds that compound, save id & stats
#Degree of compound
degree = full_id_degrees[s.iloc[0]["SureChEMBL_ID"]][-1]
#Preferential attachment value
pref_attach_value = pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]]
#Percentile of preferential attachment value
pref_attach_percentile = stats.percentileofscore(
pref_attach_values,
pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]])
f.write(name + ",\"" + inchi + "\"," +
s.iloc[0]["SureChEMBL_ID"] + "," + str(degree) + "," +
str(pref_attach_value) + "," +
str(pref_attach_percentile) + "\n")
else: #if not, no name nor stats
f.write(name + ",\"" + inchi + "\",na,na,na,na\n")
def build_month_increments(start, stop):
""" Build all monthly increments from the start year to stop year in the
format YEAR-MONTH
Args:
start (int): start year of increments
stop (int): end year of increments
Returns:
list: list of strings holding the YEAR-MONTH increments
"""
months = []
while start <= stop:
for month in [
"01", "02", "03", "04", "05", "06", "07", "08", "09", "10",
"11", "12"
]:
months.append(str(start) + "-" + month)
start += 1
return months
def sample_compounds_unique(n, months, cpds, cpd_df):
""" Sample compounds which are uniquely added in a specific month
This uniqueness is determined by checking when a compound is added in a month
and has not been present in the patent record before that month.
Args:
n (int): Number of compounds to sample every month
months (list): list of months to sample from
cpds (list): all SureChemBL IDs of compounds added in a specific month
cpd_df (pandas dataframe): Master dataframe of all compounds
"""
sample_inchis = {}
print("----- Sampling unique compounds -----")
for i in tqdm(range(len(months))):
offset = 216 #Account for starting in 1980 instead of 1962
#Only sample if there are more than 1000 compounds
if len(cpds[i+offset]) > n:
sample_cpds = sample(cpds[i+offset], n)
else:
sample_cpds = cpds[i+offset]
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds)]
sample_inchis[months[i]] = list(sub_df["InChI"])
print("\n----- Saving compounds -----")
pickle.dump(sample_inchis, file=open("Data/sample_inchi_1000_NEW.p", "wb"))
def sample_compounds(n1, n2, months, cpd_df):
""" Sample n compounds from each month, initially with overlap allowed
//TODO: fix so that only unique-to-that-month compounds are sampled
Args:
n (int): number of compounds to sample
n2 (int): another number of compounds to sample
months (string): description of month, e.g. 1980-01
cpd_df (pandas dataframe): contains information for each compound in SureChemBL, including InChIKey
Returns:
list: list of all randomly sampled compounds (in inchi?)
"""
#Inchis for all sampled compounds
sample_inchis_n1 = {}
sample_inchis_n2 = {}
print("----- Sampling Compounds ------\n")
for month in tqdm(months):
cpds = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\CpdPatentIdsDates\\unique_cpds_"
+ month + ".p", "rb"))
sample_cpds_n1 = sample(cpds, n1)
sample_cpds_n2 = sample(cpds, n2)
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n1)]
sample_inchis_n1[month] = list(sub_df["InChI"])
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n2)]
sample_inchis_n2[month] = list(sub_df["InChI"])
#Save memory by removing cpd datframe and monthly compounds
del (cpd_df)
del (cpds)
#Save sampled inchis to pickle files
print("\n----- Saving Data -----")
pickle.dump(sample_inchis_n1, file=open("Data/sample_inchi_100.p", "wb"))
pickle.dump(sample_inchis_n2, file=open("Data/sample_inchi_1000.p", "wb"))
def main():
# ### Highest Degree compounds ###
data_fp = "G:\\Shared drives\\SureChemBL_Patents\\Cpd_Data\\"
# # build_cpd_df(data_fp) #NOTE: only needs to be run once
cpd_df = pickle.load(file=open(data_fp + "SureChemBL_allCpds.p", "rb"))
print(cpd_df.columns)
# ### Statistics over highest degree compounds ###
# n = 1000 #Number of compounds to find
# for range in [(1980, 1984), (1985, 1989), (1990, 1994), (1995, 1999),
# (2000, 2004), (2005, 2009), (2010, 2014), (2015, 2019)]:
# find_highest_degrees(cpd_df, n, range[0], range[1])
# ### Testing Llanos et al (2019) compounds ###
# find_llanos_cpds(data_fp, cpd_df)
### Sampling compounds for MA analysis ###
month_unique_cpds = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\CpdPatentIdsDates\\unique_cpds_AllMonths.p",
"rb"))
sample_compounds_unique(1000, build_month_increments(1980, 2019),
month_unique_cpds, cpd_df)
# sample_compounds(100, 1000, build_month_increments(1980, 2019), cpd_df)
### MA Analysis ###
if __name__ == "__main__":
main()
```
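find_highest_degrees leans on two small idioms: heapq.nlargest keyed by the dict's own get to pull the top-degree compounds, and scipy's percentileofscore to place one attachment value inside the whole distribution. A toy sketch of both, with illustrative values:
```python
import heapq
import scipy.stats as stats

# Degree per compound id; the real full_id_degrees stores a history list per id.
degrees = {"SCHEMBL1": 12, "SCHEMBL2": 97, "SCHEMBL3": 45, "SCHEMBL4": 3}

# Two highest-degree compounds, keyed by the dict lookup itself.
print(heapq.nlargest(2, degrees, key=degrees.get))  # ['SCHEMBL2', 'SCHEMBL3']

# Percentile of one preferential-attachment value inside the distribution.
pref_attach_values = [0.1, 0.4, 0.7, 2.5, 9.0]
print(stats.percentileofscore(pref_attach_values, 2.5))  # 80.0
```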
|
{
"source": "jfmatth/openshift-django16",
"score": 2
}
|
#### File: openshift-django16/mysite/views.py
```python
from django.views.generic import View
from django.http import HttpResponse
class Index(View):
def get(self, request, *args, **kwargs):
return HttpResponse('django 1.6 on Openshift')
```
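To serve this class-based view, a URLconf entry has to point at `Index.as_view()`. Below is a minimal urls.py sketch following Django 1.6 conventions; the module path and the root URL pattern are assumptions, not taken from the repo.
```python
from django.conf.urls import patterns, url

from mysite.views import Index

urlpatterns = patterns(
    '',
    url(r'^$', Index.as_view(), name='index'),
)
```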
|
{
"source": "jfmcoronel/django-auth-lti",
"score": 3
}
|
#### File: django_auth_lti/tests/test_verification.py
```python
from unittest import TestCase
from unittest.mock import MagicMock
from django_auth_lti.verification import is_allowed
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
class TestVerification(TestCase):
def test_is_allowed_config_failure(self):
request = MagicMock(LTI={})
allowed_roles = ["admin", "student"]
self.assertRaises(ImproperlyConfigured, is_allowed,
request, allowed_roles, False)
def test_is_allowed_success(self):
request = MagicMock(LTI={"roles": ["admin"]})
allowed_roles = ["admin", "student"]
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertTrue(user_is_allowed)
def test_is_allowed_success_one_role(self):
request = MagicMock(LTI={"roles": ["admin"]})
allowed_roles = "admin"
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertTrue(user_is_allowed)
def test_is_allowed_failure(self):
request = MagicMock(LTI={"roles":[]})
allowed_roles = ["admin", "student"]
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertFalse(user_is_allowed)
def test_is_allowed_failure_one_role(self):
request = MagicMock(LTI={"roles":[]})
allowed_roles = "admin"
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertFalse(user_is_allowed)
def test_is_allowed_exception(self):
request = MagicMock(LTI={"roles":["TF"]})
allowed_roles = ["admin", "student"]
self.assertRaises(PermissionDenied, is_allowed,
request, allowed_roles, True)
```
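The tests above pin down the expected behaviour of is_allowed: it reads LTI roles off the request, accepts either a single role string or a list, returns a boolean, and optionally raises PermissionDenied. The sketch below is an illustrative reconstruction that satisfies these tests — it is not the library's actual implementation.
```python
from django.core.exceptions import ImproperlyConfigured, PermissionDenied

def is_allowed(request, allowed_roles, raise_exception):
    # A single role may be passed as a plain string.
    if isinstance(allowed_roles, str):
        allowed_roles = [allowed_roles]

    if "roles" not in request.LTI:
        raise ImproperlyConfigured("LTI roles missing from request")

    user_is_allowed = any(role in request.LTI["roles"] for role in allowed_roles)
    if not user_is_allowed and raise_exception:
        raise PermissionDenied
    return user_is_allowed
```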
|
{
"source": "jfm-data/NHLwagers",
"score": 3
}
|
#### File: jfm-data/NHLwagers/streamlit_OU.py
```python
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import altair as alt
from requests import get
import re
import os
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import datetime
import time
import matplotlib.pyplot as plt
import statsmodels.api as sm
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
geolocator = Nominatim(user_agent='myuseragent')
import lxml
import plotly.express as px
from PIL import Image
#with open("styles/style.css") as f:
# st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
st.set_page_config(
page_title="O/U Hockey Analytics",
page_icon=":ice_hockey_stick_and_puck:"
)
#Dummy data to get the header to display correctly
st.markdown("""<Head>
<Title> Test Title</Title><link rel="shortcut icon" href="favicon.ico" type="image/x-icon"> </Head>""",unsafe_allow_html=True)
#Title/Header
st.markdown("""<h1 style="text-align:center;color:white;font-weight:bolder;font-size:70px;font-family:helvetica; background:
-webkit-linear-gradient(#a73305,#000000,#093ff0); -webkit-background-clip:
text;-webkit-text-fill-color: transparent;">NHL<br>Wager<br>Analytics</h1>""",unsafe_allow_html=True)
# Load data
data_load_state = st.text('Checking and Fetching Data...')
#####################################
#### Data Gathering and Cleaning ####
#####################################
master_df = pd.read_csv('master_df.csv')
master_df = master_df.dropna(thresh=10)
start = pd.to_datetime(master_df.Date[-1:]).dt.date.values[0]+datetime.timedelta(days=1)
today = datetime.date.today()
yesterday = today-datetime.timedelta(days = 1)
#Function to covert dates to string
def covert_dates(date1, date2):
covert_list = []
days = pd.date_range(date1, date2, freq='d')
for i in range(len(days)):
covert_list.append(int(days[i].strftime('%Y%m%d')))
return covert_list
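#Illustrative call (dates assumed): covert_dates(datetime.date(2021, 1, 13), datetime.date(2021, 1, 14))
#returns [20210113, 20210114], the integer form the sportsdatabase query expects.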
#Function to fetch missing data
@st.cache
def get_data(date1, date2):
new_df = pd.DataFrame()
for day in covert_dates(date1, date2):
site = f"https://sportsdatabase.com/nhl/query?output=default&sdql=date%3D{day}&submit=++S+D+Q+L+%21++"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(site, headers=hdr)
page = urlopen(req)
soup = BeautifulSoup(page)
tables = soup.find('table', attrs={'id':'DT_Table'})
page_df = pd.read_html(str(tables))[0]
new_df = pd.concat([new_df, page_df])
time.sleep(1)
return new_df
#Check if the data needs updating
if start <= today:
new_data = get_data(start, today)
master_df = pd.concat([master_df, new_data])
#Save updated data as csv
#master_df.to_csv("master_df.csv", index=False)
def clean_data(df):
df.Date =pd.to_datetime(df.Date)
df= df.sort_values(by=['Team', 'Date']).reset_index()
df.insert(2, "Date_Prev", df.Date.shift(1))
df.insert(2, "Days_Rest", (df.Date_Prev-df.Date)*-1)
df = df.drop(['index','Season', 'P1', 'P2', 'P3'], axis=1)
return df
#Fucntion to identify a team change to break streak counts
def trips(home_or_away, TeamChange, Site):
list =[]
x = 0
for i, j in zip(TeamChange, Site):
if i == False:
x = x
else:
x = 0
if j == home_or_away:
x += 1
else:
x = 0
list.append(x)
return list
#Function to calculate the distance the road team is from home
def distance_calc(df):
df.insert(4,"Team_City", df.Team.map(team_dict['City']))
df.insert(6,"Opp_City", df.Opp.map(team_dict['City']))
df.insert(9,"Team_point", df.Team.map(team_dict['Citypoint']))
df.insert(10,"Opp_point", df.Opp.map(team_dict['Citypoint']))
df['Distance'] = df.apply(lambda x: geodesic(x['Team_point'],x['Opp_point']).km, axis=1)
df['Team_distance'] = df.apply(lambda x: 0 if x.Site == "home" else x.Distance, axis=1)
df['Opp_distance'] = df.apply(lambda x: 0 if x.Site == "away" else x.Distance, axis=1)
df = df.drop(['Team_point','Distance','Opp_point'], axis=1)
return df
#Function to count the current streak of home or away games
def road_trips(df):
df.insert(4, "TeamChange", df["Team"].shift(1, fill_value=df["Team"].head(1)) != df["Team"])
df.insert(10, "Home_Stand", trips("home", df.TeamChange, df.Site))
df.insert(11, "Road_Trip", trips("away", df.TeamChange, df.Site))
df.Days_Rest = df.Days_Rest.dt.days
df.Days_Rest = df.Days_Rest.fillna(5)
df.Days_Rest = df.Days_Rest.astype(int)-1
df.loc[df.Days_Rest < 0, 'Days_Rest'] = 5
df = df.drop('TeamChange', axis=1)
return df
#Function to pair games into a single record -- for O/U analysis
def opp_func (df):
df.insert(2,"Opp_Days_Rest", eda_df.Oppkey.map(opp_days_rest))
df.insert(10,"Opp_home_stand", eda_df.Oppkey.map(opp_home_stand))
df.insert(11,"Opp_road_trip", eda_df.Oppkey.map(opp_road_trip))
return df
#Function to calculate the unit return of each game and team
def unit_value(Line, Result):
if Line < 0 and Result == 'W':
return 1
elif Line < 0 and Result == 'L':
return Line/100
elif Line > 0 and Result == 'W':
return Line/100
elif Line > 0 and Result == 'L':
return -1
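#Illustrative returns for unit_value (values assumed, not from the data):
#a -150 favourite pays 1 unit on a win and costs 1.5 units on a loss;
#a +120 underdog pays 1.2 units on a win and costs 1 unit on a loss.
#unit_value(-150, 'W') -> 1, unit_value(-150, 'L') -> -1.5
#unit_value(120, 'W') -> 1.2, unit_value(120, 'L') -> -1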
nhltable= pd.read_csv('nhltable.csv')
team_dict = nhltable.set_index('Team').to_dict()
eda_df = clean_data(master_df)
eda_df = distance_calc(eda_df)
eda_df = road_trips(eda_df)
#Adding Division
eda_df = pd.merge(eda_df, nhltable[['Team', 'Division']], on='Team', how="left" )
#Create keys for pairing
Teamkey = []
Oppkey = []
for i in range(len(eda_df.Date)):
Teamkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Team[i])
Oppkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Opp[i])
eda_df['Oppkey'] = Oppkey
opp_days_rest = dict(zip(Teamkey, eda_df.Days_Rest))
opp_home_stand = dict(zip(Teamkey, eda_df.Home_Stand))
opp_road_trip = dict(zip(Teamkey, eda_df.Road_Trip))
opp_func(eda_df)
eda_df.Final = eda_df.Final.fillna('0-0')
eda_df = eda_df.fillna(0)
eda_df = pd.concat([eda_df, pd.get_dummies(eda_df.OUr)], axis=1)
goals_df = eda_df['Final'].str.split('-', expand=True).rename(columns={0:'Team_Goals', 1:'Opp_Goals'}).astype(int)
eda_df = pd.concat([eda_df, goals_df], axis=1)
eda_df['total_O'] = eda_df.groupby('Team')['O'].cumsum()
eda_df['total_U'] = eda_df.groupby('Team')['U'].cumsum()
eda_df['total_P'] = eda_df.groupby('Team')['P'].cumsum()
eda_df['total_Team_goals'] = eda_df.groupby('Team')['Team_Goals'].cumsum()
eda_df['total_Opp_goals'] = eda_df.groupby('Team')['Opp_Goals'].cumsum()
#eda_df = eda_df.loc[eda_df['OUr']!='P']
#eda_df['y'] = (eda_df.OUr=='O').astype(int)
eda_df['Team_U'] = eda_df.groupby('Team')['total_U'].transform('max')
eda_df['Team_O'] = eda_df.groupby('Team')['total_O'].transform('max')
eda_df['Opp_U'] = eda_df.groupby('Opp')['total_U'].transform('max')
eda_df['Opp_O'] = eda_df.groupby('Opp')['total_O'].transform('max')
eda_df['Team_Goals_Scored'] = eda_df.groupby('Team')['total_Team_goals'].transform('max')
eda_df['Team_Goals_Allowed'] = eda_df.groupby('Team')['total_Opp_goals'].transform('max')
eda_df['Opp_Goals_Scored'] = eda_df.groupby('Opp')['total_Team_goals'].transform('max')
eda_df['Opp_Goals_Allowed'] = eda_df.groupby('Opp')['total_Opp_goals'].transform('max')
#eda_df['Units'] = eda_df.apply(lambda x: unit_value(x.Line, x.SUr), axis=1)
#Tonight's games data
today_np = np.datetime64(today)
tonight_df= eda_df[['Team','Opp','Total','Home_Stand','Opp_road_trip','Days_Rest','Opp_Days_Rest', 'Opp_distance', 'Team_U',
'Opp_U','Team_O', 'Opp_O','Team_Goals_Scored', 'Opp_Goals_Scored','Team_Goals_Allowed', 'Opp_Goals_Allowed', "Date",'Site']]
tonight_df = tonight_df.loc[(tonight_df['Date']==today_np) & (tonight_df['Site']=='home')].reset_index(drop=True)
#Seperating the two EDA dataframes
eda_OU = eda_df.loc[(eda_df['Site']=='home') & (eda_df['Date']<today_np)]
eda_OU.insert(3, "Combined_Rest", eda_OU.loc[:,'Days_Rest'] + eda_OU.loc[:,'Opp_Days_Rest'])
cut_labels = [500, 1000, 1500, 2000, 3000, 4000]
cut_bins = [0, 500, 1000, 1500, 2000, 3000, 4000]
eda_OU['Distance'] = pd.cut(eda_OU.loc[:,'Opp_distance'], bins=cut_bins, labels= cut_labels)
eda_OU = eda_OU.sort_values('Date').reset_index(drop=True)
# Notify user that the data was successfully loaded.
data_load_state.text('Checking and Fetching Data...Done & done!')
st.write("Check out this [link to the sister site for Team Analysis](https://share.streamlit.io/jfm-data/nhlwagers/main/streamlit_Team.py)")
#############################################
### Streamlit Design ######################
############################################
st.subheader("Tonight's Games")
#st.dataframe(tonight_df.style.background_gradient(cmap='viridis', low=0.7, high=0).set_precision(1))
df1 = tonight_df.style.background_gradient(cmap='viridis', low=0.7, high=0).set_precision(1)
df2 = tonight_df.iloc[:,:3].style.set_precision(1)
st.table(df2)
st.dataframe(df1)
######################
## Space for Machine Learning Model
####################
st.subheader('Predictions')
st.write('*Coming soon....* :sunglasses:')
st.header('O/U Analysis')
date_select = st.slider(
"Select Dates",
datetime.date(2021,1,13), yesterday,
value=(datetime.date(2021,1,13), yesterday),
format="MM/DD/YY")
st.write("Start time:", date_select[0])
st.write("Endtime:", date_select[1])
filtered_df= eda_OU[(eda_OU['Date'] >= np.datetime64(date_select[0]))
& (eda_OU['Date'] <= np.datetime64(date_select[1]))]
#st.subheader('Overall')
fig_OU = px.histogram(filtered_df, x="Total", color='OUr',
barmode='group', template='plotly_dark', title="Totals",
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig_OU, use_container_width=True)
#st.subheader('By Combined Days Rest')
fig_DaysRest = px.histogram(filtered_df[filtered_df["Combined_Rest"] <10],
x="Combined_Rest", color='OUr', title='Test',
barmode='group', template='plotly_dark', color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig_DaysRest, use_container_width=True)
#st.subheader('By Distance of Road Team from Home')
fig3 = px.histogram(filtered_df, x="Distance", color='OUr',
barmode='group', template='plotly_dark',title='By Distance of Road Team from Home',
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig3, use_container_width=True)
#st.subheader('By Length of Road Trip')
fig4 = px.histogram(filtered_df, x="Opp_road_trip", color='OUr',
barmode='group', template='plotly_dark', title='By Length of Road Trip',
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig4, use_container_width=True)
#st.subheader('By Length of Home Stand')
fig5 = px.histogram(filtered_df, x="Home_Stand", color='OUr',title='By Length of Home Stand',
barmode='group', template='plotly_dark', color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig5, use_container_width=True)
st.subheader('Division Analysis')
div_select = st.selectbox("Select Division?",
list(pd.unique(eda_df.Division)))
div_filter = eda_df[eda_df['Division']==div_select]
fig_OU = px.histogram(div_filter, x="Team", color='OUr', barmode='group', template= 'simple_white', title="Totals",
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig_OU, use_container_width=True)
#st.subheader('Select Parameters for situational outputs')
#Filtering For Days of Rest
#Days_to_filter = st.slider('Days of Rest', 0, max(eda_OU.Days_Rest), 3)
#st.text('Number of Days Rest %s' % Days_to_filter)
#filtered_data = eda_OU[eda_OU['Days_Rest'] == Days_to_filter]
#Filtering For Distance
#Distance_to_filter = st.slider('Distance of Opponent', 0.0, max(data.Distance), (0.0, 500.0))
#st.text('Distance From Home %s' % Distance_to_filter[0])
#filtered_data = filtered_data[(filtered_data['Distance'] >= Distance_to_filter[0]) & (filtered_data['Distance'] <= Distance_to_filter[1])]
# #Filtering For Home and Away
# st.header('O/U Team Analysis -- TO BE MOVED')
# team_select = st.selectbox("Select Team",
# list(pd.unique(eda_df.Team)))
# st.write('You selected', team_select)
# filtered_data = eda_df[eda_df['Team'] == team_select]
# home_away = st.selectbox("Is the Team Home or Away?",
# ('home', 'away'))
# st.write('You selected', home_away)
# filtered_data = filtered_data[filtered_data['Site'] == home_away]
# days_rest = st.slider('Days Rest', 0, 5, 2)
# filtered_data = filtered_data[filtered_data['Days_Rest'] == days_rest]
# st.subheader('O/U by Selected Inputs')
# fig_OU_team = px.histogram(filtered_data, x="Total", color='OUr',
# barmode='group', template='plotly_dark')
# st.plotly_chart(fig_OU_team, use_container_width=True)
#Filtering For Distance
#Distance_to_filter = st.slider('Distance From Home', 0.0, max(data.Distance), (0.0, 500.0))
#st.text('Distance From Home %s' % Distance_to_filter[0])
#filtered_data = filtered_data[(filtered_data['Distance'] >= Distance_to_filter[0]) & (filtered_data['Distance'] <= Distance_to_filter[1])]
#st.subheader('Selected # of Days on Home Stand')
#st.subheader('Selected # of Days on Road Trip')
#if genre == 'Comedy':
# st.write('You selected comedy.')
#else:
# st.write("You didn't select comedy.")
#fig = px.histogram(data, x="Date_diff", color='OUr',
# barmode='group', template='plotly_white')
#st.plotly_chart(fig, use_container_width=True)
#st.subheader('Home Stand O/U Results')
#fig1 = px.histogram(data[data["Home_Stand"]>0], x="Home_Stand", color='OUr',
# barmode='group', template='plotly_white')
#st.plotly_chart(fig1, use_container_width=True)
#st.subheader('Road Trip O/U Results')
#fig2 = px.histogram(data[data["Road_Trip"]>0], x="Road_Trip", color='OUr',
# barmode='group', template='plotly_white')
#st.plotly_chart(fig2, use_container_width=True)
st.text("Raw Data")
st.dataframe(eda_df.iloc[:,1:])
st.header('Unit Analysis')
unit_team = st.selectbox("Select Team for Unit",
list(pd.unique(eda_df.Team)))
st.write('You selected', unit_team)
#Filter for OU Line
#Line_to_filter = st.slider('Unit Line', 0.0, max(eda_OU.Total), (0.0, 5.5))
#filtered_data2 = filtered_data[(eda_OU['Total'] >= Line_to_filter[0]) &
# (eda_OU['Total'] <= Line_to_filter[1])]
```
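distance_calc above looks up a city point for each team and opponent and measures how far the road team travelled with geopy's geodesic. A standalone sketch of that single calculation (coordinates are illustrative; geopy is assumed to be installed):
```python
from geopy.distance import geodesic

# Approximate arena coordinates (lat, lon); illustrative values only.
toronto = (43.6435, -79.3791)
montreal = (45.4961, -73.5693)

# Kilometres the road team is from home, as used for Opp_distance.
print(round(geodesic(toronto, montreal).km, 1))
```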
|
{
"source": "jfmendez11/supervoices",
"score": 3
}
|
#### File: supervoices/conversion_batch/conversion_batch.py
```python
import os
import pymongo
from bson.objectid import ObjectId
import sys
import boto3
import time
import requests
import dotenv
import traceback
from shutil import copyfile
import sendgrid
from pydub import AudioSegment
def get_audio_file_for_conversion(url):
'''
Method that requests the audio file from the file server
'''
response = requests.get(url)
print 'FILE OBTAINED FROM SERVER'
# sys.stdout.flush()
return response.content
def convert_audio(path):
'''
Method that converts any audio format into mp3
'''
if not path.endswith(".mp3"):
# File extension is obtained
split = path.split(".")
extension = split[len(split) - 1]
# The file path is split to obtain the file name
path_split = path.split("/")
file_name = path_split[len(path_split) - 1]
file_name = file_name.replace(extension, "mp3")
# Audio load and export to mp3
audio = AudioSegment.from_file(path, format=extension)
export_file_name = "./temp/converted_" + file_name
audio.export(export_file_name)
print 'FILE CONVERTED AND EXPORTED'
return export_file_name
else:
path_split = path.split("/")
file_name = path_split[len(path_split) - 1]
source_path = "./temp/" + file_name
export_file_name = "./temp/converted_" + file_name
copyfile(source_path, export_file_name)
return export_file_name
def post_converted(path, base_url, entry):
'''
Method that posts the converted file to the file server
'''
# Upload entry converted to S3
s3 = boto3.client(
's3',
aws_access_key_id=os.getenv("ACCESS_KEY"),
aws_secret_access_key=os.getenv("SECRET_KEY"),
region_name=os.getenv('AWS_REGION')
)
base_url = base_url.split('/')
del base_url[len(base_url) - 1]
del base_url[0]
del base_url[0]
del base_url[0]
base_url = '/'.join(base_url)
file_name = path.split('/')
file_name = file_name[len(file_name) - 1]
base_url = base_url + '/' + file_name
s3.upload_file(path, os.getenv('S3_BUCKET_NAME'), base_url, ExtraArgs={'ACL': 'public-read'})
# Update Mongo
myclient = pymongo.MongoClient(os.getenv('MLAB_URI'))
mydb = myclient[os.getenv("MLAB_DATABASE")]
mycol = mydb["entries"]
myquery = {"_id": ObjectId(entry['entry_id'])}
newvalues = {"$set": {
"STATUS": "Convertida",
"URL_CONVERTED": os.getenv('CLOUDFRONT_DOMAIN_NAME') + '/' + base_url
}}
mycol.update_one(myquery, newvalues)
print 'FILE SUBMITTED: ' + entry['url_original']
def process_entry(entry):
'''
Method that processes an audio entry
'''
correct = True
# Request audio file from server
try:
if entry['url_original'] != '':
print 'PROCESSING: ' + entry['url_original']
url_original = entry['url_original']
audio_file = get_audio_file_for_conversion(url_original)
file_name = url_original.split('/')
file_name = file_name[len(file_name) - 1]
# Audio saving in temporal directory
local_path = './temp/' + file_name
temp = open(local_path, "wb")
temp.write(audio_file)
temp.close()
# Audio conversion
path_converted = convert_audio(local_path)
post_converted(path_converted, url_original, entry)
try:
os.remove(path_converted)
os.remove(local_path)
except Exception:
print "CANNOT REMOVE FILE"
entry_data = get_mail(entry)
entry['name'] = entry_data['NAME']
entry['email'] = entry_data['EMAIL']
entry['url_contest'] = os.getenv('WS_URL') + entry['contest_id']
print("READY TO SEND EMAIL")
send_mail(entry)
print 'FILE REMOVED: ' + entry['url_original']
# sys.stdout.flush()
except Exception as e:
print 'AN ERROR HAS OCCURRED'
print traceback.print_exc()
correct = False
finally:
print('---------------------')
# end = time.time()
# start = entry["created_at"]
# duration = end - float(start) / 1000
# with open("log.txt", "a") as myfile:
# myfile.write(entry['url_original'] + " " + str(duration) + "\n")
return correct
def execute_batch():
'''
Method that executes the batch conversion task
'''
# Create SQS client
sqs = boto3.client(
'sqs',
aws_access_key_id=os.getenv("ACCESS_KEY"),
aws_secret_access_key=os.getenv("SECRET_KEY"),
region_name=os.getenv('AWS_REGION')
)
queue_url = os.getenv('SQS_URL')
entries = []
# Receive message from SQS queue
response = sqs.receive_message(
QueueUrl=queue_url,
AttributeNames=[
'EntryId',
'ContestId',
'RecordingPath'
],
MaxNumberOfMessages=2,
MessageAttributeNames=[
'All'
]
)
if 'Messages' in response:
messages = response['Messages']
for message in messages:
receipt_handle = message['ReceiptHandle']
# Delete received message from queue
entries.append({
'url_original': message['MessageAttributes']['RecordingPath']['StringValue'],
'entry_id': message['MessageAttributes']['EntryId']['StringValue'],
'contest_id': message['MessageAttributes']['ContestId']['StringValue'],
'receipt_handle': receipt_handle
})
for en in entries:
correct = process_entry(en)
if correct:
sqs.delete_message(
QueueUrl=queue_url,
ReceiptHandle=en['receipt_handle']
)
def get_mail(entry):
myclient = pymongo.MongoClient(os.getenv('MLAB_URI'))
mydb = myclient[os.getenv("MLAB_DATABASE")]
mycol = mydb["entries"]
myquery = {"_id": ObjectId(entry['entry_id'])}
mydoc = mycol.find_one(myquery)
return mydoc
def send_mail(entry):
sender_email = os.getenv('SENDER_EMAIL')
receiver_email = entry['email']
receiver_name = entry['name']
sg = sendgrid.SendGridAPIClient(api_key=os.getenv('SENDGRID_API_KEY'))
subject = "Tu entrada ha sido agregada al concurso!"
from_email = sender_email
to_email = receiver_email
text = """\
Gracias por usar SuperVoices
{name}, tu voz ya se encuentra disponible en el concurso!
La entrada ya fue cargada a la página del concurso donde podra ser revisada por el organizador.
Visita el concurso: https://supervoices10.herokuapp.com/#/contests/{contestURL}
""".format(contestURL=entry['url_contest'], name=receiver_name)
html = """\
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0;">
<meta name="format-detection" content="telephone=no"/>
<style>
/* Reset styles */
body {{ margin: 0; padding: 0; min-width: 100%; width: 100% !important; height: 100% !important;}}
body, table, td, div, p, a {{ -webkit-font-smoothing: antialiased; text-size-adjust: 100%; -ms-text-size-adjust: 100%; -webkit-text-size-adjust: 100%; line-height: 100%; }}
table, td {{ mso-table-lspace: 0pt; mso-table-rspace: 0pt; border-collapse: collapse !important; border-spacing: 0; }}
img {{ border: 0; line-height: 100%; outline: none; text-decoration: none; -ms-interpolation-mode: bicubic; }}
#outlook a {{ padding: 0; }}
.ReadMsgBody {{ width: 100%; }} .ExternalClass {{ width: 100%; }}
.ExternalClass, .ExternalClass p, .ExternalClass span, .ExternalClass font, .ExternalClass td, .ExternalClass div {{ line-height: 100%; }}
/* Rounded corners for advanced mail clients only */
@media all and (min-width: 560px) {{
.container {{ border-radius: 8px; -webkit-border-radius: 8px; -moz-border-radius: 8px; -khtml-border-radius: 8px;}}
}}
/* Set color for auto links (addresses, dates, etc.) */
a, a:hover {{
color: #127DB3;
}}
.footer a, .footer a:hover {{
color: #999999;
}}
</style>
<!-- MESSAGE SUBJECT -->
<title>SuperVoices</title>
</head>
<p>{prevText}</p>
<!-- BODY -->
<body topmargin="0" rightmargin="0" bottommargin="0" leftmargin="0" marginwidth="0" marginheight="0" width="100%" style="border-collapse: collapse; border-spacing: 0; margin: 0; padding: 0; width: 100%; height: 100%; -webkit-font-smoothing: antialiased; text-size-adjust: 100%; -ms-text-size-adjust: 100%; -webkit-text-size-adjust: 100%; line-height: 100%;
background-color: #F0F0F0;
color: #000000;"
bgcolor="#F0F0F0"
text="#000000">
<table width="100%" align="center" border="0" cellpadding="0" cellspacing="0" style="border-collapse: collapse; border-spacing: 0; margin-top: 5%; padding: 0; width: 100%;" class="background"><tr><td align="center" valign="top" style="border-collapse: collapse; border-spacing: 0; margin: 0; padding: 0;"
bgcolor="#F0F0F0">
<table border="0" cellpadding="0" cellspacing="0" align="center"
width="560" style="border-collapse: collapse; border-spacing: 0; padding: 0; width: inherit;
max-width: 560px;" class="wrapper">
<!-- End of WRAPPER -->
</table>
<!-- WRAPPER / CONTEINER -->
<table border="0" cellpadding="0" cellspacing="0" align="center"
bgcolor="#FFFFFF"
width="560" style="border-collapse: collapse; border-spacing: 0; padding: 0; width: inherit;
max-width: 560px;" class="container">
<!-- HEADER -->
<tr>
<td align="center" valign="top" style="border-collapse: collapse; border-spacing: 0; margin: 0; padding: 0; padding-left: 6.25%; padding-right: 6.25%; width: 87.5%; font-size: 24px; font-weight: bold; line-height: 130%;
padding-top: 25px;
color: #000000;
font-family: sans-serif;" class="header">
Gracias por usar SuperVoices
</td>
</tr>
<!-- SUBHEADER -->
<tr>
<td align="center" valign="top" style="border-collapse: collapse; border-spacing: 0; margin: 0; padding: 0; padding-bottom: 3px; padding-left: 6.25%; padding-right: 6.25%; width: 87.5%; font-size: 18px; font-weight: 300; line-height: 150%;
padding-top: 5px;
color: #000000;
font-family: sans-serif;" class="subheader">
{name}, tu voz ya se encuentra disponible en el concurso!
</td>
</tr>
<!-- PARAGRAPH -->
<tr>
<td align="center" valign="top" style="border-collapse: collapse; border-spacing: 0; margin: 0; padding: 0; padding-left: 6.25%; padding-right: 6.25%; width: 87.5%; font-size: 17px; font-weight: 400; line-height: 160%;
padding-top: 25px;
color: #000000;
font-family: sans-serif;" class="paragraph">
La entrada ya fue cargada a la pagina del concurso donde podra ser revisada por el organizador.
</td>
</tr>
<tr>
<td align="center" valign="top" style="border-collapse: collapse; border-spacing: 0; margin: 0; padding: 0; padding-left: 6.25%; padding-right: 6.25%; width: 87.5%;
padding-top: 25px;" class="line"><hr
color="#E0E0E0" align="center" width="100%" size="1" noshade style="margin: 0; padding: 0;" />
</td>
</tr>
<tr>
<td align="center" valign="top" style="border-collapse: collapse; border-spacing: 0; margin: 0; padding: 0; padding-left: 6.25%; padding-right: 6.25%; width: 87.5%; font-size: 17px; font-weight: 400; line-height: 160%;
padding-top: 20px;
padding-bottom: 25px;
color: #000000;
font-family: sans-serif;" class="paragraph">
Visita el concurso: <a href="{contestURL}" target="_blank" style="color: #127DB3; font-family: sans-serif; font-size: 17px; font-weight: 400; line-height: 160%;">supervoices.com</a>
</td>
</tr>
<!-- End of WRAPPER -->
</table>
<!-- End of SECTION / BACKGROUND -->
</td></tr></table>
</body>
</html>
""".format(prevText=text, contestURL=entry['url_contest'], name=receiver_name)
message = {
'personalizations': [
{
'to': [
{
'email': to_email
}
],
'subject': subject
}
],
'from': {
'email': from_email
},
'content': [
{
'type': 'text/html',
'value': html
}
]
}
# Send email
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
if __name__ == "__main__":
dotenv.load_dotenv(".env")
while True:
sys.stdout.flush()
execute_batch()
time.sleep(5)
```
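convert_audio above is the heart of the worker: pydub loads whatever container the entry arrived in and re-exports it as mp3. A standalone sketch of that single conversion step; the file names are illustrative, and pydub plus an ffmpeg backend are assumed to be available.
```python
from pydub import AudioSegment

# Load the original upload in its own container format, then re-encode as mp3.
audio = AudioSegment.from_file("original_entry.wav", format="wav")
audio.export("converted_entry.mp3", format="mp3")
```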
|
{
"source": "jfmennedy/fhempy",
"score": 2
}
|
#### File: lib/tuya_cloud/tuya_cloud_device.py
```python
import asyncio
import functools
import json
import colorsys
from tuya_iot.device import TuyaDevice
from fhempy.lib.generic import FhemModule
from fhempy.lib import fhem, fhem_pythonbinding, utils
class tuya_cloud_device:
def __init__(self, logger, fhemdevice: FhemModule):
self.logger = logger
self.fhemdev = fhemdevice
self.hash = fhemdevice.hash
async def Define(self, hash, args, argsh):
self._t_setupdev = args[3]
self._t_deviceid = args[4]
self._t_devicelist = []
self.hash["DEVICEID"] = self._t_deviceid
self.tuyaiot = None
self.default_code = None
await fhem.readingsSingleUpdate(self.hash, "state", "ready", 1)
self.fhemdev.create_async_task(self._init_device())
async def _init_device(self):
try:
await self._connect_to_setup_device()
await self._setup_device()
except Exception as ex:
self.logger.exception(ex)
async def _connect_to_setup_device(self):
while self.tuyaiot is None or self.tuyaiot.ready is False:
await asyncio.sleep(1)
self.tuyaiot = fhem_pythonbinding.getFhemPyDeviceByName(self._t_setupdev)
if self.tuyaiot is not None:
self.tuyaiot = self.tuyaiot.tuya_cloud_device
self.tuyaiot.register_tuya_device(self)
async def _setup_device(self):
# retrieve functions/status/types
self._t_specification = await utils.run_blocking(
functools.partial(
self.tuyaiot.device_manager.get_device_specification, self._t_deviceid
)
)
if self._t_specification["success"]:
self._t_specification = self._t_specification["result"]
else:
await fhem.readingsSingleUpdate(
self.hash, "state", self._t_specification["msg"], 1
)
self._t_specification = {"functions": [], "status": []}
# retrieve general infos
self._t_info = await utils.run_blocking(
functools.partial(
self.tuyaiot.device_manager.get_device_info, self._t_deviceid
)
)
self._t_info = self._t_info["result"]
await self.update_readings_dict(self._t_info)
# retrieve current status
self._t_status = await utils.run_blocking(
functools.partial(
self.tuyaiot.device_manager.get_device_status, self._t_deviceid
)
)
self._t_status = self._t_status["result"]
# setup set commands
await self._generate_set()
# update status
await self.update_readings_arr(self._t_status)
async def _generate_set(self):
set_conf = {}
for fct in self._t_specification["functions"]:
if fct["type"] == "Boolean":
set_conf[fct["code"]] = {
"options": "on,off",
"args": ["onoff"],
"function_param": fct,
"function": "set_boolean",
}
elif fct["type"] == "Enum":
options = json.loads(fct["values"])["range"]
set_conf[fct["code"]] = {
"options": ",".join(options),
"args": ["selected_val"],
"function_param": fct,
"function": "set_enum",
}
elif fct["type"] == "Integer":
spec = json.loads(fct["values"])
slider = f"slider,{spec['min']},{spec['step']},{spec['max']}"
set_conf[fct["code"]] = {
"options": slider,
"args": ["selected_val"],
"params": {"selected_val": {"format": "int"}},
"function_param": fct,
"function": "set_integer",
}
elif fct["type"] == "String":
set_conf[fct["code"]] = {
"args": ["new_val"],
"function_param": fct,
"function": "set_string",
}
elif fct["type"] == "Json":
set_conf[fct["code"]] = {
"args": ["new_val"],
"function_param": fct,
"function": "set_json",
}
if fct["code"] == "colour_data":
set_conf[fct["code"]]["function"] = "set_colour_data"
set_conf[fct["code"]]["options"] = "colorpicker,RGB"
elif fct["code"] == "colour_data_v2":
set_conf[fct["code"]]["function"] = "set_colour_data_v2"
set_conf[fct["code"]]["options"] = "colorpicker,RGB"
self.default_code = None
if "switch" in set_conf:
self.default_code = "switch"
elif "switch_1" in set_conf:
self.default_code = "switch_1"
elif "switch_led" in set_conf:
self.default_code = "switch_led"
if self.default_code is not None:
set_conf["on"] = {
"function_param": {"code": self.default_code},
"function": "set_boolean",
}
set_conf["off"] = {
"function_param": {"code": self.default_code},
"function": "set_boolean",
}
del set_conf[self.default_code]
self.fhemdev.set_set_config(set_conf)
async def set_boolean(self, hash, params):
code = params["function_param"]["code"]
onoff = False
if "onoff" in params:
if params["onoff"] == "on":
onoff = True
else:
if params["cmd"] == "on":
onoff = True
await self.send_commands([{"code": code, "value": onoff}])
async def set_enum(self, hash, params):
code = params["function_param"]["code"]
await self.send_commands([{"code": code, "value": params["selected_val"]}])
async def set_string(self, hash, params):
code = params["function_param"]["code"]
await self.send_commands([{"code": code, "value": params["new_val"]}])
async def set_json(self, hash, params):
code = params["function_param"]["code"]
await self.send_commands(
[{"code": code, "value": json.loads(params["new_val"])}]
)
async def set_integer(self, hash, params):
code = params["function_param"]["code"]
await self.send_commands([{"code": code, "value": params["selected_val"]}])
async def set_colour_data(self, hash, params):
# convert e.g. ff0000 to hsv (360, 100, 100) and set hsv values with json
hsv = self.fhemrgb2hsv(params["new_val"])
if self._t_info["category"] == "dj":
hsv["s"] = int(hsv["s"] / 10)
hsv["v"] = int(hsv["v"] / 10)
code = params["function_param"]["code"]
await self.send_commands([{"code": code, "value": hsv}])
async def set_colour_data_v2(self, hash, params):
# convert e.g. ff0000 to hsv (360, 1000, 1000) and set hsv values with json
hsv = self.fhemrgb2hsv(params["new_val"])
code = params["function_param"]["code"]
await self.send_commands([{"code": code, "value": hsv}])
def fhemrgb2hsv(self, rgb):
red = int(rgb[0:2], base=16)
green = int(rgb[2:4], base=16)
blue = int(rgb[4:6], base=16)
hsv = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)
return {
"h": int(hsv[0] * 360),
"s": int(hsv[1] * 1000),
"v": int(hsv[2] * 1000),
}
async def send_commands(self, commands):
await self.tuyaiot.send_commands(self._t_deviceid, commands)
@property
def id(self):
return self._t_deviceid
def _convert_code2fhem(self, code):
if code == self.default_code:
return "state"
# pir device
if code == "pir" and self._t_info["category"] == "pir":
return "state"
# smoke detector
if code == "smoke_sensor_status":
return "state"
# water detector
if code == "watersensor_state":
return "state"
# door window sensor
if code == "doorcontact_state":
return "state"
return code
def _convert_value2fhem(self, code, value):
for code_def in self._t_specification["status"]:
if code_def["code"] == code and code_def["type"] == "Integer":
values = json.loads(code_def["values"])
return value / (10 ** values["scale"])
if code == "icon":
return (
self.tuyaiot.device_manager.api.endpoint.replace("openapi", "images")
+ "/"
+ value
)
# pir device
elif code == "pir" and self._t_info["category"] == "pir":
if value == "pir":
self.fhemdev.create_async_task(
self.reset_reading("state", "nomotion", 180)
)
return "motion"
# door window sensor
elif code == "doorcontact_state":
if value is True:
return "open"
return "closed"
if isinstance(value, bool):
if value:
return "on"
return "off"
return value
async def reset_reading(self, reading, resetvalue, timeout):
await asyncio.sleep(timeout)
await fhem.readingsSingleUpdate(self.fhemdev.hash, reading, resetvalue, 1)
async def update(self, device: TuyaDevice):
await self.update_readings_dict(device.status)
async def update_readings_arr(self, status_arr):
await fhem.readingsBeginUpdate(self.hash)
try:
for status in status_arr:
if status["code"] in ["colour_data", "colour_data_v2"]:
await self.update_readings_hsv(
status["code"], json.loads(status["value"])
)
else:
await fhem.readingsBulkUpdate(
self.hash,
self._convert_code2fhem(status["code"]),
self._convert_value2fhem(status["code"], status["value"]),
)
except Exception as ex:
self.logger.exception(ex)
await fhem.readingsEndUpdate(self.hash, 1)
async def update_readings_dict(self, status_dic):
await fhem.readingsBeginUpdate(self.hash)
try:
for st_name in status_dic:
if st_name in ["colour_data", "colour_data_v2"]:
await self.update_readings_hsv(
st_name, json.loads(status_dic[st_name])
)
else:
await fhem.readingsBulkUpdate(
self.hash,
self._convert_code2fhem(st_name),
self._convert_value2fhem(st_name, status_dic[st_name]),
)
except Exception as ex:
self.logger.exception(ex)
await fhem.readingsEndUpdate(self.hash, 1)
async def update_readings_hsv(self, hsv_code, hsv_json):
if hsv_code == "colour_data" and self._t_info["category"] == "dj":
# only category dj (light) has old colour_data
rgb = colorsys.hsv_to_rgb(
int(hsv_json["h"]) / 360,
int(hsv_json["s"]) / 100,
int(hsv_json["v"]) / 100,
)
else:
rgb = colorsys.hsv_to_rgb(
int(hsv_json["h"]) / 360,
int(hsv_json["s"]) / 1000,
int(hsv_json["v"]) / 1000,
)
red = int(rgb[0] * 255)
green = int(rgb[1] * 255)
blue = int(rgb[2] * 255)
rgb_hex = f"{red:02x}{green:02x}{blue:02x}"
await fhem.readingsBulkUpdate(
self.hash,
hsv_code,
rgb_hex,
)
```
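The colour handling in `set_colour_data` / `set_colour_data_v2` above converts FHEM's `RRGGBB` hex value into Tuya's HSV dict, and for the legacy `colour_data` of category `dj` it additionally scales saturation and value from 0-1000 down to 0-100. A standalone sketch of that conversion (same math as `fhemrgb2hsv`, pulled out of the class purely for illustration):

```python
import colorsys

def rgb_hex_to_tuya_hsv(rgb_hex: str, legacy_dj: bool = False) -> dict:
    # Mirrors tuya_cloud_device.fhemrgb2hsv plus the category "dj" scaling above.
    red = int(rgb_hex[0:2], base=16)
    green = int(rgb_hex[2:4], base=16)
    blue = int(rgb_hex[4:6], base=16)
    h, s, v = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)
    hsv = {"h": int(h * 360), "s": int(s * 1000), "v": int(v * 1000)}
    if legacy_dj:
        # old colour_data devices (category "dj") expect 0-100 ranges for s and v
        hsv["s"] = int(hsv["s"] / 10)
        hsv["v"] = int(hsv["v"] / 10)
    return hsv

print(rgb_hex_to_tuya_hsv("ff0000"))        # {'h': 0, 's': 1000, 'v': 1000}
print(rgb_hex_to_tuya_hsv("ff0000", True))  # {'h': 0, 's': 100, 'v': 100}
```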
#### File: xiaomi_gateway3/core/elelabs_ezsp_utility.py
```python
import binascii
import io
import socket
import time
from xmodem import XMODEM
# Utility kept as untouched as possible; the only fix is replacing pyserial's Serial class with a TCP variant
# https://github.com/Elelabs/elelabs-zigbee-ezsp-utility
class serial:
PARITY_NONE = None
STOPBITS_ONE = None
class Serial:
def __init__(self, port, **kwargs):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.settimeout(5)
self.s.connect(port)
def flushInput(self):
pass
def read(self, size: int = 1):
try:
return self.s.recv(size)
except:
return b""
def readline(self):
raw = b""
while True:
c = self.read()
raw += c
if c == b"\n" or c == b"":
break
return raw
def write(self, data: bytes):
self.s.send(data)
def close(self):
self.s.close()
class AdapterModeProbeStatus:
NORMAL = 0
BOOTLOADER = 1
ERROR = 2
class SerialInterface:
def __init__(self, port, baudrate):
self.port = port
self.baudrate = baudrate
def open(self):
try:
self.serial = serial.Serial(
port=self.port,
baudrate=self.baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
xonxoff=True,
timeout=3,
)
except Exception as e:
raise Exception("PORT ERROR: %s" % str(e))
def close(self):
self.serial.close()
class AshProtocolInterface:
FLAG_BYTE = b"\x7E"
RANDOMIZE_START = 0x42
RANDOMIZE_SEQ = 0xB8
RSTACK_FRAME_CMD = b"\x1A\xC0\x38\xBC\x7E"
RSTACK_FRAME_ACK = b"\x1A\xC1\x02\x0B\x0A\x52\x7E"
def __init__(self, serial, config, logger):
self.logger = logger
self.config = config
self.serial = serial
self.ackNum = 0
self.frmNum = 0
def dataRandomize(self, frame):
rand = self.RANDOMIZE_START
out = bytearray()
for x in frame:
out += bytearray([x ^ rand])
if rand % 2:
rand = (rand >> 1) ^ self.RANDOMIZE_SEQ
else:
rand = rand >> 1
return out
def ashFrameBuilder(self, ezsp_frame):
ash_frame = bytearray()
# Control byte
ash_frame += bytearray(
[(((self.ackNum << 0) & 0xFF) | (((self.frmNum % 8) << 4) & 0xFF)) & 0xFF]
)
self.ackNum = (self.ackNum + 1) % 8
self.frmNum = (self.frmNum + 1) % 8
ash_frame += self.dataRandomize(ezsp_frame)
crc = binascii.crc_hqx(ash_frame, 0xFFFF)
ash_frame += bytearray([crc >> 8, crc & 0xFF])
ash_frame = self.replaceReservedBytes(ash_frame)
ash_frame += self.FLAG_BYTE
if self.config.dlevel == "ASH":
self.logger.debug(
"[ ASH REQUEST ] " + " ".join(format(x, "02x") for x in ash_frame)
)
return ash_frame
def revertEscapedBytes(self, msg):
msg = msg.replace(b"\x7d\x5d", b"\x7d")
msg = msg.replace(b"\x7d\x5e", b"\x7e")
msg = msg.replace(b"\x7d\x31", b"\x11")
msg = msg.replace(b"\x7d\x33", b"\x13")
msg = msg.replace(b"\x7d\x38", b"\x18")
msg = msg.replace(b"\x7d\x3a", b"\x1a")
return msg
def replaceReservedBytes(self, msg):
msg = msg.replace(b"\x7d", b"\x7d\x5d")
msg = msg.replace(b"\x7e", b"\x7d\x5e")
msg = msg.replace(b"\x11", b"\x7d\x31")
msg = msg.replace(b"\x13", b"\x7d\x33")
msg = msg.replace(b"\x18", b"\x7d\x38")
msg = msg.replace(b"\x1a", b"\x7d\x3a")
return msg
def getResponse(self, applyRandomize=False):
timeout = time.time() + 3
msg = bytearray()
receivedbyte = None
while (time.time() < timeout) and (receivedbyte != self.FLAG_BYTE):
receivedbyte = self.serial.read()
msg += receivedbyte
if len(msg) == 0:
return -1, None, None
msg = self.revertEscapedBytes(msg)
if self.config.dlevel == "ASH":
self.logger.debug(
"[ ASH RESPONSE ] " + " ".join(format(x, "02x") for x in msg)
)
if applyRandomize:
msg_parsed = self.dataRandomize(bytearray(msg[1:-3]))
if self.config.dlevel == "ASH" or self.config.dlevel == "EZSP":
self.logger.debug(
"[ EZSP RESPONSE ] "
+ " ".join(format(x, "02x") for x in msg_parsed)
)
return 0, msg, msg_parsed
else:
return 0, msg
def sendResetFrame(self):
self.serial.flushInput()
self.logger.debug("RESET FRAME")
if self.config.dlevel == "ASH":
self.logger.debug(
"[ ASH REQUEST ] "
+ " ".join(format(x, "02x") for x in self.RSTACK_FRAME_CMD)
)
self.serial.write(self.RSTACK_FRAME_CMD)
status, response = self.getResponse()
if status:
return status
if not (self.RSTACK_FRAME_ACK in response):
return -1
return 0
def sendAck(self, ackNum):
ack = bytearray([ackNum & 0x07 | 0x80])
crc = binascii.crc_hqx(ack, 0xFFFF)
ack += bytearray([crc >> 8, crc & 0xFF])
ack = self.replaceReservedBytes(ack)
ack += self.FLAG_BYTE
if self.config.dlevel == "ASH":
self.logger.debug("[ ASH ACK ] " + " ".join(format(x, "02x") for x in ack))
self.serial.write(ack)
def sendAshCommand(self, ezspFrame):
ash_frame = self.ashFrameBuilder(ezspFrame)
self.serial.flushInput()
self.serial.write(ash_frame)
status, ash_response, ezsp_response = self.getResponse(True)
if status:
return status, None
self.sendAck(ash_response[0])
return 0, ezsp_response
class EzspProtocolInterface:
def __init__(self, serial, config, logger):
self.logger = logger
self.config = config
self.INITIAL_EZSP_VERSION = 4
self.VERSION = b"\x00"
self.GET_VALUE = b"\xAA"
self.GET_MFG_TOKEN = b"\x0B"
self.LAUNCH_STANDALONE_BOOTLOADER = b"\x8F"
self.EZSP_VALUE_VERSION_INFO = 0x11
self.EZSP_MFG_STRING = 0x01
self.EZSP_MFG_BOARD_NAME = 0x02
self.STANDALONE_BOOTLOADER_NORMAL_MODE = 1
self.ezspVersion = self.INITIAL_EZSP_VERSION
self.sequenceNum = 0
self.ash = AshProtocolInterface(serial, config, logger)
def ezspFrameBuilder(self, command):
ezsp_frame = bytearray()
# Sequence byte
ezsp_frame += bytearray([self.sequenceNum])
self.sequenceNum = (self.sequenceNum + 1) % 255
ezsp_frame += b"\x00"
if self.ezspVersion >= 5:
# Legacy frame ID - always 0xFF
ezsp_frame += b"\xFF"
# Extended frame control
ezsp_frame += b"\x00"
ezsp_frame = ezsp_frame + command
if self.ezspVersion >= 8:
ezsp_frame[2] = 0x01
ezsp_frame[3] = command[0] & 0xFF # LSB
ezsp_frame[4] = command[0] >> 8 # MSB
if self.config.dlevel == "ASH" or self.config.dlevel == "EZSP":
self.logger.debug(
"[ EZSP REQUEST ] " + " ".join(format(x, "02x") for x in ezsp_frame)
)
return ezsp_frame
def sendEzspCommand(self, commandData, commandName=""):
self.logger.debug(commandName)
status, response = self.ash.sendAshCommand(self.ezspFrameBuilder(commandData))
if status:
raise Exception("sendAshCommand status error: %d" % status)
return response
def sendVersion(self, desiredProtocolVersion):
resp = self.sendEzspCommand(
self.VERSION + bytearray([desiredProtocolVersion]),
"sendVersion: V%d" % desiredProtocolVersion,
)
return resp[3] # protocolVersion
def getValue(self, valueId, valueIdName):
resp = self.sendEzspCommand(
self.GET_VALUE + bytearray([valueId]), "getValue: %s" % valueIdName
)
status = resp[5]
valueLength = resp[6]
valueArray = resp[7:]
return status, valueLength, valueArray
def getMfgToken(self, tokenId, tokenIdName):
resp = self.sendEzspCommand(
self.GET_MFG_TOKEN + bytearray([tokenId]), "getMfgToken: %s" % tokenIdName
)
tokenDataLength = resp[5]
tokenData = resp[6:]
return tokenDataLength, tokenData
def launchStandaloneBootloader(self, mode, modeName):
resp = self.sendEzspCommand(
self.LAUNCH_STANDALONE_BOOTLOADER + bytearray([mode]),
"launchStandaloneBootloader: %s" % modeName,
)
status = resp[5]
return status
def initEzspProtocol(self):
ash_status = self.ash.sendResetFrame()
if ash_status:
return ash_status
self.ezspVersion = self.sendVersion(self.INITIAL_EZSP_VERSION)
self.logger.debug("EZSP v%d detected" % self.ezspVersion)
if self.ezspVersion != self.INITIAL_EZSP_VERSION:
self.sendVersion(self.ezspVersion)
return 0
class ElelabsUtilities:
def __init__(self, config, logger):
self.logger = logger
self.config = config
def probe(self):
serialInterface = SerialInterface(self.config.port, self.config.baudrate)
serialInterface.open()
ezsp = EzspProtocolInterface(serialInterface.serial, self.config, self.logger)
ezsp_status = ezsp.initEzspProtocol()
if ezsp_status == 0:
status, value_length, value_array = ezsp.getValue(
ezsp.EZSP_VALUE_VERSION_INFO, "EZSP_VALUE_VERSION_INFO"
)
if status == 0:
firmware_version = (
str(value_array[2])
+ "."
+ str(value_array[3])
+ "."
+ str(value_array[4])
+ "-"
+ str(value_array[0])
)
else:
self.logger.info("EZSP status returned %d" % status)
token_data_length, token_data = ezsp.getMfgToken(
ezsp.EZSP_MFG_STRING, "EZSP_MFG_STRING"
)
if token_data.decode("ascii", "ignore") == "Elelabs":
token_data_length, token_data = ezsp.getMfgToken(
ezsp.EZSP_MFG_BOARD_NAME, "EZSP_MFG_BOARD_NAME"
)
adapter_name = token_data.decode("ascii", "ignore")
self.logger.info("Elelabs adapter detected:")
self.logger.info("Adapter: %s" % adapter_name)
else:
adapter_name = None
self.logger.info("Generic EZSP adapter detected:")
self.logger.info("Firmware: %s" % firmware_version)
self.logger.info("EZSP v%d" % ezsp.ezspVersion)
serialInterface.close()
return (
AdapterModeProbeStatus.NORMAL,
ezsp.ezspVersion,
firmware_version,
adapter_name,
)
else:
if self.config.baudrate != 115200:
serialInterface.close()
time.sleep(1)
serialInterface = SerialInterface(self.config.port, 115200)
serialInterface.open()
            # check if already in bootloader mode
serialInterface.serial.write(b"\x0A")
first_line = serialInterface.serial.readline() # read blank line
if len(first_line) == 0:
# timeout
serialInterface.close()
self.logger.info(
"Couldn't communicate with the adapter in normal or in bootloader modes"
)
return AdapterModeProbeStatus.ERROR, None, None, None
btl_info = (
serialInterface.serial.readline()
) # read Gecko BTL version or blank line
self.logger.info("EZSP adapter in bootloader mode detected:")
self.logger.info(
btl_info.decode("ascii", "ignore")[:-2]
) # show Bootloader version
serialInterface.close()
return AdapterModeProbeStatus.BOOTLOADER, None, None, None
def restart(self, mode):
adapter_status, ezsp_version, firmware_version, adapter_name = self.probe()
if adapter_status == AdapterModeProbeStatus.NORMAL:
if mode == "btl":
serialInterface = SerialInterface(
self.config.port, self.config.baudrate
)
serialInterface.open()
self.logger.info("Launch in bootloader mode")
ezsp = EzspProtocolInterface(
serialInterface.serial, self.config, self.logger
)
ezsp_status = ezsp.initEzspProtocol()
status = ezsp.launchStandaloneBootloader(
ezsp.STANDALONE_BOOTLOADER_NORMAL_MODE,
"STANDALONE_BOOTLOADER_NORMAL_MODE",
)
if status:
serialInterface.close()
self.logger.critical(
"Error launching the adapter in bootloader mode"
)
return -1
serialInterface.close()
# wait for reboot
time.sleep(2)
(
adapter_status,
ezsp_version,
firmware_version,
adapter_name,
) = self.probe()
if adapter_status == AdapterModeProbeStatus.BOOTLOADER:
return 0
else:
return -1
else:
self.logger.info("Allready in EZSP normal mode. No need to restart")
return 0
elif adapter_status == AdapterModeProbeStatus.BOOTLOADER:
if mode == "btl":
self.logger.info("Allready in bootloader mode. No need to restart")
return 0
else:
serialInterface = SerialInterface(self.config.port, 115200)
serialInterface.open()
self.logger.info("Launch in EZSP normal mode")
# Send Reboot
serialInterface.serial.write(b"2")
serialInterface.close()
# wait for reboot
time.sleep(2)
(
adapter_status,
ezsp_version,
firmware_version,
adapter_name,
) = self.probe()
if adapter_status == AdapterModeProbeStatus.NORMAL:
return 0
else:
return -1
def flash(self, filename):
# STATIC FUNCTIONS
def getc(size, timeout=1):
read_data = self.serialInterface.serial.read(size)
return read_data
def putc(data, timeout=1):
self.currentPacket += 1
if (self.currentPacket % 20) == 0:
print(".", end="")
if (self.currentPacket % 100) == 0:
print("")
self.serialInterface.serial.write(data)
time.sleep(0.001)
# if not (".gbl" in filename) and not (".ebl" in filename):
# self.logger.critical(
# 'Aborted! Gecko bootloader accepts .gbl or .ebl images only.')
# return
if self.restart("btl"):
self.logger.critical(
"EZSP adapter not in the bootloader mode. Can't perform update procedure"
)
self.serialInterface = SerialInterface(self.config.port, 115200)
self.serialInterface.open()
# Enter '1' to initialize X-MODEM mode
self.serialInterface.serial.write(b"\x0A")
self.serialInterface.serial.write(b"1")
time.sleep(1)
self.serialInterface.serial.readline() # BL > 1
self.serialInterface.serial.readline() # begin upload
self.logger.info(
"Successfully restarted into X-MODEM mode! Starting upload of the new firmware... DO NOT INTERRUPT(!)"
)
self.currentPacket = 0
# Wait for char 'C'
success = False
start_time = time.time()
while time.time() - start_time < 10:
if self.serialInterface.serial.read() == b"C":
success = True
if time.time() - start_time > 5:
break
if not success:
self.logger.info(
"Failed to restart into bootloader mode. Please see users guide."
)
return
# Start XMODEM transaction
modem = XMODEM(getc, putc)
# stream = open(filename, 'rb')
stream = io.BytesIO(filename)
sentcheck = modem.send(stream)
print("")
if sentcheck:
self.logger.info("Firmware upload complete")
else:
self.logger.critical(
"Firmware upload failed. Please try a correct firmware image or restart in normal mode."
)
return
self.logger.info("Rebooting NCP...")
# Wait for restart
time.sleep(4)
# Send Reboot into App-Code command
self.serialInterface.serial.write(b"2")
self.serialInterface.close()
time.sleep(2)
return self.probe()
def ele_update(self, new_version):
adapter_status, ezsp_version, firmware_version, adapter_name = self.probe()
if adapter_status == AdapterModeProbeStatus.NORMAL:
if adapter_name == None:
self.logger.critical(
"No Elelabs product detected.\r\nUse 'flash' utility for generic EZSP products.\r\nContact <EMAIL> if you see this meesage for original Elelabs product"
)
return
if new_version == "v6" and ezsp_version == 6:
self.logger.info(
"Elelabs product is operating EZSP protocol v%d. No need to update to %s"
% (ezsp_version, new_version)
)
return
if new_version == "v8" and ezsp_version == 8:
self.logger.info(
"Elelabs product is operating EZSP protocol v%d. No need to update to %s"
% (ezsp_version, new_version)
)
return
if adapter_name == "ELR023" or adapter_name == "ELU013":
if new_version == "v6":
self.flash("data/ELX0X3_MG13_6.0.3_ezsp_v6.gbl")
elif new_version == "v8":
self.flash("data/ELX0X3_MG13_6.7.0_ezsp_v8.gbl")
else:
self.logger.critical("Unknown EZSP version")
elif adapter_name == "ELR022" or adapter_name == "ELU012":
self.logger.critical("TODO!. Contact Elelabs at <EMAIL>")
elif adapter_name == "EZBPIS" or adapter_name == "EZBUSBA":
self.logger.critical("TODO!. Contact Elelabs at <EMAIL>")
else:
self.logger.critical(
"Unknown Elelabs product %s detected.\r\nContact <EMAIL> if you see this meesage for original Elelabs product"
% adapter_name
)
elif adapter_status == AdapterModeProbeStatus.BOOTLOADER:
self.logger.critical(
"The product not in the normal EZSP mode.\r\n'restart' into normal mode or use 'flash' utility instead"
)
else:
self.logger.critical("No upgradable device found")
```
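The utility classes above only ever read `port`, `baudrate` and `dlevel` from the `config` object, and `port` is handed straight to the patched `serial.Serial`, so it must be a TCP `(host, port)` tuple. A minimal, hypothetical driver sketch under those assumptions (import path, host and log level are illustrative, not taken from the repository):

```python
import logging
from types import SimpleNamespace

# Hypothetical wiring; the import path is guessed from the file header above.
from xiaomi_gateway3.core.elelabs_ezsp_utility import ElelabsUtilities

logging.basicConfig(level=logging.INFO)
config = SimpleNamespace(
    port=("192.168.1.10", 8888),  # gateway TCP endpoint consumed by the patched Serial class
    baudrate=115200,
    dlevel="INFO",                # "ASH" or "EZSP" would enable frame-level debug logs
)

utility = ElelabsUtilities(config, logging.getLogger("ezsp"))
status, ezsp_version, firmware, adapter = utility.probe()
print(status, ezsp_version, firmware, adapter)
```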
#### File: lib/xiaomi_gateway3_device/xiaomi_gateway3_device.py
```python
import asyncio
from .. import fhem
from .. import fhem_pythonbinding as fhepy
from .. import generic
# imports for dynamical usage, do NOT remove
from .devices.gateway import Gateway # noqa: F401
from .devices.sensor import ( # noqa: F401
ContactSensor,
HTSensor,
MotionSensor,
WaterLeakSensor,
)
device_type_mapping = {
"lumi.sensor_magnet.v2": "ContactSensor",
"lumi.sensor_magnet.aq2": "ContactSensor",
"lumi.sensor_wleak.aq1": "WaterLeakSensor",
"lumi.sensor_ht.v1": "HTSensor",
"lumi.sensor_ht.v2": "HTSensor",
"lumi.weather.v1": "HTSensor",
"lumi.sensor_motion.v1": "MotionSensor",
"lumi.sensor_motion.v2": "MotionSensor",
"lumi.gateway.mgl03": "Gateway",
}
class xiaomi_gateway3_device(generic.FhemModule):
def __init__(self, logger):
super().__init__(logger)
self._fhempy_gateway = None
self._fhempy_device = None
self.loop = asyncio.get_event_loop()
# FHEM FUNCTION
async def Define(self, hash, args, argsh):
await super().Define(hash, args, argsh)
if len(args) < 5:
return (
"Usage: define devname fhempy xiaomi_gateway3_device"
" <GATEWAY_NAME> <DID>"
)
self.gw_name = args[3]
self.did = args[4]
hash["GATEWAY"] = self.gw_name
hash["DID"] = self.did
# change gateway did to 0, we just needed it for DID internals
if self.did.find("0x") >= 0:
self.did = "lumi.0"
await fhem.readingsSingleUpdateIfChanged(self.hash, "state", "offline", 1)
self.create_async_task(self.connect_gw())
async def connect_gw(self):
while self._fhempy_gateway is None:
self._fhempy_gateway = fhepy.getFhemPyDeviceByName(self.gw_name)
if self._fhempy_gateway:
try:
await self._fhempy_gateway.register_device(self, self.update)
await fhem.readingsSingleUpdateIfChanged(
self.hash, "state", "online", 1
)
except Exception:
self._fhempy_gateway = None
pass
else:
await fhem.readingsSingleUpdateIfChanged(
self.hash, "state", f"gateway {self.gw_name} not found", 1
)
await asyncio.sleep(10)
async def initialize(self, device):
if self._fhempy_gateway is None:
return
# first update, set attributes and device readings like model, sid, ...
if device["model"] not in device_type_mapping:
self.logger.error(
f"{device['model']} not yet supported, please report an issue here: "
f"https://github.com/dominikkarall/fhempy/issues"
)
await fhem.readingsSingleUpdateIfChanged(
self.hash, "state", f"unsupported device: {device['model']}", 1
)
return
# create device based on device model
self._fhempy_device = globals()[device_type_mapping[device["model"]]](
self.logger, self._fhempy_gateway
)
self._fhempy_device.set_hash(self.hash)
await self._fhempy_device.initialize(device)
self._fhempy_gateway.gateway3.set_entity(self._fhempy_device)
self._fhempy_gateway.gateway3.set_stats(self._fhempy_device)
def update(self, data):
if self._fhempy_device is not None:
self._fhempy_device.update(data)
# FHEM functions which will be redirected to device type class
async def FW_detailFn(self, hash, args, argsh):
if self._fhempy_device is None:
return await super().FW_detailFn(hash, args, argsh)
return await self._fhempy_device.FW_detailFn(hash, args, argsh)
async def Set(self, hash, args, argsh):
if self._fhempy_device is None:
return await super().Set(hash, args, argsh)
return await self._fhempy_device.Set(hash, args, argsh)
async def Attr(self, hash, args, argsh):
if self._fhempy_device is None:
return await super().Attr(hash, args, argsh)
return await self._fhempy_device.Attr(hash, args, argsh)
async def Undefine(self, hash):
await super().Undefine(hash)
if self._fhempy_device is not None:
await self._fhempy_device.Undefine(hash)
```
|
{
"source": "jfm/raspberry-devices",
"score": 3
}
|
#### File: raspberry-devices/RPi/events.py
```python
class OutputEvent:
def __init__(self, channel, outmode):
self.channel = channel
self.outmode = outmode
def as_dict(self):
return {
'type': 'output',
'channel': self.channel,
'outmode': self.outmode
}
class SetmodeEvent:
def __init__(self, mode):
self.mode = mode
def as_dict(self):
return {
'type': 'mode',
'mode': self.mode
}
```
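Both event classes expose `as_dict()`, so serialising them (for example to JSON) is a one-liner. A tiny sketch, assuming the module is importable as `RPi.events` per the file header:

```python
import json
from RPi.events import OutputEvent, SetmodeEvent  # path inferred from the file header above

print(json.dumps(OutputEvent(channel=18, outmode=1).as_dict()))
# {"type": "output", "channel": 18, "outmode": 1}
print(json.dumps(SetmodeEvent(mode="BCM").as_dict()))
# {"type": "mode", "mode": "BCM"}
```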
#### File: sensors/temperature/ds18b20.py
```python
class Sensor:
def set_temperature(self, device, temperature):
file = open(device, 'w')
file.write('ab 12 34 56 78 bc 12 34 ff : crc=ff YES\n')
file.write('ab 12 34 56 78 bc 12 34 ff t=%s' % str(temperature * 1000))
file.close()
```
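The fake sensor writes the two-line format of the Linux w1 therm driver (`crc=... YES`, then `t=<millidegrees>`), so a matching reader is straightforward. A read-back sketch under that assumption (helper name and path are illustrative):

```python
def read_temperature(device: str) -> float:
    # Parse the simulated DS18B20 w1_slave file produced by Sensor.set_temperature above.
    with open(device) as f:
        lines = f.readlines()
    if not lines[0].strip().endswith("YES"):
        raise IOError("CRC check failed")
    millidegrees = float(lines[1].split("t=")[1])
    return millidegrees / 1000.0

# Example round trip with the fake sensor:
#   Sensor().set_temperature("/tmp/w1_slave", 21.5)
#   read_temperature("/tmp/w1_slave")  -> 21.5
```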
|
{
"source": "JfMRes/mask-detection-python",
"score": 3
}
|
#### File: JfMRes/mask-detection-python/usando.py
```python
from imageai.Prediction.Custom import CustomImagePrediction
prediction = CustomImagePrediction()
prediction.setModelTypeAsResNet()
modelo="imagenes\models\model_ex-049_acc-0.906250.h5"
prediction.setModelPath(modelo)
prediction.setJsonPath("imagenes\json\model_class.json")
prediction.loadModel(num_objects=2)
def inicia():
foto=r"estampado.webp"
predictions, probabilities = prediction.predictImage(foto, result_count=2)
for eachPrediction, eachProbability in zip(predictions, probabilities):
print(eachPrediction , " : " , eachProbability)
inicia()
```
|
{
"source": "JfMRes/tfg",
"score": 2
}
|
#### File: JfMRes/tfg/init.py
```python
import sys
font=("Arial Bold", 10)
with open(sys.path[0]+'/docs/settings.txt') as f_settings:
lines=[]
for line in f_settings:
lines.append(line)
def value_setting(ind):
return lines[ind][lines[ind].index('=')+1:len(lines[ind])-1]
def_lang=value_setting(0)
def_timeout=float(value_setting(1))
def_encode=value_setting(2)
def_baudrate=int(value_setting(3))
def_theme=value_setting(4)
with open(sys.path[0]+'/docs/languages/'+def_lang+'.txt') as f_lang:
lines=[]
for line in f_lang:
lines.append(line)
def value_lang(ind):
return lines[ind][lines[ind].index('=')+1:len(lines[ind])-1]
t_error_path=value_lang(0)
t_error_noinput=value_lang(1)
t_error_output=value_lang(2)
t_error_toomanyout=value_lang(3)
t_error_toomanyin=value_lang(4)
t_error_noconexion=value_lang(5)
t_head_main=value_lang(6)
t_head_debug=value_lang(7)
t_label_input_pins=value_lang(8)
t_label_output_pins=value_lang(9)
t_label_file_path=value_lang(10)
t_label_time_step=value_lang(11)
t_button_start=value_lang(12)
t_button_diagram=value_lang(13)
t_button_debug=value_lang(14)
t_button_connect=value_lang(15)
t_error_no_succes_connction=value_lang(16)
```
|
{
"source": "jfm/TimeGraph",
"score": 2
}
|
#### File: TimeGraph/tests/test_plotter.py
```python
from nose.tools import assert_equal
from timegraph.drawing.plotter import Plotter
#class PlotterTest(unittest.TestCase):
class TestPlotter:
def setup(self):
self.test_values = [('2015-08-18T00:00:00Z', 4.354952083333334),
('2015-08-19T00:00:00Z', 4.308158333333333),
('2015-08-20T00:00:00Z', 4.302979166666666),
('2015-08-21T00:00:00Z', 4.311062499999999),
('2015-08-22T00:00:00Z', 4.253908333333332),
('2015-08-23T00:00:00Z', 4.189418750000003),
('2015-08-24T00:00:00Z', 4.153933333333331),
('2015-08-25T00:00:00Z', 4.172108333333331),
('2015-08-26T00:00:00Z', 4.261525),
('2015-08-27T00:00:00Z', 4.292508333333336),
('2015-08-28T00:00:00Z', 4.4140041666666665),
('2015-08-29T00:00:00Z', 4.499274999999998),
('2015-08-30T00:00:00Z', 4.439795833333335),
('2015-08-31T00:00:00Z', 4.4404875000000015),
('2015-09-01T00:00:00Z', 4.42244375),
('2015-09-02T00:00:00Z', 4.421910416666667),
('2015-09-03T00:00:00Z', 4.410592436974791),
('2015-09-04T00:00:00Z', 4.3801125),
('2015-09-05T00:00:00Z', 4.318624217118998),
('2015-09-06T00:00:00Z', 4.288097916666669),
('2015-09-07T00:00:00Z', 4.393808333333334),
('2015-09-08T00:00:00Z', 4.476208333333333),
('2015-09-09T00:00:00Z', 4.569668750000003),
('2015-09-10T00:00:00Z', 4.60123125),
('2015-09-11T00:00:00Z', 4.608704166666667),
('2015-09-12T00:00:00Z', 4.658435416666667),
('2015-09-13T00:00:00Z', 4.7641395833333355),
('2015-09-14T00:00:00Z', 4.911522916666667),
('2015-09-15T00:00:00Z', 4.857977083333336),
('2015-09-16T00:00:00Z', 4.680843749999998),
('2015-09-17T00:00:00Z', 4.603674999999998),
('2015-09-18T00:00:00Z', 4.37027676240209)]
self.values = [(4.354952083333334, 7),
(4.308158333333333, 7),
(4.302979166666666, 7),
(4.311062499999999, 7),
(4.253908333333332, 7),
(4.189418750000003, 7),
(4.153933333333331, 7),
(4.172108333333331, 7),
(4.261525, 7),
(4.292508333333336, 7),
(4.4140041666666665, 7),
(4.499274999999998, 8),
(4.439795833333335, 8),
(4.4404875000000015, 8),
(4.42244375, 8),
(4.421910416666667, 8),
(4.410592436974791, 7),
(4.3801125, 7),
(4.318624217118998, 7),
(4.288097916666669, 7),
(4.393808333333334, 7),
(4.476208333333333, 8),
(4.569668750000003, 8),
(4.60123125, 8),
(4.608704166666667, 8),
(4.658435416666667, 8),
(4.7641395833333355, 8),
(4.911522916666667, 9),
(4.857977083333336, 8),
(4.680843749999998, 8),
(4.603674999999998, 8),
(4.37027676240209, 7)]
def test_get_y_scale(self):
plotter = Plotter(is_test=True)
y_scale = plotter.get_y_scale(self.values)
print(y_scale)
assert_equal(max(y_scale), '4.91 ')
assert_equal(min(y_scale), '2.64 ')
def test_generate_matrix(self):
scaled_values = [scaled for (original, scaled) in self.values]
plotter = Plotter(is_test=True)
matrix = plotter.generate_matrix(scaled_values)
assert_equal(matrix[7][0], '*')
def test_scale_values(self):
plotter = Plotter(is_test=True)
original_values = [original for (time, original) in self.test_values]
scaled_values = plotter.scale_values(original_values)
print(scaled_values)
assert_equal(scaled_values[0][1], 7)
```
#### File: timegraph/drawing/drawing.py
```python
from numbers import Number
from timegraph.drawing.plotter import Plotter
class Drawing:
def __init__(self):
self.plotter = Plotter()
def create_graph(self, title, db_response):
value_list = self.get_value_list(db_response.get_points())
self.plotter.plot_timeseries(value_list)
def get_value_list(self, points):
result = []
for point in points:
point_keys = point.keys()
for key in point_keys:
if key != 'time':
if (point[key] is not None and
isinstance(point[key], Number)):
result.append(point[key])
return result
def print_graph(self, lines):
for line in lines:
print(line)
class DrawingException(Exception):
def __init__(self, code, message):
super().__init__(code, message)
self.code = code
self.message = message
```
#### File: timegraph/shell/influx.py
```python
from cmd import Cmd
from influxdb import InfluxDBClient
from requests.exceptions import ConnectionError
from influxdb.exceptions import InfluxDBClientError
from timegraph.drawing.drawing import Drawing, DrawingException
import json
class InfluxShell(Cmd):
prompt = 'query> '
def __init__(self, db_host, db_port, db_name):
super().__init__()
self.db_host = db_host
self.db_port = db_port
self.db_name = db_name
self.client = InfluxDBClient(
host=db_host, port=db_port, database=db_name)
self.drawing = Drawing()
def do_auth(self, line):
args = line.split(' ')
dbuser = args[0]
dbpass = args[1]
self.client.switch_user(dbuser, dbpass)
def do_select(self, line):
query = 'select ' + line
if not query.endswith(';'):
query = query + ';'
try:
response = self.client.query(query)
self.drawing.create_graph(query, response)
except ConnectionError as conn_error:
self.connection_error_handler(conn_error)
except InfluxDBClientError as error:
self.influx_error_handler(error.code, error.content)
except DrawingException as drawing_error:
self.graphtool_error_handler(
drawing_error.code, drawing_error.message)
def do_list(self, line):
print('list - Not implemented yet')
def do_import(self, line):
        print('import - Not implemented yet')
# Convert input(file) to valid json
# self.client.write_points(json)
def do_create_db(self, line):
response = self.client.create_database(line)
print(response)
def do_disconnect(self, line):
return True
def do_EOF(self, line):
return True
def default(self, line):
print('Unrecognized command')
print(' ', line)
def main(self):
InfluxShell.cmdloop(self, intro='Connected to Influx Query Shell')
def connection_error_handler(self, exception):
print('Could not connect to {0} on {1}:{2}'.format(
self.db_name, self.db_host, self.db_port))
def influx_error_handler(self, code, content):
if code is None:
print(content)
else:
contentJson = json.loads(content)
print(code, ' -- ', contentJson['error'])
def graphtool_error_handler(self, code, message):
print(code, ' -- ', message)
# select count(water_level) from h2o_feet
# select * from h2o_feet limit 5
# select field(value) from h2o_feet limit 5
# select water_level from h2o_feet limit 5
# select mean("water_level") from h2o_feet group by time(1d) limit 100
# curl https://s3.amazonaws.com/noaa.water-database/NOAA_data.txt
# -o NOAA_data.txt
# influx -import -path=NOAA_data.txt -precision=s -database=testdb
```
#### File: timegraph/shell/timegraph.py
```python
from cmd import Cmd
from timegraph.shell.influx import InfluxShell
class TimeGraphShell(Cmd):
prompt = 'timegraph>'
def __init__(self):
super().__init__()
def do_connect(self, line):
args = line.split(' ')
if len(args) < 4:
self.print_help_missing_args()
else:
db_type = args[0]
db_host = args[1]
db_port = args[2]
db_name = args[3]
if db_type == 'influx':
influx_shell = InfluxShell(db_host, db_port, db_name)
influx_shell.main()
else:
self.print_help_influx_only()
def help_connect(self):
self.print_help_missing_args()
def main(self):
TimeGraphShell.cmdloop(self, intro='TimeGraph Shell')
def do_EOF(self, line):
return True
def do_exit(self, line):
return True
def print_help_missing_args(self):
print('connect takes the following arguments:')
print(' connect <dbtype> <host> <port> <db>')
print('Example:')
print(' connect influx localhost 8086 exampledb')
def print_help_influx_only(self):
print('Only the dbtype "influx" is currently supported')
```
|
{
"source": "jfmyers9/integrations-core",
"score": 2
}
|
#### File: commands/env/shell.py
```python
import click
from ...e2e import create_interface, get_configured_envs
from ...testing import complete_active_checks, complete_configured_envs
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info
@click.command('shell', context_settings=CONTEXT_SETTINGS, short_help='Run a shell inside agent container')
@click.argument('check', autocompletion=complete_active_checks)
@click.argument('env', autocompletion=complete_configured_envs, required=False)
@click.option('-v', '--install-vim', is_flag=True, help='Optionally install editing/viewing tools vim and less')
@click.option('-i', '--install-tools', multiple=True, help='Optionally install custom tools')
def shell(check, env, install_vim, install_tools):
"""Run a shell inside the Agent docker container."""
envs = get_configured_envs(check)
if not envs:
echo_failure(f'No active environments found for `{check}`.')
echo_info(f'See what is available to start via `ddev env ls {check}`.')
abort()
if not env:
if len(envs) > 1:
echo_failure(f'Multiple active environments found for `{check}`, please specify one.')
echo_info('See what is active via `ddev env ls`.')
abort()
env = envs[0]
if env not in envs:
echo_failure(f'`{env}` is not an active environment.')
echo_info('See what is active via `ddev env ls`.')
abort()
environment = create_interface(check, env)
if environment.ENV_TYPE == 'local':
abort('Shell subcommand only available for docker e2e environments')
if install_vim or install_tools:
tools = list(install_tools)
if install_vim:
tools.extend(('less', 'vim'))
echo_info(f'Installing helper tools: {", ".join(tools)}')
environment.exec_command('/bin/bash -c "apt update && apt install -y {}"'.format(" ".join(tools)))
result = environment.shell()
if result.code:
abort(result.stdout + result.stderr, code=result.code)
```
#### File: release/stats/csv_report.py
```python
import csv
from pathlib import Path
import click
from ...console import CONTEXT_SETTINGS, echo_success
from .common import Release
class ReportSerializer:
def __init__(self, release):
self.release = release
def write_report(self, filepath):
with open(filepath, 'w', newline='') as csvfile:
report = self._report()
fieldnames = report.keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(report)
def write_changes(self, filepath):
with open(filepath, 'w', newline='') as csvfile:
changes = [self._change(commit) for commit in self.release.commits]
fieldnames = changes[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for change in changes:
writer.writerow(change)
def _report(self):
return {
'Release Branch': self.release.release_version,
'Release candidates': len(self.release.rc_tags),
'Number of Commits': len(self.release.commits),
'Commits with unknown PR': len([commit for commit in self.release.commits if commit.pull_request is None]),
'Release time (days)': self._release_delay(),
}
def _release_delay(self):
rc_1 = self.release.commits[0]
last_change = self.release.commits[-1]
duration = (last_change.date - rc_1.date).total_seconds()
return divmod(duration, 24 * 60 * 60)[0]
def _change(self, commit):
teams = []
title = commit.title
url = commit.url
next_tag = None
pull_request = commit.pull_request
if pull_request:
teams = [label.rpartition('/')[-1] for label in pull_request.labels if label.startswith('team')]
title = pull_request.title
url = pull_request.url
if commit.included_in_tag:
next_tag = commit.included_in_tag.name
return {'SHA': commit.sha, 'Title': title, 'URL': url, 'Teams': ' & '.join(teams), 'Next tag': next_tag}
@click.command(
context_settings=CONTEXT_SETTINGS, short_help="Writes the CSV report about a specific release",
)
@click.option('--from-ref', '-f', help="Reference to start stats on", required=True)
@click.option('--to-ref', '-t', help="Reference to end stats at", required=True)
@click.option('--release-version', '-r', help="Release version to analyze", required=True)
@click.option('--output-folder', '-o', help="Path to output folder")
@click.pass_context
def csv_report(ctx, from_ref, to_ref, release_version, output_folder=None):
"""Computes the release report and writes it to a specific directory
"""
if output_folder is None:
output_folder = release_version
folder = Path(output_folder)
folder.mkdir(parents=True, exist_ok=True)
release = Release.from_github(ctx, release_version, from_ref=from_ref, to_ref=to_ref)
serializer = ReportSerializer(release)
serializer.write_report(folder.joinpath('release.csv'))
serializer.write_changes(folder.joinpath('changes.csv'))
echo_success(f'Successfully wrote reports to directory `{output_folder}`')
```
#### File: commands/validate/readmes.py
```python
import re
from os import path
import click
from ...utils import complete_valid_checks, get_root, get_valid_integrations, read_readme_file
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success
IMAGE_EXTENSIONS = {".png", ".jpg"}
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Validate README.md files')
@click.pass_context
@click.argument('integration', autocompletion=complete_valid_checks, required=False)
def readmes(ctx, integration):
"""Validates README files
If `check` is specified, only the check will be validated,
otherwise all README files in the repo will be.
"""
repo = ctx.obj['repo_name']
integrations = []
failed_checks = 0
if integration:
integrations = [integration]
else:
integrations = sorted(get_valid_integrations())
for integration in integrations:
has_overview = False
has_setup = False
errors = False
display_queue = []
lines = read_readme_file(integration)
for line_no, line in lines:
if "## Overview" == line.strip():
has_overview = True
if "## Setup" == line.strip():
has_setup = True
for ext in IMAGE_EXTENSIONS:
if ext in line:
IMAGE_REGEX = (
rf".*https:\/\/raw\.githubusercontent\.com\/DataDog\/"
rf"{re.escape(repo)}\/master\/({re.escape(integration)}\/images\/.*.{ext}).*"
)
match = re.match(IMAGE_REGEX, line)
if not match:
errors = True
display_queue.append((echo_failure, f" No valid image file on line {line_no}"))
display_queue.append(
(
echo_info,
f" This image path must be in the form: "
f"https://raw.githubusercontent.com/DataDog/{repo}/master/{integration}/images/<IMAGE_NAME>", # noqa
)
)
break
rel_path = match.groups()[0]
if rel_path:
file_path = path.join(get_root(), rel_path)
if not path.exists(file_path):
errors = True
display_queue.append(
(echo_failure, f" image: {rel_path} is linked in its readme but does not exist")
)
if not (has_overview and has_setup):
errors = True
display_queue.append((echo_failure, " readme does not contain both an Overview and Setup H2 section"))
if errors:
failed_checks += 1
echo_info(f"{integration}/README.md... ", nl=False)
echo_failure("FAILED")
for display_func, message in display_queue:
display_func(message)
if failed_checks:
echo_failure(f"{failed_checks} invalid files")
abort()
else:
echo_success("All READMEs are valid!")
```
#### File: dev/tooling/dependencies.py
```python
import os
from collections import defaultdict
from packaging.requirements import InvalidRequirement, Requirement
from ..utils import stream_file_lines
from .constants import get_agent_requirements, get_root
from .utils import get_valid_checks
class DependencyDefinition:
__slots__ = ('name', 'requirement', 'file_path', 'line_number', 'check_name')
def __init__(self, name, requirement, file_path, line_number, check_name=None):
self.name = name
self.requirement = requirement
self.file_path = file_path
self.line_number = line_number
self.check_name = check_name
def create_dependency_data():
return defaultdict(lambda: defaultdict(lambda: []))
def load_dependency_data(req_file, dependencies, errors, check_name=None):
for i, line in enumerate(stream_file_lines(req_file)):
line = line.strip()
if not line or line.startswith('#'):
continue
try:
req = Requirement(line)
except InvalidRequirement as e:
errors.append(f'File `{req_file}` has an invalid dependency: `{line}`\n{e}')
continue
name = req.name.lower()
dependency = dependencies[name][req.specifier]
dependency.append(DependencyDefinition(name, req, req_file, i, check_name))
def read_check_dependencies():
root = get_root()
dependencies = create_dependency_data()
errors = []
for check_name in get_valid_checks():
req_file = os.path.join(root, check_name, 'requirements.in')
load_dependency_data(req_file, dependencies, errors, check_name)
return dependencies, errors
def read_agent_dependencies():
dependencies = create_dependency_data()
errors = []
load_dependency_data(get_agent_requirements(), dependencies, errors)
return dependencies, errors
```
#### File: http_check/tests/conftest.py
```python
import os
import sys
import pytest
from mock import patch
from datadog_checks.dev import docker_run, run_command
from datadog_checks.dev.utils import ON_WINDOWS
from datadog_checks.http_check import HTTPCheck
from .common import CONFIG_E2E, HERE
MOCKED_HOSTS = ['valid.mock', 'expired.mock', 'wronghost.mock', 'selfsigned.mock']
@pytest.fixture(scope='session')
def dd_environment():
cacert_path = os.path.join(HERE, 'fixtures', 'cacert.pem')
e2e_metadata = {'docker_volumes': ['{}:/opt/cacert.pem'.format(cacert_path)]}
with docker_run(
os.path.join(HERE, 'compose', 'docker-compose.yml'), build=True, log_patterns=["starting server on port"]
):
yield CONFIG_E2E, e2e_metadata
@pytest.fixture(scope='session')
def mock_dns():
import socket
_orig_getaddrinfo = socket.getaddrinfo
_orig_connect = socket.socket.connect
def patched_getaddrinfo(host, *args, **kwargs):
if host.endswith('.mock'):
# See socket.getaddrinfo, just updating the hostname here.
# https://docs.python.org/3/library/socket.html#socket.getaddrinfo
return [(2, 1, 6, '', ('127.0.0.1', 443))]
return _orig_getaddrinfo(host, *args, **kwargs)
def patched_connect(self, address):
host, port = address[0], address[1]
if host.endswith('.mock'):
host, port = '127.0.0.1', 443
return _orig_connect(self, (host, port))
socket.getaddrinfo = patched_getaddrinfo
socket.socket.connect = patched_connect
yield
socket.getaddrinfo = _orig_getaddrinfo
socket.socket.connect = _orig_connect
@pytest.fixture()
def mock_hosts_e2e():
"""Only for e2e testing"""
container_id = "dd_http_check_{}".format(os.environ["TOX_ENV_NAME"])
commands = []
for mocked_host in MOCKED_HOSTS:
commands.append(r'bash -c "printf \"127.0.0.1 {}\n\" >> /etc/hosts"'.format(mocked_host))
for command in commands:
run_command('docker exec {} {}'.format(container_id, command))
@pytest.fixture(scope='session')
def http_check():
# Patch the function to return the certs located in the `tests/` folder
with patch('datadog_checks.http_check.http_check.get_ca_certs_path', new=mock_get_ca_certs_path):
yield HTTPCheck('http_check', {}, [{}])
@pytest.fixture(scope='session')
def embedded_dir():
if ON_WINDOWS:
return 'embedded{}'.format(sys.version_info[0])
else:
return 'embedded'
def mock_get_ca_certs_path():
"""
Mimic get_ca_certs_path() by using the certificates located in the `tests/` folder
"""
embedded_certs = os.path.join(HERE, 'fixtures', 'cacert.pem')
if os.path.exists(embedded_certs):
return embedded_certs
raise Exception("Embedded certs not found: {}".format(embedded_certs))
```
#### File: vertica/tests/test_unit.py
```python
import logging
import os
import mock
import pytest
from datadog_checks.base import AgentCheck
from datadog_checks.base.log import TRACE_LEVEL
from datadog_checks.vertica import VerticaCheck
CERTIFICATE_DIR = os.path.join(os.path.dirname(__file__), 'certificate')
def test_ssl_config_ok(aggregator):
cert = os.path.join(CERTIFICATE_DIR, 'cert.cert')
private_key = os.path.join(CERTIFICATE_DIR, 'server.pem')
instance = {
'db': 'abc',
'server': 'localhost',
'port': '999',
'username': 'dbadmin',
'password': '<PASSWORD>',
'timeout': 10,
'tags': ['foo:bar'],
'tls_verify': True,
'validate_hostname': True,
'cert': cert,
'private_key': private_key,
'ca_cert': CERTIFICATE_DIR,
}
check = VerticaCheck('vertica', {}, [instance])
with mock.patch('datadog_checks.vertica.vertica.vertica') as vertica:
with mock.patch('datadog_checks.vertica.vertica.ssl') as ssl:
vertica.connect.return_value = mock.MagicMock()
tls_context = mock.MagicMock()
ssl.SSLContext.return_value = tls_context
check.check(instance)
assert tls_context.verify_mode == ssl.CERT_REQUIRED
assert tls_context.check_hostname is True
tls_context.load_verify_locations.assert_called_with(None, CERTIFICATE_DIR, None)
tls_context.load_cert_chain.assert_called_with(cert, keyfile=private_key)
assert check._connection is not None
aggregator.assert_service_check("vertica.can_connect", status=AgentCheck.OK, tags=['db:abc', 'foo:bar'])
def test_client_logging_enabled(aggregator, instance):
instance['client_lib_log_level'] = 'DEBUG'
check = VerticaCheck('vertica', {}, [instance])
with mock.patch('datadog_checks.vertica.vertica.vertica') as vertica:
check.check(instance)
vertica.connect.assert_called_with(
database=mock.ANY,
host=mock.ANY,
port=mock.ANY,
user=mock.ANY,
password=<PASSWORD>,
backup_server_node=mock.ANY,
connection_load_balance=mock.ANY,
connection_timeout=mock.ANY,
log_level='DEBUG',
log_path='',
)
def test_client_logging_disabled(aggregator, instance):
instance['client_lib_log_level'] = None
check = VerticaCheck('vertica', {}, [instance])
with mock.patch('datadog_checks.vertica.vertica.vertica') as vertica:
check.check(instance)
vertica.connect.assert_called_with(
database=mock.ANY,
host=mock.ANY,
port=mock.ANY,
user=mock.ANY,
password=<PASSWORD>,
backup_server_node=mock.ANY,
connection_load_balance=mock.ANY,
connection_timeout=mock.ANY,
)
@pytest.mark.parametrize(
'agent_log_level, expected_vertica_log_level', [(logging.DEBUG, logging.DEBUG), (TRACE_LEVEL, logging.DEBUG)]
)
def test_client_logging_enabled_debug_if_agent_uses_debug_or_trace(
aggregator, instance, agent_log_level, expected_vertica_log_level
):
"""
Improve collection of debug flares by automatically enabling client DEBUG logs when the Agent uses DEBUG logs.
"""
instance.pop('client_lib_log_level', None)
root_logger = logging.getLogger()
root_logger.setLevel(agent_log_level)
check = VerticaCheck('vertica', {}, [instance])
with mock.patch('datadog_checks.vertica.vertica.vertica') as vertica:
check.check(instance)
vertica.connect.assert_called_with(
database=mock.ANY,
host=mock.ANY,
port=mock.ANY,
user=mock.ANY,
password=<PASSWORD>,
backup_server_node=mock.ANY,
connection_load_balance=mock.ANY,
connection_timeout=mock.ANY,
log_level=expected_vertica_log_level,
log_path='',
)
def test_client_logging_disabled_if_agent_uses_info(aggregator, instance):
"""
Library logs should be disabled by default, in particular under normal Agent operation (INFO level).
"""
instance.pop('client_lib_log_level', None)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
check = VerticaCheck('vertica', {}, [instance])
with mock.patch('datadog_checks.vertica.vertica.vertica') as vertica:
check.check(instance)
vertica.connect.assert_called_with(
database=mock.ANY,
host=mock.ANY,
port=mock.ANY,
user=mock.ANY,
password=<PASSWORD>,
backup_server_node=mock.ANY,
connection_load_balance=mock.ANY,
connection_timeout=mock.ANY,
)
```
|
{
"source": "jfnavarro/old_python_courses",
"score": 3
}
|
#### File: AB1/src/OverlapMatches.py
```python
import sys
import os.path
from numpy import *
class ReturnValue3(object):
def __init__(self,score,pointer):
self.score = score
self.pointer = pointer
class ReturnValue(object):
def __init__(self,name="",sequence=""):
self.sequence = sequence
self.name = name
class ReturnValue2(object):
def __init__(self,alignment1,alignment2):
self.alignment1 = alignment1
self.alignment2 = alignment2
def readFasta(infile):
saved = None
while 1:
if saved is not None:
line = saved
saved = None
else:
line = infile.readline()
if not line:
return
if line.isspace():
continue
if not line.startswith(">"):
raise TypeError(
"The title line must start with a '>': %r" % line)
title = line.rstrip()
sequences = []
while 1:
line = infile.readline()
if not line or line.isspace():
break
if line.startswith(">"):
saved = line
break
sequences.append(line.rstrip("\n"))
yield ReturnValue(title,"".join(sequences))
infile.close()
def readFile(name):
file = open(name, 'rU')
return file
def overlapmatches(seq1,seq2,score,d,pointer):
for i in xrange(1,len(seq1)+1):
for j in xrange(1,len(seq2)+1):
left = score[i][j-1] + d #insertion
up = score[i-1][j] + d #deletion
corner = score[i-1][j-1] + s(seq1[i-1],seq2[j-1]) #match
auxmax = max(corner,left,up)
score[i][j] = auxmax
if(auxmax == up):
pointer[i][j] = 1
elif(auxmax == left):
pointer[i][j] = 2
else:
pointer[i][j] = 3
return ReturnValue3(score,pointer)
def traceback(pointer,position,sequence1,sequence2):
i = position[0]
j = position[1]
alignment1 = ""
alignment2 = ""
alignment3 = ""
alignment4 = ""
for x in xrange(position[1]-1,0,-1):
alignment1 = sequence1[x-1] + alignment1
alignment2 = '-' + alignment2
while j != 0 and i != 0:
if (pointer[i][j] == 3):
alignment3 = alignment3 + sequence1[i-1]
alignment4 = alignment4 + sequence2[j-1]
i=i-1
j=j-1
elif (pointer[i][j] == 2):
alignment3 = alignment3 + '-'
alignment4 = alignment4 + sequence2[j-1]
j=j-1
elif (pointer[i][j] == 1):
alignment3 = alignment3 + sequence1[i-1]
alignment4 = alignment4 + '-'
i=i-1
print "Length1: " + str(len(alignment3)) + " Length2: " + str(len(alignment4))
alignment3 = alignment3[::-1]
alignment1 = alignment1 + alignment3
alignment4 = alignment4[::-1]
alignment2 = alignment2 + alignment4
alignment4 = ""
for y in xrange(len(sequence2),position[1],-1):
alignment1 = alignment1 + '-'
alignment4 = sequence2[y-1] + alignment4
alignment2 = alignment2 + alignment4
if (i >0):
print("Non homologous beginning")
if (j>0):
print("Non homologous ending")
print str(alignment2) + "\n\n"
print str(alignment1)
return ReturnValue2(alignment1,alignment2)
def s(a,b):
if(a == b):
return 1
else:
return -1
def getFmax(score,m,n):
Fmax = 0
position = (0,0)
m = m -1
n = n -1
for i in xrange(1,n):
if(score[i][m] > Fmax):
Fmax = score[i][m]
position = (i,m)
for j in xrange(1,m):
if(score[n][j] > Fmax):
Fmax = score[n][j]
position = (n,j)
return position
def printScore(score,n,m):
for i in xrange(n):
for j in xrange(m):
sys.stdout.write(str(score[i][j]) + " ")
sys.stdout.write("\n")
def main(argv):
if( len(argv) < 2):
sys.stderr.write("Error: Number of arguments incorrect\n")
sys.exit()
else:
if(os.path.isfile(argv[0]) and os.path.isfile(argv[1])):
infile = argv[0]
handler = readFile(infile)
sequence1 = readFasta(handler).next()
infile = argv[1]
handler = readFile(infile)
sequence2 = readFasta(handler).next()
d = -4 #gap penalty
n = len(sequence1.sequence) +1
m = len(sequence2.sequence) +1
score=zeros((n,m))
pointer=zeros((n,m))
result = overlapmatches(sequence1.sequence,sequence2.sequence,score,d,pointer)
maxposition = getFmax(result.score,m,n)
alignment = traceback(result.pointer,maxposition,sequence1.sequence,sequence2.sequence)
#printScore(score,n,m)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: AB2/src/multiscore.py
```python
import pexpect
import os.path
import sys
from Bio import SeqIO
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# gap-run counters read by s() via "global"; initialise them so the first call cannot raise a NameError
ha = 0
hb = 0
class ReturnValue(object):
def __init__(self,accesions,file):
self.accesions = accesions
self.file = file
def writeFile(temp,file):
for line in file:
temp.write(line)
return temp
def readFile(name):
file = open(name, 'rU')
return file
def checkFile(file):
if(os.path.isfile(file) == False):
return False
else:
return True
def closeFile(file):
if(file.closed != True):
file.close()
def removeFile(file):
if(os.path.isfile(file) == True):
os.remove(file)
def MultiScore(file,blosum62,alphabet):
handle = readFile(file)
totalscore = 0
records = list(SeqIO.parse(handle, "fasta"))
handle.close()
visited = []
columns = zeros(len(records[0].seq))
# for record in records:
# visited.append(record.id)
# tovisits = [x for x in records if x.id not in visited]
# score = 0
# for tovisit in tovisits:
# score = score + Score(record.seq,tovisit.seq,blosum62,alphabet)
# print score
# totalscore = totalscore + score
for record in records:
visited.append(record.id)
tovisits = [x for x in records if x.id not in visited]
for tovisit in tovisits:
for i in xrange(len(tovisit)):
columns[i] = columns[i] + s(record.seq[i],tovisit.seq[i],blosum62,alphabet)
return totalSum(columns)
def Score(alignment1,alignment2,blosum62,alphabet):
score = 0
for i in xrange(len(alignment1)):
score = score + s(alignment1[i],alignment2[i],blosum62,alphabet)
return score
def totalSum(score):
total = 0
for i in score:
total = total + i
return total
def loadBlosum(file):
handle=open(file,'r')
blosum62=[]
for line in handle.readlines():
blosum62.append(map(int, line.split()))
return blosum62
def s(a,b,blosum62,alphabet):
po = -5
pe = -1
global ha
global hb
if(a!='-' and b!='-'):
ha=0
hb=0
ret = blosum62[alphabet[a]-1][alphabet[b]-1]
elif(a=='-' and b!='-'):
if(ha>0):
ret = pe
else:
ret = po
ha = ha+1
hb = 0
elif(b=='-' and a!='-'):
if(hb>0):
ret = pe
else:
ret = po
ha = 0
hb = hb+1
elif(a=='-' and b=='-'):
ret = 0
ha=0
hb=0
return ret
def Plot(score1,score2):
difference = []
#score es un dict de dict ( total score for each multialignment for each file)
for i in xrange(len(score1)):
difference.append(score1[i] - score2[i])
fig = plt.figure()
fig.subplots_adjust(bottom=0.2)
ax = fig.add_subplot(111)
plt.scatter(difference,score2,c='b',alpha=0.7,cmap=cm.Paired)
plt.show()
def main(argv):
if( len(argv) >= 2):
ha = 0
hb = 0
myscore = []
truescore = []
listing = [s for s in os.listdir(argv[0])
if os.path.isfile(os.path.join(argv[0], s))]
listing.sort(key=lambda s: os.path.getmtime(os.path.join(argv[0], s)))
alphabet = dict(A=1,R=2,N=3,D=4,C=5,Q=6,E=7,G=8,H=9,I=10,L=11,K=12,M=13,F=14,
P=15,S=16,T=17,W=18,Y=19,V=20,B=21,Z=22,X=23,
a=1,r=2,n=3,d=4,c=5,q=6,e=7,g=8,h=9,i=10,l=11,k=12,m=13,f=14,
p=15,s=16,t=17,w=18,y=19,v=20,b=21,z=22,x=23)
blosum62 = loadBlosum(argv[1])
for infile in listing:
if('facit' not in infile):
command = './muscle3.8.31_i86linux32' + ' -in ' + argv[0] + '/' + infile + ' -out' + ' fileout.fa'
child = pexpect.spawn (command)
child.expect(pexpect.EOF)
myscore.append(MultiScore('fileout.fa',blosum62,alphabet))
else:
truescore.append(MultiScore(argv[0] + '/' + infile,blosum62,alphabet))
Plot(myscore,truescore)
else:
sys.stderr.write("The program does not have any argument or the number of arguments is incorrect\n")
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: old_python_courses/Lab1/reformat.py
```python
import os.path
def removeCrapp(chain):
if (chain.find(" ") != -1):
return chain[0:chain.find(" ")] + chain[chain.find("\n"):len(chain)]
else:
return chain
counter = 0
output = ''
name = raw_input("Enter the name of the file: ")
if ( os.path.isfile(name) == False):
print "The file does not exist";
else:
file = open(name, 'r')
for line in file:
if (">" in line):
if((len(output) > 0) and (output[len(output)-1] != "\n")):
output += '\n' + line
else:
output += line
counter = 0
else:
i = 0
exit = False
while ( (exit == False) and (i < len(line)) ):
if ( (line[i] != " ") and (line[i] != '\n') ):
if (counter == 59):
output += line[i] + '\n'
counter = 0
else:
counter += 1
output += line[i]
else:
exit = True
i = i + 1
print output
file.close()
"WikiPedia :UniProt is the Universal Protein resource, a central repository of protein"
"data created by combining the Swiss-Prot, TrEMBL and PIR-PSD databases."
"UniProt is based on protein sequences, many of which are derived from genome sequencing"
"projects. It contains a large amount of information about the biological function of proteins"
"derived from the research literature."
```
#### File: Lab2/src/basedist.py
```python
import os.path
import sys
import math
class gcc(object):
def __init__(self, name,ratios):
self.name = name
self.ratios = ratios
def setRatios(self,g,t,a,c):
self.ratios = dict(ratioG=g,ratioT=t,ratioA=a,ratioC=c)
def getRatios(self):
return self.ratios
def setName(self,name):
self.name = name
def getName(self):
return self.name
def readFile(name):
file = open(name, 'r')
return file
def checkFiles(files):
for file in files:
if(os.path.isfile(file) == False):
return False
return True
def closeFiles(names):
for name in names:
if(not name.closed):
name.close()
def extractName(genome):
return ""
def getRatio(genome):
g = c = a = t = 0
for linea in genome:
if(linea.find(">") == -1):
g += linea.count("G")
c += linea.count("C")
a += linea.count("A")
t += linea.count("T")
gratio = g / float((g+c+a+t))
cratio = c / float((g+c+a+t))
aratio = a / float((g+c+a+t))
tratio = t / float((g+c+a+t))
ratio = dict(ratioG=gratio, ratioT=tratio, ratioA=aratio, ratioC=cratio)
return ratio
def diff(gen1Ratios,gen2Ratios):
if(gen1Ratios == gen2Ratios):
return 0
else:
for type, value in gen1Ratios.iteritems():
if (type == "ratioG"):
gratio = value
elif (type == "ratioC"):
cratio = value
elif (type == "ratioA"):
aratio = value
elif (type == "ratioT"):
tratio = value
for type, value in gen2Ratios.iteritems():
if (type == "ratioG"):
gratio2 = value
elif (type == "ratioC"):
cratio2 = value
elif (type == "ratioA"):
aratio2 = value
elif (type == "ratioT"):
tratio2 = value
diff = pow((aratio - aratio2),2) + pow((cratio - cratio2),2) + pow((gratio - gratio2),2) + pow((tratio - tratio2),2)
return round(math.sqrt(float(diff/4)),2)
def main(argv):
if( len(argv) > 1):
if(checkFiles(argv) != False):
visited = []
sys.stdout.write(" " + str(len(argv)) + "\n")
for arg in argv:
name = arg[0:arg.find(".")]
file = readFile(arg)
genome = gcc(name,getRatio(file))
visited.append(genome)
spaces = 10 - len(genome.getName())
row = genome.getName() + " "*spaces
file.close()
for arg2 in visited:
row += " " + str(diff(genome.getRatios(),arg2.getRatios()))
row += "\n"
sys.stdout.write(row)
else:
sys.stderr.write("Some of the files are missed\n")
else:
sys.stderr.write("The program does not have any argument or the number of arguments is incorrect\n")
if __name__ == "__main__":
main(sys.argv[1:])
```
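A small sketch of the distance computation above, using two hypothetical ratio dictionaries with the same keys getRatio returns.
```python
# Hypothetical base-composition ratios for two genomes.
r1 = dict(ratioG=0.30, ratioC=0.30, ratioA=0.20, ratioT=0.20)
r2 = dict(ratioG=0.25, ratioC=0.25, ratioA=0.25, ratioT=0.25)
print(diff(r1, r2))  # root-mean-square difference over the four bases
```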
#### File: Lab2/src/gccontent.py
```python
import os.path
import sys
def readFile(name):
if ( os.path.isfile(name) == False):
print "The file does not exist";
else:
file = open(name, 'r')
return file
def closeFile(name):
if(not name.closed):
name.close()
def gcContent(genome):
g = c = a = t = 0
for linea in genome:
g += linea.count("G")
c += linea.count("C")
a += linea.count("A")
t += linea.count("T")
return float(g+c)/float(a+t+g+c)
def main(argv):
if( len(argv) > 0):
for arg in argv:
fileName = readFile(arg)
print '%.2f'%(gcContent(fileName))
closeFile(fileName)
else:
print "The program does not have any argument"
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: old_python_courses/ML.4/animate.py
```python
from Tkinter import *
from PIL import Image, ImageTk
def do_animation(currentframe):
def do_image():
wrap.create_image(50,50,image=frame[currentframe])
try:
do_image()
except IndexError:
currentframe = 0
do_image()
wrap.update_idletasks()
currentframe = currentframe + 1
root.after(1000, do_animation, currentframe)
def draw(stateSequence):
global wrap,frame,root
root = Tk()
root.title("WalkingRobot")
frame=[]
for i in stateSequence:
fname="step"+str(i+1)+".png"
img=ImageTk.PhotoImage(Image.open(fname))
frame+=[img]
wrap = Canvas(root, width=200, height=120)
wrap.pack()
root.after(10, do_animation, 0)
root.mainloop()
```
#### File: Omic2/src/Slonim.py
```python
import sys
import os.path
import math
class ReturnValue(object):
def __init__(self,name="",sequence=""):
self.sequence = sequence
self.name = name
class kNN:
def __init__(self):
self.classes = [] #list of the possible classes
self.xs = [] # list of the neighbors
self.ys = [] # list of the classes that the neighbors belong to
self.k = None # numbers of neighbors
def weight(g,c):
return 1
def distance(x,y):
if len(x) != len(y):
raise ValueError, "vectors must be same length"
sum = 0
for i in range(len(x)):
sum += (x[i]-y[i])**2
return math.sqrt(sum)
def neighbor(k,m,n,c):
pass  # stub: the k-nearest-neighbour search was never implemented
def readFile(name):
file = open(name, 'rU')
return file
def permuteSequences(reference,length,read):
L = len(reference)
mutations = list()
if(length > 25):
for i in xrange(L - length-1):
pass  # stub: the sub-sequence enumeration was never implemented
return mutations
def missmatches(sequence1,sequence2):
number = 0
for i in xrange(len(sequence1)):
if(sequence1[i] != sequence2[i]):
number += 1
return number
def main(argv):
if( len(argv) < 2):
sys.stderr.write("Error: Number of arguments incorrect\n")
sys.exit()
else:
if(os.path.isfile(argv[0]) and os.path.isfile(argv[1])):
pass  # stub: the processing of the two input files was never implemented
if __name__ == "__main__":
main(sys.argv[1:])
```
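Several functions above are unfinished stubs, so this is only a hedged sketch of the two helpers that are complete (distance and missmatches), on made-up inputs and assuming the definitions above are in scope.
```python
# Hypothetical inputs; assumes the definitions above are in scope.
x = [1.0, 2.0, 3.5]
y = [1.5, 2.0, 3.0]
print(distance(x, y))               # Euclidean distance between two vectors
print(missmatches("ACGT", "ACCT"))  # number of differing positions -> 1
```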
|
{
"source": "jfnavarro/proteomics",
"score": 3
}
|
#### File: jfnavarro/proteomics/generateFidoInput.py
```python
from lxml import etree
import os
import getopt
import sys
import percolatorUtils as per
import generalUtils as utils
def usage():
print "this script generates a psm-proteins and unique peptide-proteins graph files for FIDO as well as a file containing all the target and decoy proteins names from a input file from a percolator pout.xml file"
print "Usage : generateFidoInput.py <pout.xml> [-o, --output <output.txt>] [-p, --pattern ] [-d, --database <database.txt>] [-h, --help] [-v, --verbose]"
print "--output : the name of the file that will contain the PSMs, probabilities and proteins"
print "--database : the name of the file that will contain the target and decoy protein names"
print "--pattern : the pattern that identifies the decoy hits"
def main(argv):
if( len(argv) < 1):
sys.stderr.write("Error: Number of arguments incorrect\n")
usage()
sys.exit()
else:
fidoOutputFile = "output.txt"
fidoDBfile = "db.txt"
verbose = False
decoy_prefix = "random"
try:
opts, args = getopt.getopt(sys.argv[2:], "o:p:d:hv", ["output=", "pattern=", "database=", "help", "verbose"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
fidoOutputFile = a
elif o in ("-p", "--pattern"):
decoy_prefix = a
elif o in ("-d", "--database"):
fidoDBfile = a
else:
assert False, "unhandled option"
if(os.path.isfile(argv[0])):
infile = argv[0]
else:
sys.stderr.write("Error: XML file not found\n")
sys.exit()
parser = etree.XMLParser(ns_clean=False, huge_tree=True)
try:
tree = etree.parse(infile,parser)
except Exception, inst:
sys.stderr.write("Unexpected error opening %s: %s\n" % (infile, inst))
sys.exit()
if(verbose):
print "Reading " + str(argv[0])
elems = tree.getroot()
percolatorPeptides = per.getPeptides(elems)
percolatorPSMs = per.getPSMs(elems)
if(len(percolatorPSMs) > 0):
if(verbose):
print "writing in " + str(fidoOutputFile)
utils.writeFidoInput(percolatorPSMs,"psms_" + fidoOutputFile,fidoDBfile,decoy_prefix)
if(len(percolatorPeptides) > 0):
utils.writeFidoInput(percolatorPeptides,"peptides_" + fidoOutputFile,fidoDBfile,decoy_prefix)
else:
sys.stderr.write("the input file does not contain any peptide or psms\n")
sys.exit()
if(verbose):
print "Fido file generated"
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jfnavarro/proteomics/generateProsolveInput.py
```python
from lxml import etree
import os
import getopt
import sys
import percolatorUtils as per
import generalUtils as utils
def usage():
print "this script generates a PROSOLVE txt input file from a percolator pout.xml file"
print "Usage : generateProsolveInput.py <pout.xml> [-o, --output <output.txt>] [-h, --help] [-v, --verbose] [-e, --hidden] [-m, --maxpsms]"
print "--hidden : the percolator file was run with hidden decoys in the target database (normal decoys will be discarded)"
print "--maxpsms : the output file containing PSMs will have a maximum of maxpsms number of PSMs per peptide"
print "--output : the name of the two output file containing PSMs and peptides"
def main(argv):
if( len(argv) < 1):
sys.stderr.write("Error: Number of arguments incorrect\n")
usage()
sys.exit()
else:
magnusOutputFile = "output.txt"
verbose = False
hidden = False
maxpsms = 1000
try:
opts, args = getopt.getopt(sys.argv[2:], "o:hvem:", ["output=", "help", "verbose","hidden","maxpsms"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
magnusOutputFile = a
elif o in ("-e", "--hidden"):
hidden = True
elif o in ("-m", "--maxpsms"):
maxpsms = int(a)
else:
assert False, "unhandled option"
if(os.path.isfile(argv[0])):
infile = argv[0]
else:
sys.stderr.write("Error: XML file not found\n")
sys.exit()
parser = etree.XMLParser(ns_clean=False, huge_tree=True)
try:
tree = etree.parse(infile,parser)
except Exception, inst:
sys.stderr.write("Unexpected error opening %s: %s\n" % (infile, inst))
sys.exit()
if(verbose):
print "Reading " + str(argv[0])
elems = tree.getroot()
percolatorPSMs = per.getPSMs(elems)
psms = dict()
for psm in percolatorPSMs:
peptide_clean = psm.peptide[:-2][2:]
if psms.has_key(peptide_clean):
if(psms[peptide_clean].score < psm.score):
psms[peptide_clean] = psm
else:
psms[peptide_clean] = psm
percolatorPeptides = per.getPeptides(elems)
if(len(percolatorPSMs) > 0 and len(percolatorPeptides) > 0):
if(verbose):
print "writing in " + str(magnusOutputFile) + " with max psms = " + str(maxpsms)
utils.writeMagnusInput(percolatorPSMs,"psms_"+magnusOutputFile,hidden,maxpsms)
utils.writeMagnusPeptides(percolatorPeptides,psms,"peptides_"+magnusOutputFile,hidden)
else:
sys.stderr.write("the input file does not contain any peptide or psms\n")
sys.exit()
if(verbose):
print "Magnus files generated"
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jfnavarro/proteomics/parsePercolator.py
```python
from lxml import etree
import os
import getopt
import sys
import percolatorUtils as per
import generalUtils as utils
def usage():
print "this script generates a tab delimited output for PSMs, peptides and proteins from a percolator pout.xml file"
print "Usage : parsePercolator.py <pout.xml> [-o, --output <output.txt>] [-h, --help] [-c, --cutoff] [-v, --verbose]"
print "--output : the name of the file that will contain the tab delimited list of psms|peptides|proteins and probabilities"
print "--cutoff : you want to report only the psms,peptides and proteins below a certain threshold?"
print "--qvalue : uses the qvalues insted of the PEPs for the threshold"
def main(argv):
if( len(argv) < 1):
sys.stderr.write("Error: Number of arguments incorrect\n")
usage()
sys.exit()
else:
OutputFile = "output.txt"
verbose = False
fdr = 1.0
try:
opts, args = getopt.getopt(sys.argv[2:], "o:hvc:", ["output=", "help", "verbose", "cutoff="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
OutputFile = a
elif o in ("-f", "--fdr"):
fdr = float(a)
else:
assert False, "unhandled option"
if(os.path.isfile(argv[0])):
infile = argv[0]
else:
sys.stderr.write("Error: XML file not found\n")
sys.exit()
parser = etree.XMLParser(ns_clean=False, huge_tree=True)
try:
tree = etree.parse(infile,parser)
except Exception, inst:
sys.stderr.write("Unexpected error opening %s: %s\n" % (infile, inst))
sys.exit()
if(verbose):
print "Reading " + str(argv[0])
elems = tree.getroot()
percolatorPSMs = per.getPSMs(elems)
percolatorPeptides = per.getPeptides(elems)
percolatorProteins = per.getProteins(elems)
if(verbose):
print "Read " + str(len(percolatorPSMs)) + " PSMs"
print "Read " + str(len(percolatorPeptides)) + " Peptides"
print "Read " + str(len(percolatorProteins)) + " Proteins"
if(fdr < 1.0 and fdr > 0.0):
percolatorPSMs = [psm for psm in percolatorPSMs if psm.qvalue <= fdr]
percolatorPeptides = [pep for pep in percolatorPeptides if pep.qvalue <= fdr]
percolatorProteins = [prot for prot in percolatorProteins if prot.qvalue <= fdr]
if(len(percolatorPSMs) > 0):
if(verbose):
print "writing in " + str(OutputFile)
utils.writePsms(percolatorPSMs, "psms_" + OutputFile)
if(len(percolatorPeptides) > 0):
utils.writePeptides(percolatorPeptides,"peptides_" + OutputFile)
if(len(percolatorProteins) > 0):
utils.writeProteins(percolatorProteins,"proteins_" + OutputFile)
else:
sys.stderr.write("the input file does not contain any information\n")
sys.exit()
if(verbose):
print "Percolator file parsed"
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jfnavarro/proteomics/plot_qvalues.py
```python
import os
from operator import itemgetter, attrgetter
import getopt
import generalUtils as util
import sys
import random
def getPValues(combined,order=True,decoy_prefix="random"):
##combined sorted in best hit first order
##score has to be unnormalized
combined = sorted(combined,key=attrgetter('score'),reverse=order)
nDecoys = 0
posSame = 0
negSame = 0
p = []
prevScore = -4711.4711
for hit in combined:
if(hit.score != prevScore):
for i in xrange(1,posSame):
p.append(float( nDecoys + ( negSame / (posSame + 1)) * (i + 1) ) )
nDecoys += negSame
negSame = 0
posSame = 0
prevScore = hit.score
if(hit.protein.find(decoy_prefix) == -1):
posSame += 1
else:
negSame += 1
##careful with the order here
for pi in xrange(len(p)):
if(nDecoys > 0):
p[pi] = float(p[pi]) / float(nDecoys)
else:
p[pi] = 1.0
return sorted(p)
def estimatePi0(pvalues,numBoot=100):
pBoot = []
lambdas = []
pi0s = []
n = len(pvalues)
numLambda = 100
maxLambda = 0.5
pvalues = sorted(pvalues,reverse = False)
if(n > 0):
for ix in xrange(numLambda):
lambdav = ((ix + 1) / float(numLambda)) * maxLambda
start = lower_bound(pvalues,lambdav)
Wl = float(n - start)
pi0 = float((Wl / n) / (1 - lambdav))
if (pi0 > 0.0):
lambdas.append(lambdav)
pi0s.append(pi0)
if(len(pi0s) == 0):
print "Error calculating Pi0"
sys.exit()
minPi0 = min(pi0s)
mse = []
for i in xrange(len(pi0s)):
mse.append(0.0)
for boot in xrange(numBoot):
pBoot = bootstrap(pvalues)
n = len(pBoot)
for ix in xrange(len(lambdas)):
start = lower_bound(pBoot,lambdas[ix])
Wl = float(n - start) #float(distance(start,pBoot[n]))
pi0Boot = float((Wl / n) / (1 - lambdas[ix]))
mse[ix] += float( ((pi0Boot - minPi0) * (pi0Boot - minPi0)) )
minIx = mse.index(min(mse)) #distance(mse.begin(), min(mse), mse);
pi0 = max(min(pi0s[minIx], 1.0), 0.0);
return pi0;
else:
return -1
def bootstrap(pvalues):
max_size = 1000
n = len(pvalues);
num_draw = min(n, max_size);
out = []
for ix in xrange(num_draw):
out.append(float(random.choice(pvalues)))
return sorted(out,reverse=False)
def lower_bound(liste,element):
ret = 0
for ix in xrange(len(liste)):
if(float(liste[ix]) >= element):
return ix
return ret
def estimateQvalues(combined,pi0=1.0,prefix="random",order=False,Ties=False,countdecoys = True,fdr=0.01, tdratio=0.0):
##assuming combined sorted in descending order
nTargets = 0
nDecoys = 0
qemp = 0.0
qest = 0.0
prevQemp = 0.0
prevQest = 0.0
ndecoys_fdr = 0
ntargets_fdr = 0
ndecoys_fdr_emp = 0
ntargets_fdr_emp = 0
suma = 0.0
prev_prob = -1000
qvaluesEst = []
qvaluesEmp = []
combined = sorted(combined,key=attrgetter('pep'),reverse=order)
for hit in combined:
isdecoy = hit.protein.find(prefix) != -1
if(isdecoy):
nDecoys += 1
else:
nTargets += 1
if( (Ties and hit.pep != prev_prob) or (not Ties)):
if(nTargets > 0):
qemp = float(nDecoys) / float(nTargets)
if(countdecoys):
suma = float(suma) + float(hit.pep)
qest = float(suma) / float(nTargets + nDecoys)
elif(nTargets > 0):
if(not isdecoy):
suma = float(suma) + float(hit.pep)
qest = float(suma) / float(nTargets)
if(qemp < prevQemp):
qemp = prevQemp
else:
prevQemp = qemp
if(qest < prevQest):
qest = prevQest
else:
prevQest = qest
prev_prob = hit.pep
if(qest <= fdr):
if(isdecoy):
ndecoys_fdr +=1
else:
ntargets_fdr +=1
##this is not totally correct cos I might modify emp qvalues later
if(qemp <= fdr):
if(isdecoy):
ndecoys_fdr_emp +=1
else:
ntargets_fdr_emp +=1
qvaluesEst.append(qest)
qvaluesEmp.append(qemp)
print "Estimating qvalues with " + str(nTargets) + " targets and " + str(nDecoys) + " decoys"
if(nTargets > 0 and nDecoys > 0):
if(tdratio == 0.0):
factor = float( pi0 * (float(nTargets) / float(nDecoys)) )
else:
factor = float( pi0 * (tdratio))
else:
factor = 1.0
for i in xrange(0,len(qvaluesEmp)):
qvaluesEmp[i] *= factor
return sorted(qvaluesEmp),sorted(qvaluesEst),ndecoys_fdr,ntargets_fdr,ndecoys_fdr_emp,ntargets_fdr_emp
def estimateFDR_hidden_decoys(target,pi0=1.0,prefix="entrapment",order=False,Ties=False,countdecoys = True,fdr=0.01, tdratio=0.0):
qvaluesEmp = []
qvaluesEst = []
nDecoys = 0
nTargets = 0
ndecoys_fdr = 0
ntargets_fdr = 0
ndecoys_fdr_emp = 0
ntargets_fdr_emp = 0
qemp = 0.0
qest = 0.0
suma = 0.0
prevQest = 0.0
prevQemp = 0.0
prev_prob = -1000
target = sorted(target,key=attrgetter('pep'),reverse=order)
for hit in target:
isdecoy = hit.protein.find(prefix) != -1
if(isdecoy):
nDecoys += 1
else:
nTargets += 1
if( (Ties and hit.pep != prev_prob) or (not Ties)):
if(nTargets):
qemp = float(nDecoys * 2) / float(nTargets)
if(countdecoys):
suma = float(suma) + float(hit.pep)
qest = float(suma) / float(nTargets + nDecoys)
elif(nTargets):
if(not isdecoy):
suma = float(suma) + float(hit.pep)
qest = float(suma) / float(nTargets)
if(qemp < prevQemp):
qemp = prevQemp
else:
prevQemp = qemp
if(qest < prevQest):
qest = prevQest
else:
prevQest = qest
prev_prob = hit.pep
if(qest <= fdr):
if(hit.protein.find(prefix) != -1):
ndecoys_fdr +=1
else:
ntargets_fdr +=1
##this is not totally correct cos I might modify emp qvalues later
if(qemp <= fdr):
if(isdecoy):
ndecoys_fdr_emp +=1
else:
ntargets_fdr_emp +=1
qvaluesEst.append(qest)
qvaluesEmp.append(qemp)
print "Estimating qvalues with " + str(nTargets) + " targets and " + str(nDecoys) + " decoys"
if(nTargets > 0 and nDecoys > 0):
if(tdratio == 0.0):
factor = float( pi0 * (float(nTargets) / float(nDecoys)) )
else:
factor = float( pi0 * (tdratio))
else:
factor = 1.0
for i in xrange(0,len(qvaluesEmp)):
qvaluesEmp[i] *= factor
return sorted(qvaluesEmp),sorted(qvaluesEst),ndecoys_fdr,ntargets_fdr,ndecoys_fdr_emp,ntargets_fdr_emp
def usage():
print "this scripts plots estimated and empirical q values for multiple set of probabilities and their respective protein name in a metaFile, the metaFile must be like this : filename title"
print "Usage : plot_qvalues.py <metaFile.txt> [d, --prefix <prefix>] [e, --hidden <pattern>] [l, --label <text>] [f, --fdr <number>] [i, --pi0] [r, --tdratio] [t, --ties] [c, --countdecoys] [v, --verbose] [h, --help]"
print "--hidden : the target database contains hidden decoys with pattern = (q values will be estimated from target and hidden decoys)"
print "--prefix : the decoy prefix used to identify decoys"
print "--label : the main label to be used to name the plots"
print "--fdr : the fdr threshold to be used to cut off"
print "--pi0 : use pi0 to adjust the empirical qvalues"
print "--tdratio : target decoy ratio to adjust empirical q values"
print "--ties : count elements with same probability as a cluster"
print "--countdecoys : no include decoys when computing estimated q values"
def main(argv):
if( len(argv) < 1):
sys.stderr.write("Error: Number of arguments incorrect\n")
usage()
sys.exit()
else:
hidden_decoys = False
hidden_pattern = ""
decoy_pattern = "random"
verbose = False
ties = False
countdecoys = True
label = ""
fdr = 0.01
tdratio = 0.0
pi0 = False
try:
opts, args = getopt.getopt(sys.argv[2:], "e:d:vhl:f:i:r:tc", ["hidden=", "prefix=", "verbose", "help", "label=", "fdr=", "pi0=", "tdratio=","ties","countdecoys"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-e", "--hidden"):
hidden_decoys = True
hidden_pattern = a
elif o in ("-d", "--prefix"):
decoy_pattern = a
elif o in ("-l", "--label"):
label = a
elif o in ("-f", "--fdr"):
fdr = float(a)
elif o in ("-r", "--tdratio"):
tdratio = float(a)
print "Using Target Decoy Ratio = " + str(tdratio)
elif o in ("-i", "--pi0"):
pi0 = True
print "Using pi0 to adjust empirical q values"
elif o in ("-c", "--countdecoys"):
countdecoys = False
print "Not counting decoys when computing estimated q values"
elif o in ("-t", "--ties"):
ties = True
print "Counting clusters as one"
else:
assert False, "unhandled option"
if(os.path.isfile(argv[0])):
infile = argv[0]
else:
sys.stderr.write("Error: file " + str(argv[0]) + " not found\n")
sys.exit()
colors = ["red","blue","yellow","black","brown","pink","cyan","darkblue","darkred"]
hits = list()
names = list()
##READING ELEMENTS##
for line in open(infile).readlines():
words = line.split()
if(line != ""):
prob_file = words[0]
names.append(words[1])
if(os.path.isfile(prob_file)):
hits.append(util.importer(prob_file))
if(verbose):
print "reading file " +str(prob_file)
else:
sys.stderr.write("Error: file " + str(prob_file) + " not found\n")
sys.exit()
pi0s = list(xrange(len(hits)))
qvaluesEmps = list(xrange(len(hits)))
qvaluesEsts = list(xrange(len(hits)))
if(hidden_decoys):
print "Estimating qvalues for hidden decoys mode..."
for x in xrange(len(hits)):
##ESTIMATING PI0##
if(hidden_decoys):
hits[x] = [ele for ele in hits[x] if ele.protein.find(decoy_pattern) == -1]
if(pi0):
pi0s[x] = estimatePi0(getPValues(hits[x],True,hidden_pattern))
else:
pi0s[x] = 1.0
else:
if(pi0):
pi0s[x] = estimatePi0(getPValues(hits[x],True,decoy_pattern))
else:
pi0s[x] = 1.0
if(pi0s[x] > 1.0 or pi0s[x] < 0.0):
pi0s[x] = 1.0
##ESTIMATING QVALUEs##
ndecoys = 0
ntargets = 0
ndecoysEmp = 0
ntargetEmp = 0
if(hidden_decoys):
qvaluesEmps[x],qvaluesEsts[x],ndecoys,ntargets,ndecoysEmp,ntargetEmp = estimateFDR_hidden_decoys(hits[x],pi0s[x],hidden_pattern,False,ties,countdecoys,fdr,tdratio)
else:
qvaluesEmps[x],qvaluesEsts[x],ndecoys,ntargets,ndecoysEmp,ntargetEmp = estimateQvalues(hits[x],pi0s[x],decoy_pattern,False,ties,countdecoys,fdr,tdratio)
if(pi0):
print "pi0 file " + str(x+1) + " :" + str(pi0s[x])
print "elements with estimated qvalue below " + str(fdr) + " in file " + str(x+1) + " :" + str(ntargets)
print "elements with empirical qvalue below " + str(fdr) + " in file " + str(x+1) + " :" + str(ntargetEmp)
print "false positive elements with estimated qvalue below " + str(fdr) + " in file " + str(x+1) + " :" + str(ndecoys)
print "false positive elements with empirical qvalue below " + str(fdr) + " in file " + str(x+1) + " :" + str(ndecoysEmp)
if(verbose):
print "generatings plots"
##PLOTTING##
util.plotHist(qvaluesEsts,names,colors,"estimated $q$ value", "#target " + label, label + "_qvalue_estimated.png",fdr)
util.plotHist(qvaluesEmps,names,colors,"empirical $q$ value", "#target " + label, label + "_qvalue_empirical.png",fdr)
util.plotCorrelation(qvaluesEmps,qvaluesEsts,names,colors,"empirical $q$ value","estimated $q$ value",label + "_qvalue_estimated_VS_qvalue_empirical_low_range.png",1.0)
util.plotCorrelation(qvaluesEmps,qvaluesEsts,names,colors,"empirical $q$ value","estimated $q$ value",label + "_qvalue_estimated_VS_qvalue_empirical.png",fdr)
if(verbose):
print "plots generated"
if __name__ == "__main__":
main(sys.argv[1:])
```
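A hedged sketch of how the q-value helpers above fit together; it assumes the hit objects returned by generalUtils.importer (not shown here) expose .score, .pep and .protein attributes, and the probability file name and decoy prefix are hypothetical.
```python
# Hypothetical input file with target/decoy hits (score, PEP, protein per hit).
hits = util.importer("probabilities.txt")
pvals = getPValues(hits, order=True, decoy_prefix="random")
pi0 = estimatePi0(pvals)
qemp, qest, nd, nt, nd_emp, nt_emp = estimateQvalues(hits, pi0=pi0, prefix="random", fdr=0.01)
print(nt)  # targets accepted at 1% estimated FDR
```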
#### File: jfnavarro/proteomics/roc.py
```python
from pylab import *
import random
def _remove_duplicate_styles(rocList):
pref_styles = ['cx-','mx-','yx-','gx-','bx-','rx-']
points = 'ov^>+xd'
colors = 'bgrcmy'
lines = ['-','-.',':']
rand_ls = []
for r in rocList:
if r.linestyle not in rand_ls:
rand_ls.append(r.linestyle)
else:
while True:
if len(pref_styles) > 0:
pstyle = pref_styles.pop()
if pstyle not in rand_ls:
r.linestyle = pstyle
rand_ls.append(pstyle)
break
else:
ls = ''.join(random.sample(colors,1) + random.sample(points,1)+ random.sample(lines,1))
if ls not in rand_ls:
r.linestyle = ls
rand_ls.append(ls)
break
def plot_multiple_roc(rocList,titlep='',labels=None, include_baseline=False, equal_aspect=True,filename=""):
""" Plots multiple ROC curves on the same chart.
Parameters:
rocList: the list of ROCData objects
titlep: The title of the chart
labels: The labels of each ROC curve
include_baseline: if it's True include the random baseline
equal_aspect: keep equal aspect for all roc curves
"""
clf()
ylim((0,1))
xlim((0,1))
xticks(arange(0,1.1,.1))
yticks(arange(0,1.1,.1))
grid(True)
if equal_aspect:
cax = gca()
cax.set_aspect('equal')
xlabel("1 - Specificity")
ylabel("Sensitivity")
title(titlep)
if not labels:
labels = [ '' for x in rocList]
_remove_duplicate_styles(rocList)
for ix, r in enumerate(rocList):
plot([x[0] for x in r.derived_points], [y[1] for y in r.derived_points], r.linestyle, linewidth=1, label=labels[ix])
if include_baseline:
plot([0.0,1.0], [0.0, 1.0], 'k-', label= 'random')
if labels:
legend(loc='lower right')
if(filename != ""):
savefig(filename, format='png')
else:
show()
class ROCData(object):
""" Class that generates an ROC Curve for the data.
Data is in the following format: a list l of tuples t
where:
t[0] = 1 for positive class and t[0] = 0 for negative class
t[1] = score
t[2] = label
"""
def __init__(self,data,linestyle='rx-'):
""" Constructor takes the data and the line style for plotting the ROC Curve.
Parameters:
data: The data, a list l of tuples t (l = [t_0,t_1,...,t_n]) where:
t[0] = 1 for positive class and 0 for negative class
t[1] = a score
t[2] = any label (optional)
linestyle: The matplotlib style string for plots.
Note: The ROCData is still usable w/o matplotlib. The AUC is still available,
but plots cannot be generated.
"""
self.data = sorted(data,lambda x,y: cmp(y[1],x[1]))
self.linestyle = linestyle
self.auc() #Seed initial points with default full ROC
def auc(self,fpnum=0):
""" Uses the trapezoidal ruel to calculate the area under the curve. If fpnum is supplied, it will
calculate a partial AUC, up to the number of false positives in fpnum (the partial AUC is scaled
to between 0 and 1).
It assumes that the positive class is expected to have the higher scores (s(-) < s(+))
Parameters:
fpnum: The cumulative FP count (fps)
Return:
"""
fps_count = 0
relevant_pauc = []
current_index = 0
max_n = len([x for x in self.data if x[0] == 0])
if fpnum == 0:
relevant_pauc = [x for x in self.data]
elif fpnum > max_n:
fpnum = max_n
#Find the upper limit of the data that does not exceed n FPs
else:
while fps_count < fpnum:
relevant_pauc.append(self.data[current_index])
if self.data[current_index][0] == 0:
fps_count += 1
current_index +=1
total_n = len([x for x in relevant_pauc if x[0] == 0])
total_p = len(relevant_pauc) - total_n
#Convert to points in a ROC
previous_df = -1000000.0
current_index = 0
points = []
tp_count, fp_count = 0.0 , 0.0
tpr, fpr = 0, 0
while current_index < len(relevant_pauc):
df = relevant_pauc[current_index][1]
if previous_df != df:
points.append((fpr,tpr,fp_count))
if relevant_pauc[current_index][0] == 0:
fp_count +=1
elif relevant_pauc[current_index][0] == 1:
tp_count +=1
fpr = fp_count/total_n
tpr = tp_count/total_p
previous_df = df
current_index +=1
points.append((fpr,tpr,fp_count)) #Add last point
points.sort(key=lambda i: (i[0],i[1]))
self.derived_points = points
return self._trapezoidal_rule(points)
def _trapezoidal_rule(self,curve_pts):
""" Method to calculate the area under the ROC curve"""
cum_area = 0.0
for ix,x in enumerate(curve_pts[0:-1]):
cur_pt = x
next_pt = curve_pts[ix+1]
cum_area += ((cur_pt[1]+next_pt[1])/2.0) * (next_pt[0]-cur_pt[0])
return cum_area
def calculateStandardError(self,fpnum=0):
""" Returns the standard error associated with the curve.
Parameters:
fpnum: The cumulative FP count (fps)
Return:
the standard error.
"""
area = self.auc(fpnum)
#real positive cases
Na = len([ x for x in self.data if x[0] == 1])
#real negative cases
Nn = len([ x for x in self.data if x[0] == 0])
Q1 = area / (2.0 - area)
Q2 = 2 * area * area / (1.0 + area)
return math.sqrt( ( area * (1.0 - area) + (Na - 1.0) * (Q1 - area*area) +
(Nn - 1.0) * (Q2 - area * area)) / (Na * Nn))
def plot(self,titlep='',include_baseline=False,equal_aspect=True):
""" Method that generates a plot of the ROC curve
Parameters:
title: Title of the chart
include_baseline: Add the baseline plot line if it's True
equal_aspect: Aspects to be equal for all plot
"""
clf()
plot([x[0] for x in self.derived_points], [y[1] for y in self.derived_points], self.linestyle)
if include_baseline:
plot([0.0,1.0], [0.0,1.0],'k-.')
ylim((0,1))
xlim((0,1))
xticks(arange(0,1.1,.1))
yticks(arange(0,1.1,.1))
grid(True)
if equal_aspect:
cax = gca()
cax.set_aspect('equal')
xlabel('1 - Specificity')
ylabel('Sensitivity')
title(titlep)
show()
def confusion_matrix(self,threshold,do_print=False):
""" Returns the confusion matrix (in dictionary form) for a fiven threshold
where all elements > threshold are considered 1 , all else 0.
Parameters:
threshold: threshold to check the decision function
do_print: if it's True show the confusion matrix in the screen
Return:
the dictionary with the TP, FP, FN, TN
"""
pos_points = [x for x in self.data if x[1] >= threshold]
neg_points = [x for x in self.data if x[1] < threshold]
tp,fp,fn,tn = self._calculate_counts(pos_points,neg_points)
if do_print:
print "\t Actual class"
print "\t+(1)\t-(0)"
print "+(1)\t%i\t%i\tPredicted" % (tp,fp)
print "-(0)\t%i\t%i\tclass" % (fn,tn)
return {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn}
def evaluateMetrics(self,matrix,metric=None,do_print=False):
""" Returns the metrics evaluated from the confusion matrix.
Parameters:
matrix: the confusion matrix
metric: the specific metric of the default value is None (all metrics).
do_print: if it's True show the metrics in the screen
Return:
the dictionary with the Accuracy, Sensitivity, Specificity,Efficiency,
PositivePredictiveValue, NegativePredictiveValue, PhiCoefficient
"""
accuracy = (matrix['TP'] + matrix['TN'])/ float(sum(matrix.values()))
sensitivity = (matrix['TP'])/ float(matrix['TP'] + matrix['FN'])
specificity = (matrix['TN'])/float(matrix['TN'] + matrix['FP'])
efficiency = (sensitivity + specificity) / 2.0
positivePredictiveValue = matrix['TP'] / float(matrix['TP'] + matrix['FP'])
NegativePredictiveValue = matrix['TN'] / float(matrix['TN'] + matrix['FN'])
PhiCoefficient = (matrix['TP'] * matrix['TN'] - matrix['FP'] * matrix['FN'])/(
math.sqrt( (matrix['TP'] + matrix['FP']) *
(matrix['TP'] + matrix['FN']) *
(matrix['TN'] + matrix['FP']) *
(matrix['TN'] + matrix['FN']))) or 1.0
if do_print:
print 'Sensitivity: ' , sensitivity
print 'Specificity: ' , specificity
print 'Efficiency: ' , efficiency
print 'Accuracy: ' , accuracy
print 'PositivePredictiveValue: ' , positivePredictiveValue
print 'NegativePredictiveValue' , NegativePredictiveValue
print 'PhiCoefficient' , PhiCoefficient
return {'SENS': sensitivity, 'SPEC': specificity, 'ACC': accuracy, 'EFF': efficiency,
'PPV':positivePredictiveValue, 'NPV':NegativePredictiveValue , 'PHI': PhiCoefficient}
def _calculate_counts(self,pos_data,neg_data):
""" Calculates the number of false positives, true positives, false negatives and true negatives """
tp_count = len([x for x in pos_data if x[0] == 1])
fp_count = len([x for x in pos_data if x[0] == 0])
fn_count = len([x for x in neg_data if x[0] == 1])
tn_count = len([x for x in neg_data if x[0] == 0])
return tp_count,fp_count,fn_count, tn_count
```
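A small usage sketch for ROCData, with made-up (class, score) pairs; it assumes the definitions above are in scope.
```python
# Hypothetical toy data: (true class, classifier score) pairs.
data = [(1, 0.95), (1, 0.80), (0, 0.70), (1, 0.65), (0, 0.40), (0, 0.10)]
roc = ROCData(data)
print(roc.auc())  # area under the full ROC curve
cm = roc.confusion_matrix(0.5, do_print=True)
roc.evaluateMetrics(cm, do_print=True)
# plot_multiple_roc([roc], titlep='toy example', labels=['toy'], include_baseline=True)
```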
#### File: jfnavarro/proteomics/uppmax_xtandem.py
```python
import sys
import os
import getopt
import fnmatch
def getFilesinFolder(pathToFolder,extension):
files = list()
for file_ in os.listdir(pathToFolder):
if fnmatch.fnmatch(file_, '*.' + extension):
files.append(file_)
return files
def make_dir(pathname):
if not os.path.exists('%s' % pathname):
cmd = 'mkdir %s' % pathname
os.system(cmd)
return pathname
def createXTandemInput(spectra_file,output_file,database,parameters,type_,input_file_name):
input_file= "<?xml version=\"1.0\"?>\n" \
" <bioml>\n" \
" <note>\n" \
" Each one of the parameters for x!tandem is entered as a labeled note node.\n" \
" Any of the entries in the default_input.xml file can be over-ridden by\n" \
" adding a corresponding entry to this file. This file represents a minimum\n" \
" input file, with only entries for the default settings, the output file\n" \
" and the input spectra file name.\n" \
" See the taxonomy.xml file for a description of how FASTA sequence list\n" \
" files are linked to a taxon name.\n" \
" </note>\n" \
" <note type=\"input\" label=\"list path, default parameters\">" + parameters + "</note>\n" \
" <note type=\"input\" label=\"list path, taxonomy information\">" + database + "</note>\n" \
" <note type=\"input\" label=\"protein, taxon\">" + type_ + "</note>\n" \
" <note type=\"input\" label=\"spectrum, path\">" + spectra_file + "</note>\n" \
" <note type=\"input\" label=\"output, path\">" + output_file + "</note>\n" \
" </bioml>"
f = open(input_file_name, "w")
f.write(input_file)
f.close()
def createXTandemTaxonomy(output_file,db,label):
taxon_file = "<?xml version=\"1.0\"?>\n" \
" <bioml label=\"x! taxon-to-file matching list\">\n" \
" <taxon label=" + "\"" + label + "\"" + ">\n" \
" <file format=\"peptide\" URL=\"" + db + "\"" + "/>\n" \
" </taxon>\n" \
" </bioml>"
f = open(output_file, "w")
f.write(taxon_file)
f.close()
def submit_job(spectra_file,partNum,taxonomy,parameters,type_,project,time,xtandem,out_dir,input_dir,local_path):
input_file = local_path + "/" + spectra_file[:-5] + "_" + str(partNum) + "_in.xml"
spectra_output = local_path + "/" + spectra_file[:-5] + "_" + str(partNum) + "_output.xml"
createXTandemInput(input_dir + spectra_file,spectra_output,taxonomy,parameters,type_,input_file)
if type_ == "target":
scriptName = "copyFileTarget%s" % (partNum)
elif type_ == "decoy":
scriptName = "copyFileDecoy%s" % (partNum)
job_file = open(("%s/%s" % (out_dir + "/jobs",scriptName)), "w")
job_file.write("#!/bin/bash\n")
job_file.write("\n")
job_file.write("#SBATCH -A %s\n" % (project))
job_file.write("#SBATCH -p core\n")
#job_file.write("#SBATCH -N 2\n")
job_file.write("#SBATCH -n 1\n")
job_file.write("#SBATCH -t %s\n" % (time))
job_file.write("#SBATCH -J xtandem_search\n")
job_file.write("\n")
job_file.write("xtandem=\"%s\"\n" % (xtandem))
job_file.write("$xtandem %s\n" % (input_file))
if(type_ == "target"):
job_file.write("mv %s* %s" %(spectra_output[:-4] ,out_dir + "/target"))
if(type_ == "decoy"):
job_file.write("mv %s* %s" %(spectra_output[:-4] ,out_dir + "/decoy"))
job_file.close()
os.chmod("%s/%s" % (out_dir + "/jobs", scriptName), 0755)
command = "sbatch %s/%s" % (out_dir + "/jobs", scriptName)
os.system(command)
def cleanUp():
os.system("rm *in.xml")
os.system("rm *taxonomy_target.xml")
os.system("rm *taxonomy_decoy.xml")
def usage():
print "this script runs x!tandem on a set of spectra files with the dabatabes and parameters given in UPPMAX"
print "Usage : uppmax_xtandem.py [-o, --output <pathfolder>] [-i, --input <pathfolder>] [-p, --params <pathname>] [-t, --target <pathname>] [d, --decoy <pathname>] [e, --extension <string>] [-h, --help] [-v, --verbose]"
print "--output : the path of the directory where the results will be placed"
print "--input : the path of the directory containing the spectra files"
print "--params : the path of the directory containing the parameter file"
print "--target : the path of the target database in fasta format"
print "--decoy : the path of the decoy database in fasta format"
print "--extension : the path of the decoy database in fasta format"
def main(argv):
if( len(argv) < 6):
sys.stderr.write("Error: Number of arguments incorrect\n")
usage()
sys.exit()
else:
output_dir = ""
input_dir = ""
param_file = ""
target_file = ""
decoy_file = ""
extension = ""
verbose = False
try:
opts, args = getopt.getopt(sys.argv[1:], "o:i:p:t:d:e:hv", ["output=","input=","params=","target=","decoy=","extension=" "help", "verbose"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
output_dir = a
elif o in ("-i", "--input"):
input_dir = a
elif o in ("-p", "--params"):
param_file = a
elif o in ("-t", "--target"):
target_file = a
elif o in ("-d", "--decoy"):
decoy_file = a
elif o in ("-e", "--extension"):
extension = a
else:
assert False, "unhandled option"
if(os.path.isfile(param_file) and os.path.isfile(target_file) and os.path.isfile(decoy_file) and os.path.isdir(output_dir) and os.path.isdir(input_dir)):
if(verbose):
print "Input parameters processed succesfully"
else:
sys.stderr.write("Error: file/s or directoriy/es not found\n")
sys.exit()
spectra_files = getFilesinFolder(input_dir,extension)
xtandem = "/bubo/home/h22/navarro/bin/bin/tandem"
local_path = os.getcwd()
taxonomy_target = local_path + "/taxonomy_target.xml"
taxonomy_decoy = local_path + "/taxonomy_decoy.xml"
project="s00111-298"
time="20:00:00"
##create output target and decoy
make_dir(output_dir + "/target")
make_dir(output_dir + "/decoy")
##create jobs directory
make_dir(output_dir + "/jobs")
##create taxonomy file for target and decoy databases
createXTandemTaxonomy(taxonomy_target,target_file,"target")
createXTandemTaxonomy(taxonomy_decoy,decoy_file,"decoy")
i = 0
if(len(spectra_files) == 0):
sys.stderr.write("Error: the folder does not contain any spectra file\n")
sys.exit()
for spectra in spectra_files:
print "Reading spectra : " + spectra
submit_job(spectra,i,taxonomy_target,param_file,"target",project,time,xtandem,output_dir,input_dir,local_path)
submit_job(spectra,i,taxonomy_decoy,param_file,"decoy",project,time,xtandem,output_dir,input_dir,local_path)
i+=1
cleanUp()
if __name__ == "__main__":
main(sys.argv[1:])
```
|
{
"source": "jfnavarro/scitron",
"score": 2
}
|
#### File: scitron/sarek_merge/variants.py
```python
import os
import vcfpy
import numpy as np
class Variant:
def __init__(self):
self.chrom = None
self.start = None
self.ref = None
self.alt = None
self.effects = None
self.caller = None
self.VAF = None
self.DP = None
self.AD = None
self.sample = None
self.patient = None
self.status = None
@property
def key(self):
return "{}:{} {}>{}".format(self.chrom, self.start, self.ref, self.alt)
def __str__(self):
return '{}:{} {}>{}'.format(self.chrom, self.start, self.ref, self.alt)
def parse_haplotypecaller_germline_variants(filename, FILTER_DP, FILTER_DP_PDX, FILTER_VAF, NO_EFFECT_FILTER=False):
variants = list()
tumor_id = str(os.path.basename(filename)).split('_snpEff')[0].split('HaplotypeCaller_')[1]
reader = vcfpy.Reader.from_path(filename)
FDP = FILTER_DP_PDX if 'PDX' in str(filename) else FILTER_DP
for record in reader:
called = {x.sample: x.data for x in record.calls}
tumor_DP = int(called[tumor_id]['DP'])
tumor_AD = int(called[tumor_id]['AD'][1])
tumor_VAF = np.around(tumor_AD / float(tumor_DP) * 100, 3) if tumor_DP > 0.0 else 0.0
if tumor_VAF >= FILTER_VAF and tumor_DP >= FDP:
effects = set()
for effect in record.INFO['ANN']:
gene = effect.split('|')[3]
effect_name = effect.split('|')[1]
effect_type = effect.split('|')[2]
effect_biotype = effect.split('|')[7]
effect_aachange = effect.split('|')[10]
effect_feature = effect.split('|')[5]
if NO_EFFECT_FILTER or effect_type in ['HIGH', 'MODERATE']:
effects.add((effect_name, gene, effect_aachange, effect_feature))
if len(effects) > 0:
variant = Variant()
variant.chrom = record.CHROM
variant.start = record.POS
variant.ref = record.REF
variant.alt = record.ALT[0].serialize()
variant.caller = 'HaplotypeCaller'
variant.effects = effects
variant.DP = tumor_DP
variant.AD = tumor_AD
variant.VAF = tumor_VAF
variants.append(variant)
return variants
def parse_strelka_germline_variants(filename, FILTER_DP, FILTER_DP_PDX, FILTER_VAF, NO_EFFECT_FILTER=False):
variants = list()
tumor_id = str(os.path.basename(filename)).split('_variants_snpEff')[0].split('Strelka_')[1]
reader = vcfpy.Reader.from_path(filename)
FDP = FILTER_DP_PDX if 'PDX' in str(filename) else FILTER_DP
for record in reader:
called = {x.sample: x.data for x in record.calls}
DP_FIELD = 'DP' if 'SNV' in record.ALT[0].type else 'DPI'
tumor_DP = int(called[tumor_id][DP_FIELD])
tumor_AD = int(called[tumor_id]['AD'][1])
tumor_VAF = np.around((tumor_AD / float(tumor_DP)) * 100, 3) if tumor_DP > 0.0 else 0.0
if 'PASS' in record.FILTER and tumor_VAF >= FILTER_VAF and tumor_DP >= FDP:
effects = set()
for effect in record.INFO['ANN']:
gene = effect.split('|')[3]
effect_name = effect.split('|')[1]
effect_type = effect.split('|')[2]
effect_biotype = effect.split('|')[7]
effect_aachange = effect.split('|')[10]
effect_feature = effect.split('|')[5]
if NO_EFFECT_FILTER or effect_type in ['HIGH', 'MODERATE']:
effects.add((effect_name, gene, effect_aachange, effect_feature))
if len(effects) > 0:
variant = Variant()
variant.chrom = record.CHROM
variant.start = record.POS
variant.ref = record.REF
variant.alt = record.ALT[0].serialize()
variant.caller = 'StrelkaGermline'
variant.effects = effects
variant.DP = tumor_DP
variant.AD = tumor_AD
variant.VAF = tumor_VAF
variants.append(variant)
return variants
def parse_strelka_variants(filename, FILTER_DP, FILTER_DP_PDX, FILTER_VAF, NO_EFFECT_FILTER=False):
variants = list()
reader = vcfpy.Reader.from_path(filename)
FDP = FILTER_DP_PDX if 'PDX' in str(filename) else FILTER_DP
for record in reader:
called = {x.sample: x.data for x in record.calls}
ref_index = record.REF + 'U'
alt_index = str(record.ALT[0].serialize()) + 'U'
normal_AD1 = int(called['NORMAL'][ref_index][0])
normal_AD2 = int(called['NORMAL'][alt_index][0])
normal_DP = normal_AD1 + normal_AD2
normal_VAF = np.around((normal_AD2 / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0
tumor_AD1 = int(called['TUMOR'][ref_index][0])
tumor_AD2 = int(called['TUMOR'][alt_index][0])
tumor_DP = tumor_AD1 + tumor_AD2
tumor_VAF = np.around((tumor_AD2 / float(tumor_DP)) * 100, 3) if tumor_DP > 0.0 else 0.0
if 'PASS' in record.FILTER and 'SOMATIC' in record.INFO \
and tumor_VAF >= FILTER_VAF and tumor_DP >= FDP:
effects = set()
for effect in record.INFO['ANN']:
gene = effect.split('|')[3]
effect_name = effect.split('|')[1]
effect_type = effect.split('|')[2]
effect_biotype = effect.split('|')[7]
effect_aachange = effect.split('|')[10]
effect_feature = effect.split('|')[5]
if NO_EFFECT_FILTER or effect_type in ['HIGH', 'MODERATE']:
effects.add((effect_name, gene, effect_aachange, effect_feature))
if len(effects) > 0:
variant = Variant()
variant.chrom = record.CHROM
variant.start = record.POS
variant.ref = record.REF
variant.alt = record.ALT[0].serialize()
variant.caller = 'Strelka'
variant.effects = effects
variant.DP = tumor_DP
variant.AD = tumor_AD2
variant.VAF = tumor_VAF
variants.append(variant)
return variants
def parse_strelka_indel_variants(filename, FILTER_DP, FILTER_DP_PDX, FILTER_VAF, NO_EFFECT_FILTER=False):
variants = list()
reader = vcfpy.Reader.from_path(filename)
FDP = FILTER_DP_PDX if 'PDX' in str(filename) else FILTER_DP
for record in reader:
called = {x.sample: x.data for x in record.calls}
normal_AD1 = int(called['NORMAL']['TAR'][0])
normal_AD2 = int(called['NORMAL']['TIR'][0])
normal_DP = normal_AD1 + normal_AD2
normal_VAF = np.around((normal_AD2 / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0
tumor_AD1 = int(called['TUMOR']['TAR'][0])
tumor_AD2 = int(called['TUMOR']['TIR'][0])
tumor_DP = tumor_AD1 + tumor_AD2
tumor_VAF = np.around((tumor_AD2 / float(tumor_DP)) * 100, 3) if tumor_DP > 0.0 else 0.0
if 'PASS' in record.FILTER and 'SOMATIC' in record.INFO \
and tumor_VAF >= FILTER_VAF and tumor_DP >= FDP:
effects = set()
for effect in record.INFO['ANN']:
gene = effect.split('|')[3]
effect_name = effect.split('|')[1]
effect_type = effect.split('|')[2]
effect_biotype = effect.split('|')[7]
effect_aachange = effect.split('|')[10]
effect_feature = effect.split('|')[5]
if NO_EFFECT_FILTER or effect_type in ['HIGH', 'MODERATE']:
effects.add((effect_name, gene, effect_aachange, effect_feature))
if len(effects) > 0:
variant = Variant()
variant.chrom = record.CHROM
variant.start = record.POS
variant.ref = record.REF
variant.alt = record.ALT[0].serialize()
variant.caller = 'Strelka-indel'
variant.effects = effects
variant.DP = tumor_DP
variant.AD = tumor_AD2
variant.VAF = tumor_VAF
variants.append(variant)
return variants
def parse_mutect_variants(filename, FILTER_DP, FILTER_DP_PDX, FILTER_VAF, NO_EFFECT_FILTER=False):
variants = list()
tumor_id = str(os.path.basename(filename)).split('_vs_')[0].split('Mutect2_filtered_')[1]
normal_id = str(os.path.basename(filename)).split('_vs_')[1].split('_snpEff')[0]
FDP = FILTER_DP_PDX if 'PDX' in str(filename) else FILTER_DP
reader = vcfpy.Reader.from_path(filename)
for record in reader:
called = {x.sample: x.data for x in record.calls}
normal_DP = int(called[normal_id]['DP'])
normal_AD = int(called[normal_id]['AD'][1])
normal_VAF = np.around(float(called[normal_id]['AF'][0]) * 100, 3) if normal_DP > 0.0 else 0.0
tumor_DP = int(called[tumor_id]['DP'])
tumor_AD = int(called[tumor_id]['AD'][1])
tumor_VAF = np.around(float(called[tumor_id]['AF'][0]) * 100, 3) if tumor_DP > 0.0 else 0.0
if 'PASS' in record.FILTER and tumor_VAF >= FILTER_VAF and tumor_DP >= FDP:
effects = set()
for effect in record.INFO['ANN']:
gene = effect.split('|')[3]
effect_name = effect.split('|')[1]
effect_type = effect.split('|')[2]
effect_biotype = effect.split('|')[7]
effect_aachange = effect.split('|')[10]
effect_feature = effect.split('|')[5]
if NO_EFFECT_FILTER or effect_type in ['HIGH', 'MODERATE']:
effects.add((effect_name, gene, effect_aachange, effect_feature))
if len(effects) > 0:
variant = Variant()
variant.chrom = record.CHROM
variant.start = record.POS
variant.ref = record.REF
variant.alt = record.ALT[0].serialize()
variant.caller = 'Mutect'
variant.effects = effects
variant.DP = tumor_DP
variant.AD = tumor_AD
variant.VAF = tumor_VAF
variants.append(variant)
return variants
def parse_mutect_tumor_onnly_variants(filename, FILTER_DP, FILTER_DP_PDX, FILTER_VAF, NO_EFFECT_FILTER=False):
variants = list()
tumor_id = str(os.path.basename(filename)).split('Mutect2_filtered2_')[1].replace('_snpEff.ann.vcf.gz', '')
FDP = FILTER_DP_PDX if 'PDX' in str(filename) else FILTER_DP
reader = vcfpy.Reader.from_path(filename)
for record in reader:
called = {x.sample: x.data for x in record.calls}
tumor_DP = int(called[tumor_id]['DP'])
tumor_AD = int(called[tumor_id]['AD'][1])
tumor_VAF = np.around(float(called[tumor_id]['AF'][0]) * 100, 3) if tumor_DP > 0.0 else 0.0
if 'PASS' in record.FILTER and tumor_VAF >= FILTER_VAF and tumor_DP >= FDP:
effects = set()
for effect in record.INFO['ANN']:
gene = effect.split('|')[3]
effect_name = effect.split('|')[1]
effect_type = effect.split('|')[2]
effect_biotype = effect.split('|')[7]
effect_aachange = effect.split('|')[10]
effect_feature = effect.split('|')[5]
if NO_EFFECT_FILTER or effect_type in ['HIGH', 'MODERATE']:
effects.add((effect_name, gene, effect_aachange, effect_feature))
if len(effects) > 0:
variant = Variant()
variant.chrom = record.CHROM
variant.start = record.POS
variant.ref = record.REF
variant.alt = record.ALT[0].serialize()
variant.caller = 'MutectTumorOnly'
variant.effects = effects
variant.DP = tumor_DP
variant.AD = tumor_AD
variant.VAF = tumor_VAF
variants.append(variant)
return variants
```
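A hedged sketch of how one of the parsers above might be called; the gzipped, snpEff-annotated Strelka somatic VCF path and the depth/VAF thresholds are made up.
```python
# Hypothetical call: min depth 10 (20 for PDX samples), min VAF 5%.
variants = parse_strelka_variants("Strelka_somatic_snvs_snpEff.ann.vcf.gz",
                                  FILTER_DP=10, FILTER_DP_PDX=20, FILTER_VAF=5.0)
for v in variants:
    print("{}\t{}\t{}".format(v.key, v.VAF, sorted(v.effects)))
```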
|
{
"source": "jfnavarro/st_analysis",
"score": 3
}
|
#### File: st_analysis/scripts/data_plotter.py
```python
import argparse
from matplotlib import pyplot as plt
from stanalysis.preprocessing import *
from stanalysis.analysis import *
import pandas as pd
import numpy as np
import os
import sys
import re
def get_spot_coordinates(spots):
has_index = "_" in spots[0]
return zip(*map(lambda s: (float(s.split("x")[0].split("_")[1] if has_index else s.split("x")[0]),
float(s.split("x")[1])), spots))
def compute_plotting_data(counts, names, cutoff_lower,
cutoff_upper, use_global_scale):
plotting_data = list()
# counts should be a vector and cutoff should be a percentage (0.0 - 1.0)
min_gene_exp = counts.quantile(cutoff_lower)
max_gene_exp = counts.quantile(cutoff_upper)
print("Using lower cutoff of {} percentile {} of total distribution".format(min_gene_exp, cutoff_lower))
print("Using upper cutoff of {} percentile {} of total distribution".format(max_gene_exp, cutoff_upper))
counts[counts < min_gene_exp] = 0
counts[counts > max_gene_exp] = 0
vmin_global = counts.min()
vmax_global = counts.max()
for i, name in enumerate(names):
r = re.compile("^{}_".format(i + 1))
# Filter spot by index (section) unless only one section is given
spots = list(filter(r.match, counts.index)) if len(names) > 1 else counts.index
if len(spots) > 0:
# Compute the expressions for each spot
# as the sum of the counts over the gene
rel_sum = counts.reindex(spots).values
x, y = get_spot_coordinates(spots)
if not rel_sum.any():
sys.stdout.write("Warning, the gene given is not expressed in {}\n".format(name))
vmin = vmin_global if use_global_scale else rel_sum.min()
vmax = vmax_global if use_global_scale else rel_sum.max()
plotting_data.append((x, y, rel_sum, vmin, vmax, name))
return plotting_data
def compute_plotting_data_clusters(counts, names, clusters):
plotting_data = list()
vmin_global = int(clusters.iloc[:, 0].min())
vmax_global = int(clusters.iloc[:, 0].max())
for i, name in enumerate(names):
r = re.compile("^{}_".format(i + 1))
# Filter spot by index (section) unless only one section is given
spots = list(filter(r.match, counts.index)) if len(names) > 1 else counts.index
if len(spots) > 0:
x, y = get_spot_coordinates(spots)
c = np.ravel(clusters.loc[spots, :].values.astype(int))
plotting_data.append((x, y, c, vmin_global, vmax_global, name))
return plotting_data
def plot_data(plotting_data, n_col, n_row, dot_size, data_alpha,
color_scale, xlim, ylim, invert=False, colorbar=False):
fig, ax = plt.subplots(n_row, n_col, figsize=(4 * n_col, 4 * n_row,))
fig.subplots_adjust(left=0.1,
right=0.9,
bottom=0.1,
top=0.9,
hspace=0.2,
wspace=0.4)
sc = list()
for i, a in enumerate(ax.flatten() if n_row > 1 or n_col > 1 else [ax]):
# Make the actual plot
data = plotting_data[i]
s = a.scatter(data[0], data[1], s=dot_size,
cmap=plt.get_cmap(color_scale),
c=data[2], edgecolor="none",
alpha=data_alpha,
vmin=data[3], vmax=data[4])
a.set_title(data[5])
a.set_xlim(xlim)
a.set_ylim(ylim)
a.set_aspect('equal')
if invert:
a.invert_yaxis()
a.set_xticks([])
a.set_yticks([])
if colorbar:
fig.colorbar(s, ax=a, fraction=0.046, pad=0.04)
sc.append(s)
return fig, ax, sc
def main(counts_table_files,
cutoff,
cutoff_upper,
data_alpha,
dot_size,
normalization,
color_scale,
color_scale_clusters,
filter_genes,
clusters_file,
gene_family,
outdir,
use_log_scale,
standard_transformation,
num_exp_genes,
num_exp_spots,
min_gene_expression,
use_global_scale,
num_columns,
xlim,
ylim,
disable_invert_y_axes,
disable_color_bar,
combine_genes):
if cutoff_upper <= cutoff:
sys.stderr.write("Error, incorrect cut-off values {}\n".format(cutoff))
sys.exit(1)
if dot_size < 0:
sys.stderr.write("Error, incorrect dot size {}\n".format(dot_size))
sys.exit(1)
if data_alpha < 0 or data_alpha > 1:
sys.stderr.write("Error, incorrect alpha value {}\n".format(data_alpha))
sys.exit(1)
if len(counts_table_files) == 0 or \
any([not os.path.isfile(f) for f in counts_table_files]):
sys.stderr.write("Error, input counts not present or invalid " \
"format\n{}.\n".format('\n'.join(counts_table_files)))
sys.exit(1)
if gene_family and \
any([not os.path.isfile(f) for f in gene_family]):
sys.stderr.write("Error, input gene family not present or invalid " \
"format\n{}.\n".format('\n'.join(gene_family)))
sys.exit(1)
if num_exp_genes < 0 or num_exp_genes > 1:
sys.stderr.write("Error, invalid number of expressed genes {}\n".format(num_exp_genes))
sys.exit(1)
if num_exp_spots < 0 or num_exp_spots > 1:
sys.stderr.write("Error, invalid number of expressed genes {}\n".format(num_exp_spots))
sys.exit(1)
if outdir is None or not os.path.isdir(outdir):
outdir = os.getcwd()
outdir = os.path.abspath(outdir)
print("Output directory {}".format(outdir))
print("Input datasets {}".format(" ".join(counts_table_files)))
# Merge input datasets (Spots are rows and genes are columns)
counts = aggregate_datatasets(counts_table_files)
print("Total number of spots {}".format(len(counts.index)))
print("Total number of genes {}".format(len(counts.columns)))
# Get the names of the datasets
names = [os.path.splitext(os.path.basename(x))[0] for x in counts_table_files]
# Compute number of columns/rows
n_col = min(num_columns, len(counts_table_files))
n_row = max(int(len(counts_table_files) / n_col), 1)
# Remove noisy spots and genes (Spots are rows and genes are columns)
counts_filtered = filter_data(counts, num_exp_genes,
num_exp_spots, min_gene_expression)
has_clusters = False
if clusters_file and os.path.isfile(clusters_file):
clusters = pd.read_csv(clusters_file, sep="\t", header=None,
index_col=0, engine='c', low_memory=True)
clusters = clusters.reindex(np.intersect1d(counts_filtered.index, clusters.index))
if clusters.shape[0] == 0 or clusters.isna().values.any():
sys.stderr.write("Error, cluster file does not match the input data\n")
sys.exit(1)
has_clusters = True
elif clusters_file:
sys.stderr.write("Error, {} is not a valid file\n".format(clusters_file))
sys.exit(1)
# Normalization
counts_normalized = normalize(counts_filtered, normalization)
# Log the counts
if use_log_scale:
print("Transforming datasets to log space...")
counts_normalized = np.log1p(counts_normalized)
# Apply the z-transformation
if standard_transformation:
print("Applying standard transformation...")
counts_normalized = ztransformation(counts_normalized)
# Gene family plots
if gene_family and combine_genes != "None":
families = [os.path.splitext(os.path.basename(x))[0] for x in gene_family]
counts_families = pd.DataFrame(index=counts_normalized.index, columns=families)
for f, name in zip(gene_family, families):
with open(f, "r") as filehandler:
genes = [x.rstrip() for x in filehandler.readlines()]
if len(genes) == 0:
print("Error, no genes were found in {}\n".format(f))
continue
# Filter the data with the genes in the set
counts = counts_normalized.loc[:, np.intersect1d(genes, counts_normalized.columns)]
if counts.shape[1] == 0:
print("Error, none of the genes from {} were found in the data\n".format(f))
continue
genes_in_set = counts.columns.tolist()
# Compute the combined score
if combine_genes in "NaiveMean":
present_genes = (counts > 0).sum(axis=1) / len(genes_in_set)
counts_families.loc[:, name] = (counts.mean(axis=1) * present_genes).values
elif combine_genes in "NaiveSum":
present_genes = (counts > 0).sum(axis=1) / len(genes_in_set)
counts_families.loc[:, name] = (counts.sum(axis=1) * present_genes).values
else:
# For the CumSum we need to use all the genes so in order to compute p-values
counts_families.loc[:, name] = enrichment_score(counts_normalized, genes_in_set)
# Plot the data
plotting_data = compute_plotting_data(counts_families.loc[:, name],
names,
0.0,
1.0,
use_global_scale)
if len(plotting_data) == 0:
sys.stderr.write("Error, plotting data is empty!\n")
sys.exit(1)
fig, ax, sc = plot_data(plotting_data, n_col, n_row, dot_size, data_alpha, color_scale,
xlim, ylim, not disable_invert_y_axes, not disable_color_bar)
# Save the plot
fig.suptitle(name, fontsize=16)
fig.savefig(os.path.join(outdir, "Combined_{}_joint_plot.pdf".format(name)),
format='pdf', dpi=90)
plt.close(fig)
# Save the proportions
counts_families.to_csv(os.path.join(outdir, "gene_families.tsv"), sep="\t")
# Gene plots
if filter_genes:
try:
counts_final = filter_data_genes(counts_normalized, filter_genes)
# Compute plotting data and plot genes
for gene in counts_final.columns:
print("Plotting gene {}".format(gene))
plotting_data = compute_plotting_data(counts_final.loc[:, gene],
names,
cutoff,
cutoff_upper,
use_global_scale)
if len(plotting_data) == 0:
sys.stderr.write("Error, plotting data is empty!\n")
sys.exit(1)
fig, ax, sc = plot_data(plotting_data, n_col, n_row, dot_size, data_alpha, color_scale,
xlim, ylim, not disable_invert_y_axes, not disable_color_bar)
# Save the plot
fig.suptitle(gene, fontsize=16)
fig.savefig(os.path.join(outdir, "{}_joint_plot.pdf".format(gene)), format='pdf', dpi=90)
plt.close(fig)
except RuntimeError as e:
            sys.stderr.write("No genes could be found in the data...\n")
if has_clusters:
# Compute data for clusters and plot
plotting_data = compute_plotting_data_clusters(counts_normalized, names, clusters)
if len(plotting_data) == 0:
sys.stderr.write("Error, plotting data is empty!\n")
sys.exit(1)
fig, ax, sc = plot_data(plotting_data, n_col, n_row, dot_size, data_alpha, color_scale_clusters,
xlim, ylim, not disable_invert_y_axes, not disable_color_bar)
# Save the plot
fig.suptitle("Clusters", fontsize=16)
fig.savefig(os.path.join(outdir, "Clusters_joint_plot.pdf"), format='pdf', dpi=90)
plt.close(fig)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--counts", required=True, nargs='+', type=str,
help="One or more matrices of counts (spots as rows and genes as columns)")
parser.add_argument("--num-exp-genes", default=0.0, metavar="[FLOAT]", type=float,
help="The percentage of number of expressed genes (>= --min-gene-expression) a spot\n"
"must have to be kept from the distribution of all expressed genes (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--num-exp-spots", default=0.0, metavar="[FLOAT]", type=float,
help="The percentage of number of expressed spots a gene\n" \
"must have to be kept from the total number of spots (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--min-gene-expression", default=1, type=float, metavar="[FLOAT]",
help="The minimum count (number of reads) a gene must have in a spot to be\n"
"considered expressed (default: %(default)s)")
parser.add_argument("--cutoff", default=0.0, metavar="[FLOAT]", type=float,
help="The percentage of reads a gene must have in a spot to be included in the plots from\n"
"the distribution of reads of the gene across all the spots (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--cutoff-upper", default=1.0, metavar="[FLOAT]", type=float,
help="The percentage of reads a gene should not have in a spot to be included in the plots from\n"
"the distribution of reads of the gene across all the spots (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--data-alpha", type=float, default=1.0, metavar="[FLOAT]",
help="The transparency level for the data points, 0 min and 1 max (default: %(default)s)")
parser.add_argument("--dot-size", type=int, default=20, metavar="[INT]",
help="The size of the data points (default: %(default)s)")
parser.add_argument("--color-scale", default="YlOrRd",
type=str,
choices=["hot", "binary", "hsv", "Greys", "inferno", "YlOrRd", "bwr", "Spectral", "coolwarm"],
help="Different color scales for the gene plots (default: %(default)s)")
parser.add_argument("--color-scale-clusters", default="tab20",
type=str,
choices=["tab20", "tab20b", "tab20c" "Set3", "Paired"],
help="Different color scales for the cluster plots (default: %(default)s)")
parser.add_argument("--normalization", default="RAW",
type=str,
choices=["RAW", "REL", "CPM"],
help="Normalize the counts using:\n"
"RAW = absolute counts\n"
"REL = Each gene count divided by the total count of its spot\n"
"CPM = Each gene count divided by the total count of its spot multiplied by its mean\n"
"(default: %(default)s)")
parser.add_argument("--standard-transformation", action="store_true", default=False,
help="Apply the z-score transformation to each feature (gene)")
parser.add_argument("--show-genes", help="Regular expression for gene symbols to be shown (one image per gene).\n"
"The genes matching the reg-exp will be shown in separate files.",
required=False,
default=None,
type=str,
nargs='+')
parser.add_argument("--clusters", help="Path to a tab delimited file containing clustering results for each spot.\n"
"First column spot id and second column the cluster number (integer).",
default=None,
type=str)
parser.add_argument("--gene-family", help="Path to one or more files containing set of genes (one per row).\n"
"A combined image will be generated using the value of --combine-genes",
required=False,
default=None,
type=str,
nargs='+')
parser.add_argument("--outdir", default=None, help="Path to output dir")
parser.add_argument("--use-log-scale", action="store_true", default=False,
help="Plot expression in log space (log2)")
parser.add_argument("--use-global-scale", action="store_true", default=False,
help="Use a global color scale instead of a relative color scale")
parser.add_argument("--num-columns", default=1, type=int, metavar="[INT]",
help="The number of columns (default: %(default)s)")
parser.add_argument("--xlim", default=[1, 33], nargs='+', metavar="[FLOAT]", type=float,
help="The x axis limits to have equally sized sub-images (default: %(default)s)")
parser.add_argument("--ylim", default=[1, 35], nargs='+', metavar="[FLOAT]", type=float,
help="The y axis limits to have equally sized sub-images (default: %(default)s)")
parser.add_argument("--disable-invert-y-axes", action="store_true", default=False,
help="Whether to disable the invert of the y axes or not (default False)")
parser.add_argument("--disable-color-bar", action="store_true", default=False,
help="Whether to disable the color bar or not (default False)")
parser.add_argument("--combine-genes", default="None",
type=str,
choices=["None", "NaiveMean", "NaiveSum", "CumSum"],
help="Whether to generate a combined plot with the all the genes given in --show-genes:\n"
"None = do not create combined plot\n"
"NaiveMean = create combine plot using the mean value of the genes in the spot adjusted by size\n"
"NaiveSum = create combine plot using the sum value of the genes in the spot adjusted by size\n"
"CumSum = create combined plot using a cumulative sum of the genes (0.90) and the Fisher test\n"
"(default: %(default)s)")
args = parser.parse_args()
main(args.counts,
args.cutoff,
args.cutoff_upper,
args.data_alpha,
args.dot_size,
args.normalization,
args.color_scale,
args.color_scale_clusters,
args.show_genes,
args.clusters,
args.gene_family,
args.outdir,
args.use_log_scale,
args.standard_transformation,
args.num_exp_genes,
args.num_exp_spots,
args.min_gene_expression,
args.use_global_scale,
args.num_columns,
args.xlim,
args.ylim,
args.disable_invert_y_axes,
args.disable_color_bar,
args.combine_genes)
```
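The `--normalization` help text above describes the RAW/REL/CPM options implemented in `stanalysis.preprocessing`. As a rough illustration of that description (not the library's actual code, and with an assumed reading of "multiplied by its mean" as the mean of the per-spot totals), a minimal pandas sketch:
```python
import pandas as pd

# Toy counts matrix: spots as rows, genes as columns (illustrative values only).
counts = pd.DataFrame({"geneA": [10, 0, 5], "geneB": [2, 8, 5]},
                      index=["spot1", "spot2", "spot3"])

spot_totals = counts.sum(axis=1)

# REL: each gene count divided by the total count of its spot.
rel = counts.div(spot_totals, axis=0)

# CPM-like: REL scaled back up by the mean of the per-spot totals
# (an assumed reading of the help text, not the library's exact code).
cpm = rel * spot_totals.mean()

print(rel)
print(cpm)
```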
#### File: st_analysis/scripts/supervised_torch.py
```python
import os
import sys
import time
import argparse
import numpy as np
import pandas as pd
import gc
import platform
import random
import copy
from collections import defaultdict
from stanalysis.preprocessing import *
from stanalysis.utils import *
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as utils
import torchvision
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
from sklearn.model_selection import StratifiedShuffleSplit
import gc
# Windows work-around
__spec__ = None
import multiprocessing
SEARCH_BATCH = [(200, 200), (500, 500), (1000, 1000)]
L2 = [0.0, 0.001, 0.0001]
SEARCH_LR = [0.1, 0.01, 0.001, 0.0001]
SEARCH_HL = [(3000, 500), (2000, 500), (1000, 500), (3000, 1000), (2000, 1000), (2000, 300), (1000, 300)]
SEED = 999
def computeWeightsClasses(dataset):
# Distribution of labels
label_count = defaultdict(int)
for _, label in dataset:
label_count[label.item()] += 1
# Weight for each sample
weights = np.asarray([1.0 / x for x in label_count.values()])
return weights
def computeWeights(dataset, nclasses):
count = [0] * nclasses
for item in dataset:
count[item[1]] += 1
weight_per_class = [0.] * nclasses
N = float(sum(count))
for i in range(nclasses):
weight_per_class[i] = N / float(count[i])
weight = [0] * len(dataset)
for idx, val in enumerate(dataset):
weight[idx] = weight_per_class[val[1]]
return np.asarray(weight)
def str_to_act_func(name):
    if name == "TANH":
        return torch.nn.Tanh()
    elif name == "SELU":
        return torch.nn.SELU()
    else:
        return torch.nn.ReLU()
def create_model(n_feature,
n_class,
hidden_layer_one,
hidden_layer_two,
activation_function):
# Init model
H1 = hidden_layer_one
H2 = hidden_layer_two
model = torch.nn.Sequential(
torch.nn.Linear(n_feature, H1),
torch.nn.BatchNorm1d(num_features=H1),
str_to_act_func(activation_function),
torch.nn.Linear(H1, H2),
torch.nn.BatchNorm1d(num_features=H2),
str_to_act_func(activation_function),
torch.nn.Linear(H2, n_class))
return model
def create_loaders(trn_set,
vali_set,
train_batch_size,
validation_batch_size,
train_sampler,
test_sampler,
shuffle_train,
shuffle_test,
kwargs):
# Create loaders
trn_loader = utils.DataLoader(trn_set,
sampler=train_sampler,
shuffle=shuffle_train,
batch_size=train_batch_size,
**kwargs)
vali_loader = utils.DataLoader(vali_set,
sampler=test_sampler,
shuffle=shuffle_test,
batch_size=validation_batch_size,
**kwargs)
return trn_loader, vali_loader
def train(model, trn_loader, optimizer, loss_func, device):
model.train()
training_loss = 0
training_acc = 0
counter = 0
for data, target in trn_loader:
data = Variable(data.to(device))
target = Variable(target.to(device))
# Forward pass
output = model(data)
tloss = loss_func(output, target)
training_loss += tloss.item()
# Zero the gradients
optimizer.zero_grad()
# Backward pass
tloss.backward()
# Update parameters
optimizer.step()
# Compute prediction's score
pred = torch.argmax(output.data, 1)
training_acc += accuracy_score(target.data.cpu().numpy(),
pred.data.cpu().numpy())
counter += 1
avg_loss = training_loss / float(counter)
avg_acc = training_acc / float(counter)
return avg_loss, avg_acc
def test(model, vali_loader, loss_func, device):
model.eval()
test_loss = 0
counter = 0
preds = list()
for data, target in vali_loader:
with torch.no_grad():
data = data.to(device)
target = target.to(device)
output = model(data)
test_loss += loss_func(output, target).item()
pred = torch.argmax(output.data, 1)
preds += pred.cpu().numpy().tolist()
counter += 1
avg_loss = test_loss / float(counter)
return preds, avg_loss
def predict(model, data, device):
model.eval()
with torch.no_grad():
data = data.to(device)
output = model(data)
pred = torch.argmax(output.data, 1)
return output, pred
def main(train_data,
test_data,
train_classes_file,
test_classes_file,
log_scale,
normalization,
stratified_loss,
outdir,
standard_transformation,
train_batch_size,
validation_batch_size,
epochs,
learning_rate,
stratified_sampler,
min_class_size,
use_cuda,
num_exp_genes,
num_exp_spots,
min_gene_expression,
verbose,
hidden_layer_one,
hidden_layer_two,
train_validation_ratio,
train_test_ratio,
grid_search,
activation_function,
l2,
num_genes_keep_train,
num_genes_keep_test,
top_genes_criteria_train,
top_genes_criteria_test):
if not os.path.isfile(train_data):
sys.stderr.write("Error, the training data input is not valid\n")
sys.exit(1)
if not os.path.isfile(train_classes_file):
sys.stderr.write("Error, the train labels input is not valid\n")
sys.exit(1)
if not os.path.isfile(test_data):
sys.stderr.write("Error, the test data input is not valid\n")
sys.exit(1)
if test_classes_file is not None and not os.path.isfile(test_classes_file):
sys.stderr.write("Error, the test labels input is not valid\n")
sys.exit(1)
if min_class_size < 0:
sys.stderr.write("Error, invalid minimum class size\n")
sys.exit(1)
if learning_rate < 0:
sys.stderr.write("Error, invalid learning rate\n")
sys.exit(1)
if hidden_layer_one <= 0 or hidden_layer_two <= 0:
sys.stderr.write("Error, invalid hidden layers\n")
sys.exit(1)
if train_batch_size < 1 or validation_batch_size < 1:
sys.stderr.write("Error, batch size is too small\n")
sys.exit(1)
if epochs < 1:
sys.stderr.write("Error, number of epoch is too small\n")
sys.exit(1)
if num_exp_genes < 0.0 or num_exp_genes > 1.0:
sys.stderr.write("Error, invalid number of expressed genes\n")
sys.exit(1)
if num_exp_spots < 0.0 or num_exp_spots > 1.0:
sys.stderr.write("Error, invalid number of expressed spots\n")
sys.exit(1)
if train_validation_ratio < 0.1 or train_validation_ratio > 0.9:
sys.stderr.write("Error, invalid train test ratio genes\n")
sys.exit(1)
if not torch.cuda.is_available() and use_cuda:
sys.stderr.write("Error, CUDA is not available in this computer\n")
sys.exit(1)
if not outdir or not os.path.isdir(outdir):
outdir = os.getcwd()
print("Output folder {}".format(outdir))
# To ensure reproducibility
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
if use_cuda:
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
print("Loading training dataset...")
train_data_frame = pd.read_csv(train_data, sep="\t", header=0,
index_col=0, engine='c', low_memory=True)
train_data_frame = remove_noise(train_data_frame, num_exp_genes,
num_exp_spots, min_gene_expression)
# Load all the classes for the training set
train_labels = parse_labels(train_classes_file, min_class_size)
print("Loading testing dataset...")
test_data_frame = pd.read_csv(test_data, sep="\t", header=0,
index_col=0, engine='c', low_memory=True)
test_data_frame = remove_noise(test_data_frame, num_exp_genes,
num_exp_spots, min_gene_expression)
# Load all the classes for the prediction set
if test_classes_file is not None:
test_labels = parse_labels(test_classes_file, 0)
# Normalize counts
print("Normalizing...")
train_data_frame = normalize_data(train_data_frame, normalization)
test_data_frame = normalize_data(test_data_frame, normalization)
# Keep top genes (variance or expressed)
train_data_frame = keep_top_genes(train_data_frame, num_genes_keep_train / 100.0,
criteria=top_genes_criteria_train)
test_data_frame = keep_top_genes(test_data_frame, num_genes_keep_test / 100.0,
criteria=top_genes_criteria_test)
# Keep only the record in the training set that intersects with the prediction set
print("Genes in training set {}".format(train_data_frame.shape[1]))
print("Spots in training set {}".format(train_data_frame.shape[0]))
print("Genes in testing set {}".format(test_data_frame.shape[1]))
print("Spots in testing set {}".format(test_data_frame.shape[0]))
intersect_genes = np.intersect1d(train_data_frame.columns.values,
test_data_frame.columns.values)
if len(intersect_genes) == 0:
sys.stderr.write("Error, there are no genes intersecting the train and test datasets\n")
sys.exit(1)
print("Intersected genes {}".format(len(intersect_genes)))
train_data_frame = train_data_frame.loc[:, intersect_genes]
test_data_frame = test_data_frame.loc[:, intersect_genes]
# Log the counts
if log_scale:
print("Transforming datasets to log space...")
train_data_frame = np.log1p(train_data_frame)
test_data_frame = np.log1p(test_data_frame)
# Apply the z-transformation
if standard_transformation:
print("Applying standard transformation...")
train_data_frame = ztransformation(train_data_frame)
test_data_frame = ztransformation(test_data_frame)
# Sort labels data together
shared_spots = np.intersect1d(train_data_frame.index, train_labels.index)
train_data_frame = train_data_frame.loc[shared_spots, :]
train_labels = np.asarray(train_labels.loc[shared_spots, ["cluster"]]).ravel()
if test_classes_file:
shared_spots = np.intersect1d(test_data_frame.index, test_labels.index)
test_data_frame = test_data_frame.loc[shared_spots, :]
test_labels = np.asarray(test_labels.loc[shared_spots, ["cluster"]]).ravel()
# Split train and test datasets
print("Validation set ratio {}\nTest set ratio {}".format(train_validation_ratio, train_test_ratio))
train_counts_x, vali_countx_x, test_counts_x, \
train_labels_y, vali_labels_y, test_labels_y = split_dataset(train_data_frame,
train_labels,
train_validation_ratio,
train_test_ratio,
min_class_size)
# Update labels so to ensure they go from 0-N sequentially
labels_index_map = dict()
index_label_map = dict()
for i, label in enumerate(sorted(set(train_labels_y + vali_labels_y + test_labels_y))):
labels_index_map[label] = i
index_label_map[i] = label
print("Mapping of labels:")
print(index_label_map)
train_labels_y = [labels_index_map[x] for x in train_labels_y]
vali_labels_y = [labels_index_map[x] for x in vali_labels_y]
test_labels_y = [labels_index_map[x] for x in test_labels_y]
print("Training set {}".format(train_counts_x.shape[0]))
print("Validation set {}".format(vali_countx_x.shape[0]))
print("Test set {}".format(test_counts_x.shape[0]))
# PyTorch needs floats
train_counts = train_counts_x.astype(np.float32).values
vali_counts = vali_countx_x.astype(np.float32).values
test_counts = test_counts_x.astype(np.float32).values
# Input and output sizes
n_feature = train_counts.shape[1]
n_ele_train = train_counts.shape[0]
n_ele_test = vali_counts.shape[0]
n_class = max(set(train_labels_y)) + 1
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")
workers = 0 if platform.system() == "Windows" else multiprocessing.cpu_count() - 1
print("Workers {}".format(workers))
kwargs = {'num_workers': workers, 'pin_memory': use_cuda}
# Create Tensor Flow train dataset
X_train = torch.tensor(train_counts)
X_vali = torch.tensor(vali_counts)
X_test = torch.tensor(test_counts)
y_train = torch.from_numpy(np.asarray(train_labels_y, dtype=np.longlong))
y_vali = torch.from_numpy(np.asarray(vali_labels_y, dtype=np.longlong))
y_test = torch.from_numpy(np.asarray(test_labels_y, dtype=np.longlong))
# Create tensor datasets (train + test)
trn_set = utils.TensorDataset(X_train, y_train)
vali_set = utils.TensorDataset(X_vali, y_vali)
if stratified_loss:
print("Using a stratified loss...")
# Compute weights
weights_classes = computeWeightsClasses(trn_set)
weights_classes = torch.from_numpy(weights_classes).float().to(device)
else:
weights_classes = None
# Creating loss
loss_func = nn.CrossEntropyLoss(weight=weights_classes, reduction="mean")
# Create Samplers
if stratified_sampler:
print("Using a stratified sampler for training set...")
weights_train = computeWeights(trn_set, n_class)
weights_train = torch.from_numpy(weights_train).float().to(device)
trn_sampler = utils.sampler.WeightedRandomSampler(weights_train,
len(weights_train),
replacement=False)
else:
trn_sampler = None
vali_sampler = None
learning_rates = [learning_rate] if not grid_search else SEARCH_LR
batch_sizes = [(train_batch_size, validation_batch_size)] if not grid_search else SEARCH_BATCH
hidden_sizes = [(hidden_layer_one, hidden_layer_two)] if not grid_search else SEARCH_HL
l2s = [l2] if not grid_search else L2
best_model = dict()
best_acc = 0
best_lr = 0
best_bs = (0, 0)
best_h = (0, 0)
best_l2 = 0
PATIENCE = 20
for lr in learning_rates:
for l2 in l2s:
for (trn_bs, vali_bs) in batch_sizes:
for (h1, h2) in hidden_sizes:
# Create model
model = create_model(n_feature, n_class, h1, h2, activation_function)
model = model.to(device)
# Create optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2)
# Create loaders
trn_loader, vali_loader = create_loaders(trn_set, vali_set,
trn_bs, vali_bs,
trn_sampler, vali_sampler,
not stratified_sampler, False,
kwargs)
# Train the model
best_local_loss = 10e6
best_local_acc = 0
counter = 0
best_model_local = dict()
if grid_search:
print("Training model with:\n L2 {}\n learning rate {}\n train batch size {}\n "
"test batch size {}\n hidden layer one {}\n hidden layer two {}".format(l2, lr, trn_bs,
vali_bs, h1, h2))
for epoch in range(epochs):
if verbose:
print('Epoch: {}'.format(epoch))
# Training
avg_train_loss, avg_training_acc = train(model, trn_loader, optimizer, loss_func, device)
# Test the model on the validation set
preds, avg_vali_loss = test(model, vali_loader, loss_func, device)
avg_vali_acc = accuracy_score(y_vali.cpu().numpy(), preds)
if verbose:
print("Training set accuracy {}".format(avg_training_acc))
print("Training set loss (avg) {}".format(avg_train_loss))
print("Validation set accuracy {}".format(avg_vali_acc))
print("Validation set loss (avg) {}".format(avg_vali_loss))
# Keep the parameters of the epoch that gives the best loss/accuracy
if avg_vali_loss < best_local_loss:
best_local_acc = avg_vali_acc
best_local_loss = avg_vali_loss
best_model_local = copy.deepcopy(model.state_dict())
counter = 0
else:
counter += 1
# Early out
if counter >= PATIENCE:
print("Early stopping at epoch {}".format(epoch))
break
# Test the model on the test set
model.load_state_dict(best_model_local)
_, preds = predict(model, X_test, device)
test_acc = accuracy_score(y_test.cpu().numpy(), preds.cpu().numpy())
# Check the results to keep the best model
print("Best training accuracy {} and loss (avg.) {}".format(best_local_acc, best_local_loss))
print("Testing accuracy {}".format(test_acc))
if test_acc > best_acc:
best_acc = test_acc
best_model = copy.deepcopy(best_model_local)
best_lr = lr
best_bs = (trn_bs, vali_bs)
best_h = (h1, h2)
best_l2 = l2
print("Model trained!")
print("Activation function {}".format(activation_function))
print("Cross entropy loss")
print("ADAM optimizer with {} L2".format(best_l2))
print("Learning rate {}".format(best_lr))
print("Train batch size {}".format(best_bs[0]))
print("Validation batch size {}".format(best_bs[1]))
print("Hidden layer one {}".format(best_h[0]))
print("Hidden layer two {}".format(best_h[1]))
print("Model accuracy {}".format(best_acc))
# Load and save best model
model = create_model(n_feature, n_class, best_h[0], best_h[1], activation_function)
model = model.to(device)
model.load_state_dict(best_model)
torch.save(model, os.path.join(outdir, "model.pt"))
# Predict
print("Predicting on test data..")
predict_counts = test_data_frame.astype(np.float32).values
test_index = test_data_frame.index
X_pre = torch.tensor(predict_counts)
y_pre = test_labels if test_classes_file is not None else None
out, preds = predict(model, X_pre, device)
# Map labels back to their original value
    preds = [index_label_map[int(x)] for x in preds.cpu().numpy()]
if y_pre is not None:
print("Classification report\n{}".
format(classification_report(y_pre, preds)))
print("Confusion matrix:\n{}".format(confusion_matrix(y_pre, preds)))
with open(os.path.join(outdir, "predicted_classes.tsv"), "w") as filehandler:
for spot, pred, probs in zip(test_index, preds, out.cpu().numpy()):
filehandler.write("{0}\t{1}\t{2}\n".format(spot, pred,
"\t".join(['{:.6f}'.format(x) for x in probs])))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--train-data", required=True, type=str,
help="Path to the input training dataset (matrix of counts, spots as rows)")
parser.add_argument("--test-data", required=True, type=str,
help="Path to the test dataset (to be predicted) (matrix of counts, spots as rows)")
parser.add_argument("--train-classes", required=True, type=str,
help="Path to the training classes file (SPOT LABEL)")
parser.add_argument("--test-classes", required=False, type=str,
help="Path to the test classes file (SPOT LABEL)")
parser.add_argument("--log-scale", action="store_true", default=False,
help="Convert the training and test sets to log space (log2 + 1)")
parser.add_argument("--standard-transformation", action="store_true", default=False,
help="Apply the standard transformation to each gene on the train and test sets")
parser.add_argument("--normalization", default="RAW", metavar="[STR]",
type=str,
choices=["RAW", "REL", "CPM"],
help="Normalize the counts using:\n"
"RAW = absolute counts\n"
"REL = Each gene count divided by the total count of its spot\n"
"CPM = Each gene count divided by the total count of its spot multiplied by its mean\n"
"(default: %(default)s)")
parser.add_argument("--train-batch-size", type=int, default=200, metavar="[INT]",
help="The input batch size for training (default: %(default)s)")
parser.add_argument("--validation-batch-size", type=int, default=200, metavar="[INT]",
help="The input batch size for validation (default: %(default)s)")
parser.add_argument("--epochs", type=int, default=50, metavar="[INT]",
help="The number of epochs to train (default: %(default)s)")
parser.add_argument("--hidden-layer-one", type=int, default=2000, metavar="[INT]",
help="The number of neurons in the first hidden layer (default: %(default)s)")
parser.add_argument("--hidden-layer-two", type=int, default=1000, metavar="[INT]",
help="The number of neurons in the second hidden layer (default: %(default)s)")
parser.add_argument("--train-validation-ratio", type=float, default=0.2, metavar="[FLOAT]",
help="The percentage of the training set that will be used to validate"
"the model during training (default: %(default)s)")
parser.add_argument("--train-test-ratio", type=float, default=0.2, metavar="[FLOAT]",
help="The percentage of the training set that will be used to test"
"the model after training (default: %(default)s)")
parser.add_argument("--learning-rate", type=float, default=0.001, metavar="[FLOAT]",
help="The learning rate for the Adam optimizer (default: %(default)s)")
parser.add_argument("--l2", type=float, default=0.0, metavar="[FLOAT]",
help="The L2 penalty regularization for the ADAM optimizer (default: %(default)s)")
parser.add_argument("--activation-function", default="RELU", metavar="[STR]",
type=str,
choices=["RELU", "TANH", "SELU"],
help="Activation function to be used in the hidden layers:\n"
"RELU = rectified linear unit \n"
"TANH = hyperbolic tangent\n"
"SELU = self normalizing linear unit\n"
"(default: %(default)s)")
parser.add_argument("--use-cuda", action="store_true", default=False,
help="Whether to use CUDA (GPU computation)")
parser.add_argument("--stratified-sampler", action="store_true", default=False,
help="Draw samples with equal probabilities when training")
parser.add_argument("--stratified-loss", action="store_true", default=False,
help="Penalizes more small classes in the loss")
parser.add_argument("--min-class-size", type=int, default=10, metavar="[INT]",
help="The minimum number of elements a class must has in the"
"training set (default: %(default)s)")
parser.add_argument("--verbose", action="store_true", default=False,
help="Whether to show extra messages")
parser.add_argument("--grid-search", action="store_true", default=False,
help="Perform a grid search to find the most optimal hyper parameters")
parser.add_argument("--outdir", help="Path to output directory")
parser.add_argument("--num-exp-genes", default=0.01, metavar="[FLOAT]", type=float,
help="The percentage of number of expressed genes (>= --min-gene-expression) a spot\n"
"must have to be kept from the distribution of all expressed genes (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--num-exp-spots", default=0.01, metavar="[FLOAT]", type=float,
help="The percentage of number of expressed spots (>= --min-gene-expression) a gene\n"
"must have to be kept from the total number of spots (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--min-gene-expression", default=1, type=float, metavar="[FLOAT]",
help="The minimum count a gene must have in a spot to be\n"
"considered expressed when filtering (default: %(default)s)")
parser.add_argument("--num-genes-keep-train", default=50, metavar="[INT]", type=int, choices=range(0, 99),
help="The percentage of genes to discard from the distribution of all the genes\n"
"across all the spots using the variance or the top highest expressed\n"
"(see --top-genes-criteria-train)\n "
"Low variance or low expressed genes will be discarded (default: %(default)s)")
parser.add_argument("--num-genes-keep-test", default=50, metavar="[INT]", type=int, choices=range(0, 99),
help="The percentage of genes to discard from the distribution of all the genes\n"
"across all the spots using the variance or the top highest expressed\n"
"(see --top-genes-criteria-test)\n "
"Low variance or low expressed genes will be discarded (default: %(default)s)")
parser.add_argument("--top-genes-criteria-train", default="Variance", metavar="[STR]",
type=str, choices=["Variance", "TopRanked"],
help="What criteria to use to reduce the number of genes "
"(Variance or TopRanked) (default: %(default)s)")
parser.add_argument("--top-genes-criteria-test", default="Variance", metavar="[STR]",
type=str, choices=["Variance", "TopRanked"],
help="What criteria to use to reduce the number of genes "
"(Variance or TopRanked) (default: %(default)s)")
args = parser.parse_args()
main(args.train_data,
args.test_data,
args.train_classes,
args.test_classes,
args.log_scale,
args.normalization,
args.stratified_loss,
args.outdir,
args.standard_transformation,
args.train_batch_size,
args.validation_batch_size,
args.epochs,
args.learning_rate,
args.stratified_sampler,
args.min_class_size,
args.use_cuda,
args.num_exp_genes,
args.num_exp_spots,
args.min_gene_expression,
args.verbose,
args.hidden_layer_one,
args.hidden_layer_two,
args.train_validation_ratio,
args.train_test_ratio,
args.grid_search,
args.activation_function,
args.l2,
args.num_genes_keep_train,
args.num_genes_keep_test,
args.top_genes_criteria_train,
args.top_genes_criteria_test)
```
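supervised_torch.py counters class imbalance twice: `computeWeightsClasses` feeds inverse-frequency weights into `CrossEntropyLoss`, and `computeWeights` drives a `WeightedRandomSampler`. A self-contained sketch of the same pattern on toy tensors (the toy data and `replacement=True` are choices made here for brevity, not taken from the script):
```python
from collections import Counter

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, WeightedRandomSampler

# Toy imbalanced dataset: 90 samples of class 0, 10 samples of class 1.
X = torch.randn(100, 4)
y = torch.cat([torch.zeros(90, dtype=torch.long), torch.ones(10, dtype=torch.long)])
dataset = TensorDataset(X, y)

counts = Counter(y.tolist())

# Inverse-frequency class weights for the loss (cf. computeWeightsClasses).
class_weights = torch.tensor([1.0 / counts[c] for c in sorted(counts)], dtype=torch.float)
loss_func = nn.CrossEntropyLoss(weight=class_weights)

# Per-sample weights for the sampler (cf. computeWeights); rare classes are drawn more often.
sample_weights = torch.tensor([1.0 / counts[int(label)] for label in y], dtype=torch.float)
sampler = WeightedRandomSampler(sample_weights, num_samples=len(dataset), replacement=True)
loader = DataLoader(dataset, batch_size=20, sampler=sampler)

model = nn.Linear(4, 2)
for data, target in loader:
    loss = loss_func(model(data), target)
    loss.backward()
    break  # one batch is enough to show the wiring
```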
#### File: st_analysis/scripts/unsupervised.py
```python
import argparse
import sys
import os
from sklearn.decomposition import PCA, FastICA, SparsePCA, FactorAnalysis, NMF
from sklearn.cluster import DBSCAN, KMeans, AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.manifold import TSNE
import umap
from stanalysis.visualization import scatter_plot, scatter_plot3d
from stanalysis.preprocessing import *
def main(counts_table_files,
normalization,
num_clusters,
num_exp_genes,
num_exp_spots,
min_gene_expression,
num_genes_discard,
clustering,
dimensionality,
use_log_scale,
num_dimensions,
spot_size,
top_genes_criteria,
outdir,
tsne_perplexity,
tsne_theta,
umap_neighbors,
umap_min_dist,
umap_metric,
tsne_initial_dims,
pca_auto_components,
dbscan_min_size,
dbscan_eps,
SEED):
if len(counts_table_files) == 0 or \
any([not os.path.isfile(f) for f in counts_table_files]):
sys.stderr.write("Error, input file/s not present or invalid format\n")
sys.exit(1)
if num_clusters is None and clustering != "DBSCAN":
sys.stderr.write("Error, num_clusters must be given if clustering algorithm is not DBSCAN\n")
sys.exit(1)
if tsne_theta < 0.0 or tsne_theta > 1.0:
sys.stdout.write("Warning, invalid value for theta. Using default..\n")
tsne_theta = 0.5
    if num_exp_genes < 0 or num_exp_spots < 0:
        sys.stderr.write("Error, num_exp_genes and num_exp_spots must be >= 0.\n")
        sys.exit(1)
    if tsne_initial_dims <= num_dimensions and dimensionality == "tSNE":
        sys.stderr.write("Error, the number of initial dimensions cannot be <= the number of dimensions.\n")
        sys.exit(1)
    if pca_auto_components is not None and (pca_auto_components <= 0.0 or pca_auto_components > 1.0):
        sys.stderr.write("Error, pca_auto_components must be > 0 and <= 1.0.\n")
sys.exit(1)
if dbscan_eps <= 0.0:
sys.stdout.write("Warning, invalid value for DBSCAN eps. Using default..\n")
dbscan_eps = 0.5
if num_exp_genes < 0 or num_exp_genes > 1:
sys.stderr.write("Error, invalid number of expressed genes \n")
sys.exit(1)
if num_exp_spots < 0 or num_exp_spots > 1:
sys.stderr.write("Error, invalid number of expressed genes \n")
sys.exit(1)
if num_genes_discard < 0 or num_genes_discard > 1:
sys.stderr.write("Error, invalid number of genes to discard \n")
sys.exit(1)
if outdir is None or not os.path.isdir(outdir):
outdir = os.getcwd()
outdir = os.path.abspath(outdir)
print("Output directory {}".format(outdir))
print("Input datasets {}".format(" ".join(counts_table_files)))
# Merge input datasets (Spots are rows and genes are columns)
counts = aggregate_datatasets(counts_table_files)
print("Total number of spots {}".format(len(counts.index)))
print("Total number of genes {}".format(len(counts.columns)))
# Remove noisy spots and genes (Spots are rows and genes are columns)
counts = remove_noise(counts,
num_exp_genes,
num_exp_spots,
min_expression=min_gene_expression)
if len(counts.index) < 5 or len(counts.columns) < 5:
sys.stdout.write("Error, too many spots/genes were filtered.\n")
sys.exit(1)
# Normalize data
print("Computing per spot normalization...")
norm_counts = normalize_data(counts, normalization)
if use_log_scale:
print("Using pseudo-log counts log1p(counts)")
norm_counts = np.log1p(norm_counts)
# Keep top genes (variance or expressed)
norm_counts = keep_top_genes(norm_counts,
num_genes_discard,
criteria=top_genes_criteria)
if "None" not in dimensionality:
print("Performing dimensionality reduction...")
if "tSNE" in dimensionality:
# First PCA and then TSNE
if norm_counts.shape[1] > tsne_initial_dims:
y = PCA(whiten=True,
n_components=tsne_initial_dims).fit_transform(norm_counts)
else:
y = norm_counts
local_perplexity = min(y.shape[0] / 3.5, tsne_perplexity)
reduced_data = TSNE(n_components=num_dimensions,
angle=tsne_theta,
random_state=SEED,
perplexity=local_perplexity).fit_transform(y)
elif "PCA" in dimensionality:
n_comps = num_dimensions
solver = "auto"
if pca_auto_components is not None:
n_comps = pca_auto_components
solver = "full"
reduced_data = PCA(n_components=n_comps,
svd_solver=solver,
whiten=True,
random_state=SEED,
copy=True).fit_transform(norm_counts)
elif "ICA" in dimensionality:
reduced_data = FastICA(n_components=num_dimensions,
algorithm='parallel',
whiten=True,
fun='logcosh',
w_init=None,
random_state=SEED).fit_transform(norm_counts)
elif "SPCA" in dimensionality:
import multiprocessing
            reduced_data = SparsePCA(n_components=num_dimensions,
                                     alpha=1,
                                     random_state=SEED,
                                     n_jobs=multiprocessing.cpu_count() - 1).fit_transform(norm_counts)
elif "FactorAnalysis" in dimensionality:
reduced_data = FactorAnalysis(n_components=num_dimensions,
random_state=SEED).fit_transform(norm_counts)
elif "NMF" in dimensionality:
reduced_data = NMF(n_components=num_dimensions,
init='random',
random_state=SEED).fit_transform(norm_counts)
else:
reduced_data = umap.UMAP(n_neighbors=umap_neighbors,
min_dist=umap_min_dist,
n_components=num_dimensions,
random_state=SEED,
metric=umap_metric).fit_transform(norm_counts)
print("Performing clustering...")
# Do clustering on the dimensionality reduced coordinates
if "KMeans" in clustering:
labels = KMeans(init='k-means++',
n_clusters=num_clusters,
n_init=10).fit_predict(reduced_data)
elif "Hierarchical" in clustering:
labels = AgglomerativeClustering(n_clusters=num_clusters,
affinity='euclidean',
linkage='ward').fit_predict(reduced_data)
elif "DBSCAN" in clustering:
labels = DBSCAN(eps=dbscan_eps,
min_samples=dbscan_min_size,
metric='euclidean',
n_jobs=-1).fit_predict(reduced_data)
else:
gm = GaussianMixture(n_components=num_clusters,
covariance_type='full').fit(reduced_data)
labels = gm.predict(reduced_data)
# Check if there are -1 in the labels and that the number of labels is correct
if -1 in labels or len(labels) != len(norm_counts.index):
sys.stderr.write("Error, something went wrong in the clustering..\n")
sys.exit(1)
    # If labels start at 0, shift them so cluster ids start at 1
if 0 in labels:
labels = labels + 1
# Plot the clustered spots with the class color in the reduced space
if num_dimensions == 3:
scatter_plot3d(x_points=reduced_data[:, 0],
y_points=reduced_data[:, 1],
z_points=reduced_data[:, 2],
colors=labels,
output=os.path.join(outdir, "computed_dim_red_3D.pdf"),
title='Computed clusters (color)',
alpha=0.8,
size=spot_size)
with open(os.path.join(outdir, "computed_dim_red_3D.tsv"), "w") as filehandler:
for s, x, y, z, l in zip(norm_counts.index,
reduced_data[:, 0],
reduced_data[:, 1],
reduced_data[:, 2],
labels):
filehandler.write("{}\t{}\t{}\t{}\t{}\n".format(s, x, y, z, l))
else:
scatter_plot(x_points=reduced_data[:, 0],
y_points=reduced_data[:, 1],
colors=labels,
output=os.path.join(outdir, "computed_dim_red_2D.pdf"),
title='Computed clusters (color)',
alpha=0.8,
invert_y=False,
size=spot_size)
with open(os.path.join(outdir, "computed_dim_red_2D.tsv"), "w") as filehandler:
for s, x, y, l in zip(norm_counts.index,
reduced_data[:, 0],
reduced_data[:, 1],
labels):
filehandler.write("{}\t{}\t{}\t{}\n".format(s, x, y, l))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--counts", required=True, nargs='+', type=str,
help="One or more matrices with gene counts per spot (genes as columns)")
parser.add_argument("--normalization", default="RAW",
type=str,
choices=["RAW", "REL", "CPM"],
help="Normalize the counts using:\n"
"RAW = absolute counts\n"
"REL = Each gene count divided by the total count of its spot\n"
"CPM = Each gene count divided by the total count of its spot multiplied by its mean\n"
"(default: %(default)s)")
parser.add_argument("--num-clusters", default=None, metavar="[INT]", type=int, choices=range(2, 30),
help="The number of clusters expected to find.\n"
"Note that this parameter has no effect with DBSCAN clustering.")
parser.add_argument("--num-exp-genes", default=0.01, metavar="[FLOAT]", type=float,
help="The percentage of number of expressed genes (>= --min-gene-expression) a spot\n"
"must have to be kept from the distribution of all expressed genes (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--num-exp-spots", default=0.01, metavar="[FLOAT]", type=float,
help="The percentage of number of expressed spots a gene\n"
"must have to be kept from the total number of spots (0.0 - 1.0) (default: %(default)s)")
parser.add_argument("--min-gene-expression", default=1, type=int, metavar="[INT]", choices=range(1, 50),
help="The minimum count (number of reads) a gene must have in a spot to be\n"
"considered expressed (default: %(default)s)")
parser.add_argument("--num-genes-discard", default=0.5, metavar="[FLOAT]", type=float,
help="The percentage of genes (0.0 - 1.0) to discard from the distribution of all the genes\n"
"across all the spots using the variance or the top highest expressed\n"
"(see --top-genes-criteria)\n "
"Low variance or lowly expressed will be discarded (default: %(default)s)")
parser.add_argument("--clustering", default="KMeans",
type=str, choices=["Hierarchical", "KMeans", "DBSCAN", "Gaussian"],
help="What clustering algorithm to use after the dimensionality reduction:\n"
"Hierarchical = Hierarchical clustering (Ward)\n"
"KMeans = Suitable for small number of clusters\n"
"DBSCAN = Number of clusters will be automatically inferred\n"
"Gaussian = Gaussian Mixtures Model\n"
"(default: %(default)s)")
parser.add_argument("--dimensionality", default="tSNE",
type=str, choices=["None", "tSNE", "PCA", "ICA", "SPCA", "FactorAnalysis", "NMF", "UMAP"],
help="What dimensionality reduction algorithm to use before the clustering:\n"
"None = no dimensionality reduction\n"
"tSNE = t-distributed stochastic neighbor embedding\n"
"PCA = Principal component analysis\n"
"ICA = Independent component analysis\n"
"SPCA = Sparse principal component analysis\n"
"FactorAnalysis = Linear model with Gaussian latent variables\n"
"NMF = Non Negative Matrix Factorization\n"
"UMAP = Uniform Manifold Approximation and Projection\n"
"(default: %(default)s)")
parser.add_argument("--use-log-scale", action="store_true", default=False,
help="Transform the counts to log2(counts + 1) after normalization")
parser.add_argument("--num-dimensions", default=2, metavar="[INT]", type=int,
help="The number of dimensions to use in the dimensionality reduction. (default: %(default)s)")
parser.add_argument("--spot-size", default=4, metavar="[INT]", type=int,
help="The size of the dots when generating the scatter plots. (default: %(default)s)")
parser.add_argument("--top-genes-criteria", default="Variance", metavar="[STR]",
type=str, choices=["Variance", "TopRanked"],
help="What criteria to use to keep top genes before doing\n"
"the dimensionality reduction (Variance or TopRanked) (default: %(default)s)")
parser.add_argument("--tsne-perplexity", default=30, metavar="[INT]", type=int,
help="The value of the perplexity for the t-SNE method. (default: %(default)s)")
parser.add_argument("--tsne-theta", default=0.5, metavar="[FLOAT]", type=float,
help="The value of theta for the t-SNE method. (default: %(default)s)")
parser.add_argument("--umap-neighbors", default=15, metavar="[INT]", type=int,
help="The number of neighboring points used in local approximations of "
"manifold structure (UMAP) (default: %(default)s)")
parser.add_argument("--umap-min-dist", default=0.1, metavar="[FLOAT]", type=float,
help="This controls how tightly the embedding is allowed to compress "
"points together (UMAP) (default: %(default)s)")
parser.add_argument("--umap-metric", default="euclidean", metavar="[STR]", type=str,
help="This controls how the distance is computed in the ambient space "
"of the input data (UMAP) (default: %(default)s)")
parser.add_argument("--tsne-initial-dims", default=50, metavar="[INT]", type=int,
help="The number of initial dimensions of the PCA step in "
"the t-SNE clustering. (default: %(default)s)")
parser.add_argument("--outdir", default=None, help="Path to output dir")
parser.add_argument("--pca-auto-components", default=None, metavar="[FLOAT]", type=float,
help="For the PCA dimensionality reduction the number of dimensions\n"
"are computed so to include the percentage of variance given as input [0.1-1].")
parser.add_argument("--dbscan-min-size", default=5, metavar="[INT]", type=int,
help="The value of the minimum cluster sizer for DBSCAN. (default: %(default)s)")
parser.add_argument("--dbscan-eps", default=0.5, metavar="[FLOAT]", type=float,
help="The value of the EPS parameter for DBSCAN. (default: %(default)s)")
parser.add_argument("--seed", default=999, metavar="[INT]", type=int,
help="The value of the random seed. (default: %(default)s)")
args = parser.parse_args()
main(args.counts,
args.normalization,
args.num_clusters,
args.num_exp_genes,
args.num_exp_spots,
args.min_gene_expression,
args.num_genes_discard,
args.clustering,
args.dimensionality,
args.use_log_scale,
args.num_dimensions,
args.spot_size,
args.top_genes_criteria,
args.outdir,
args.tsne_perplexity,
args.tsne_theta,
args.umap_neighbors,
args.umap_min_dist,
args.umap_metric,
args.tsne_initial_dims,
args.pca_auto_components,
args.dbscan_min_size,
args.dbscan_eps,
args.seed)
```
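unsupervised.py always reduces dimensionality first and then clusters the reduced coordinates. A minimal sketch of the PCA-then-KMeans variant on synthetic counts (all data below is made up; the script itself goes through `stanalysis.preprocessing` before this step):
```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

rng = np.random.default_rng(999)

# Synthetic "spots x genes" matrix with two loosely separated groups.
counts = np.vstack([rng.poisson(5.0, size=(50, 200)),
                    rng.poisson(1.0, size=(50, 200))]).astype(float)

# Pseudo-log counts, two-dimensional PCA embedding, then clustering of the embedding.
log_counts = np.log1p(counts)
reduced = PCA(n_components=2, whiten=True, random_state=999).fit_transform(log_counts)
labels = KMeans(n_clusters=2, n_init=10, random_state=999).fit_predict(reduced)

print(labels[:10], labels[-10:])
```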
|
{
"source": "jfnavarro/stereoscope",
"score": 2
}
|
#### File: stereoscope/stsc/fit.py
```python
import sys
from os import mkdir
import os.path as osp
from typing import Union, Dict
import torch as t
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import stsc.models as M
import stsc.datasets as D
import stsc.utils as utils
def fit(model: Union[M.ScModel, M.STModel],
dataset: D.CountData,
loss_tracker: utils.LossTracker,
device: t.device,
epochs: int,
learning_rate: float,
batch_size: int = None,
silent_mode: bool = False,
**kwargs
) -> None:
"""Fit Model
Generic function to fit models
Parameter:
---------
model : Union[M.ScModel,M.STModel],
model to be fitted
dataset : D.CountData
CountData dataset representing either
single cell or ST data
loss_tracker : utils.LossTracker
LossTracker object
epochs : int
number of epochs to run
    learning_rate : float
learning rate during optimization
batch_size : int
batch size, if none is provided
no batching will occur. Recommended
to use when utilizing GPU resources
silent_mode : bool
whether to use silent mode or not
Returns:
-------
    None; the loss progression throughout
    optimization is recorded via loss_tracker
"""
# move model to device
model.to(device)
# define optimizer
optim = t.optim.Adam(model.parameters(), lr=learning_rate)
# instantiate progressbar
progressBar = utils.SimpleProgressBar(epochs,
silent_mode=silent_mode,
length=20
)
# use full dataset if no batch size is specified
if batch_size is None:
batch_size = dataset.M
else:
batch_size = int(np.min((batch_size, dataset.M)))
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=False,
)
# Use try/except to catch SIGINT
# for early interruption
try:
for epoch in range(epochs):
epoch_loss = 0.0
for batch in dataloader:
# move batch items to device
for k, v in batch.items():
batch[k] = v.to(device)
batch['x'].requires_grad = True
# reset gradients
optim.zero_grad()
# compute loss
loss = model.forward(**batch)
epoch_loss += loss.item()
# compute gradients
loss.backward()
# update parameters based on gradients
optim.step()
# update progress bar
progressBar(epoch, epoch_loss)
# record loss progression
loss_tracker(epoch_loss, epoch)
# newline after completion
print('\n')
# write final loss
loss_tracker.write_history()
except KeyboardInterrupt:
print('\n\nPress Ctrl+C again to interrupt whole process')
def fit_st_data(st_data: D.CountData,
R: pd.DataFrame,
logits: pd.DataFrame,
loss_tracker: utils.LossTracker,
device: t.device,
st_epochs: int,
learning_rate: float,
st_batch_size: int,
silent_mode: bool = False,
st_from_model: str = None,
keep_noise: bool = False,
**kwargs
) -> Dict[str, Union[pd.DataFrame, M.STModel]]:
"""Fit ST Data model
Estimate proportion values for
ST data
Parameter:
---------
st_data : D.CountData
        CountData object containing ST data
R :pd.DataFrame
rates for each gene and celltype [n_genes x n_types]
logits : pd.DataFrame
logits for each gene [n_genes]
loss_tracker : utils.LossTracker
device : t.device
device to which objects should be
moved during optimization
st_epochs : int
number of epochs
learning_rate : float
learning rate during optimization
st_batch_size : int
        batch size; if none is provided no
batching will occur
silent_mode : bool
run in silent mode. Default False.
st_from_model : str
path to pre-fitted st-model state dict.
Should be a '.pt' object
keep_noise : bool
keep dummy cell type in output
Returns:
-------
Dictionary with estimated proportions
and st model
"""
# get intersection between ST data
# and single cell parameters
inter = st_data.intersect(R.index)
if inter.shape[0] < 1:
print("[ERROR] : No genes overlap in SC and"
" ST data. Exiting.",
file=sys.stderr
)
sys.exit(-1)
R = R.loc[inter, :]
logits = logits.loc[inter, :]
t.manual_seed(1337)
# generate ST model
st_model = M.STModel(st_data.M,
R=R.values,
logits=logits.values,
device=device,
freeze_beta=kwargs.get("freeze_beta", False)
)
# load st model from path if provided
if st_from_model is not None:
try:
st_model.load_state_dict(t.load(st_from_model))
except:
print('Could not load state dict from >> {}'.format(st_from_model),
file=sys.stderr
)
# estimate proportion values
fit(dataset=st_data,
model=st_model,
loss_tracker=loss_tracker,
device=device,
epochs=st_epochs,
learning_rate=learning_rate,
batch_size=st_batch_size,
silent_mode=silent_mode
)
# get estimated unadjusted proportions
W = st_model.v.data.cpu().numpy().T
# remove dummy cell type proportion values
if not keep_noise:
W = W[:, 0:st_model.K]
w_columns = R.columns
else:
w_columns = R.columns.append(pd.Index(["noise"]))
# normalize to obtain adjusted proportions
W = W / W.sum(axis=1).reshape(-1, 1)
# generate pandas DataFrame from proportions
W = pd.DataFrame(W,
index=st_data.index,
columns=w_columns
)
return {'proportions': W,
'model': st_model
}
def fit_sc_data(sc_data: D.CountData,
loss_tracker: utils.LossTracker,
device: t.device,
sc_epochs: int,
sc_batch_size: int,
learning_rate: float,
silent_mode: bool = False,
sc_from_model: str = None,
**kwargs
) -> Dict[str, Union[pd.DataFrame, M.ScModel]]:
"""Fit single cell data
sc_data : D.CountData
CountData Object containing
single cell data
loss_tracker : utils.LossTracker
device : t.device
device to which objects should be
moved during optimization
sc_epochs : int
number of epochs
learning_rate : float
learning rate during optimization
sc_batch_size : int
        batch size; if none is provided no
batching will occur
silent_mode : bool
run in silent mode. Default False.
sc_from_model : str,
path to pre-fitted st-model state dict.
Should be a '.pt' object
Returns:
-------
Dictionary with estimated rates,
logits and sc model
"""
t.manual_seed(1337)
# define single cell model
sc_model = M.ScModel(n_genes=sc_data.G,
n_celltypes=sc_data.Z,
device=device
)
# load sc-model if provided
if sc_from_model is not None and osp.exists(sc_from_model):
sc_model.load_state_dict(t.load(sc_from_model))
# fit single cell parameters
fit(dataset=sc_data,
model=sc_model,
loss_tracker=loss_tracker,
device=device,
epochs=sc_epochs,
learning_rate=learning_rate,
batch_size=sc_batch_size,
silent_mode=silent_mode
)
# retrieve estimated parameter values
logits = sc_model.o.data.cpu().numpy()
R = sc_model.R.data.cpu().numpy()
# get cell type names
typenames = sc_data.unique_labels()
# generate data frames for parameters
R = pd.DataFrame(R,
index=sc_data.genes,
columns=typenames
)
logits = pd.DataFrame(logits,
index=sc_data.genes,
columns=pd.Index(['logits']))
return {'rates': R,
'logits': logits,
'model': sc_model
}
```
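`fit` above is a thin wrapper around the usual PyTorch training loop: Adam, a `DataLoader`, and zero_grad/backward/step per batch. A stripped-down, self-contained version of that loop with a toy linear model standing in for the stereoscope `ScModel`/`STModel` and `CountData` classes:
```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy regression data and model standing in for CountData and ScModel/STModel.
X = torch.randn(64, 10)
y = torch.randn(64, 1)
loader = DataLoader(TensorDataset(X, y), batch_size=16, shuffle=False)

model = torch.nn.Linear(10, 1)
optim = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(5):
    epoch_loss = 0.0
    for xb, yb in loader:
        optim.zero_grad()                                   # reset gradients
        loss = torch.nn.functional.mse_loss(model(xb), yb)  # model.forward(**batch) in fit()
        loss.backward()                                     # compute gradients
        optim.step()                                        # update parameters
        epoch_loss += loss.item()
    print(epoch, epoch_loss)
```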
|
{
"source": "jfnavarro/st_misc",
"score": 3
}
|
#### File: old_unsorted/misc_rel_0.1/checkResultIndexes.py
```python
import sys
import os
def usage():
print "Usage:"
print "python checkResultIndexes.py testResults.fastq mismatches_out.txt"
def main(result, mismatches_out):
res = open(result)
mismatches = open(mismatches_out, "w")
wrongMappings = 0
missedMappings = 0
mismatch_format = "{}\n" \
"Quality: {}\n" \
"Observed: {}\n" \
"Wrong: {}\n" \
"Correct: {}\n" \
"Shift: {} -> {}\n\n"
while True:
name = res.readline().rstrip()
if not name:
break
seq = res.readline().rstrip()
#drop 2 more
optional = res.readline().rstrip()
qual = res.readline()
try:
foundId = optional.split(' ')[1].split('=')[1]
except IndexError:
missedMappings += 1
continue
correct_id, c_x, c_y = name.split("\t")[1:4]
if foundId != correct_id:
wrongMappings += 1
o_x, o_y = optional.split("\t")[1:]
mismatches.write(mismatch_format.format(seq,
qual[10:10 + len(correct_id)],
seq[10:10 + len(correct_id)],
foundId,
correct_id,
(c_x, c_y),
(o_x, o_y)))
res.close()
mismatches.close()
print 'Wrong: ' + str(wrongMappings)
print 'Missed: ' + str(missedMappings)
if __name__ == "__main__":
#Should have three inputs.
if len(sys.argv) != 3:
usage()
sys.exit(1)
main(sys.argv[1], sys.argv[2])
```
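checkResultIndexes.py consumes its input four lines at a time, the standard FASTQ record layout (header, sequence, separator, quality); its real input additionally carries tab-separated ids and coordinates in the header and separator lines. A minimal sketch of that four-line walk over an in-memory buffer with plain FASTQ records:
```python
from io import StringIO

# Two illustrative plain FASTQ records: header, sequence, separator, quality.
fastq = StringIO("@read1\nACGTACGT\n+\nIIIIIIII\n"
                 "@read2\nTTTTACGT\n+\nIIIIIIII\n")

while True:
    name = fastq.readline().rstrip()
    if not name:          # readline() returns '' at end of file
        break
    seq = fastq.readline().rstrip()
    separator = fastq.readline().rstrip()
    qual = fastq.readline().rstrip()
    print(name, seq, len(qual))
```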
#### File: old_unsorted/misc_rel_0.1/compareJSONfiles.py
```python
import getopt
import sys
import json
import csv
def usage():
print 'python compareJSONfiles.py input1.json input2.json'
def stripExtension(string):
f = string.rsplit('.', 1)
'''
#this part is taken from Jose to create the intermideary files into the
#working folder
if(f[0].find("/") != -1):
return f[0].rsplit('/', 1)[1]
else:
return f[0]
'''
return f[0]
def sortList(FileToSort):
File_sort_out = stripExtension(FileToSort)+ "_sort.json"
list_sort = []
filea = open(FileToSort, 'r')
#put lines into list
for line in filea:
list_sort.append((line))
filea.close()
#sort list
list_sort.sort()
#open a new file and save the sorted lines to that file
fout=open(File_sort_out,"w")
for item in list_sort:
fout.writelines(item)
print "Done Sorting"
return File_sort_out
def compareTranscripts(json_file1,json_file2):
#Store the transcripts which do not match with the new pipeline output
# to out_diff_old and out_diff_new
out_diff_json1 = stripExtension(json_file1)+ "_diff.txt"
out_diff_json2 = stripExtension(json_file2)+ "_diff.txt"
#matches transcripts from old_sort and new_sort into out_sim
out_sim = stripExtension(json_file1)+ "_sim.txt"
#statistics about the how many matches or not
out_stat = stripExtension(json_file1)+ "_stat.txt"
#input files
inF1 = file(json_file1, "r")
inF2 = file(json_file2, "r")
#out Files
out_dif_o = file(out_diff_json1, "w")
out_dif_n = file(out_diff_json2, "w")
out_s = file(out_sim, "w")
outF_stat = file(out_stat,"w")
num_diff=0
num_siml=0
while(True):
str1_old_sort = inF1.readline()
str2_new_sort = inF2.readline()
if str1_old_sort!=str2_new_sort:
num_diff=num_diff+1
out_dif_n.writelines(str2_new_sort)
out_dif_o.writelines(str1_old_sort)
if str1_old_sort==str2_new_sort:
out_s.writelines(str2_new_sort)
num_siml=num_siml+1
if not str1_old_sort:
break;
outF_stat.write("Diff : "+str(num_diff))
outF_stat.write("\nSim : "+str(num_siml))
print "Diff:",num_diff
print "Siml:",num_siml
print "Done Comparing"
def get_IdGeXYNr_column(file_sort):
first_col=[]
s_file=open(file_sort,"r")
IdGeXYNr_cols = stripExtension(file_sort)+ "_IdGeXYNr.txt"
inf = open(IdGeXYNr_cols,"w")
for stringNotMatch in s_file:
first_col.append(stringNotMatch.split(',')[0].strip())
c=first_col[0].rstrip().split('{')[1].strip()
inf.writelines(first_col[0].rstrip().split('{')[1].strip()+"\t")
#take id, gene , x and y and Nr of gene
for i in range(1,5):
inf.writelines(stringNotMatch.rstrip().split(',')[i].strip()+"\t")
inf.writelines("\n")
#reset first_col
for item in first_col:
first_col.pop()
return IdGeXYNr_cols
def main(json_file1,json_file2):
#sort two json files
json_file_sort1=sortList(json_file1)
json_file_sort2=sortList(json_file2)
#get Id , gene , Nr of gene , x and y cordinates from sorted JSON1
json_col1 =get_IdGeXYNr_column(json_file_sort1)
#get Id , gene , Nr of gene , x and y cordinates from sorted JSON2
json_col2 =get_IdGeXYNr_column(json_file_sort2)
#compare two json files
compareTranscripts(json_col1,json_col2)
print "end comparing"
if __name__ == "__main__":
if len(sys.argv)!=3:
usage()
sys.exit(1)
main(sys.argv[1],sys.argv[2])
```
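compareJSONfiles.py sorts both inputs and then counts positional matches and mismatches line by line. For files that fit in memory, similar counts can be read off set operations, as sketched below on in-memory stand-ins (note that a set view ignores line positions and duplicates, so it is not byte-for-byte equivalent to the script):
```python
# In-memory stand-ins for the two sorted, line-oriented files.
lines_a = ['{"id": "A", "gene": "g1"}', '{"id": "B", "gene": "g2"}']
lines_b = ['{"id": "A", "gene": "g1"}', '{"id": "C", "gene": "g3"}']

set_a, set_b = set(lines_a), set(lines_b)
shared = set_a & set_b   # lines present in both files
only_a = set_a - set_b   # lines unique to the first file
only_b = set_b - set_a   # lines unique to the second file

print("Sim:", len(shared))
print("Diff:", len(only_a) + len(only_b))
```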
#### File: old_unsorted/misc_rel_0.1/countCoughtTranscripts.py
```python
import sys
import numpy
import os
def usage():
print "Usage:"
print "python countCoughtTranscripts.py mapped_with_gene.txt"
def getAllMappedReads(mapWithGeneFile):
nameToGene = dict()
exonHit = dict()
exonGeneDict = dict()
utrHit = dict()
GeneDict = dict()
c = 0
total = 0
inF = open(mapWithGeneFile,'r')
insertSize = []
for line in inF:
cols = line.rstrip().split('\t')
start = int(cols[1])
end = int(cols[2])
if cols[18] != '.':
#print "Gene: " + cols[18]
total += 1
isThere = False
if '@'+cols[3] in nameToGene:
isThere = True
nameToGene['@'+cols[3]] = cols[18]
if cols[18] in GeneDict:
if not isThere:
GeneDict[cols[18]] += 1
else:
if not isThere:
GeneDict[cols[18]] = 1
#Is it actually exonic?
gStart = int(cols[10])
gEnd = int(cols[11])
starts = cols[15].split(',')
ends = cols[16].split(',')
#Check if UTR hit!
#5'
overlap_5 = max(0, min(end, int(starts[0])) - max(start, int(gStart)))
#3'
overlap_3 = max(0, min(end, int(gEnd)) - max(start, int(ends[-1-1])))
if overlap_5 > 0 or overlap_3 > 0:
utrHit['@'+cols[3]] = 'Yes'
else:
for i in range(len(starts)-1):
overlap = max(0, min(end, int(ends[i])) - max(start, int(starts[i])))
if overlap > 0:
if cols[18] in exonGeneDict:
exonGeneDict[cols[18]] += 1
else:
exonGeneDict[cols[18]] = 1
intronLen = 0;
if i != 0 and i != len(starts)-2:
if gEnd-end < start-gStart:
#Closer to the right!
for j in range(len(starts)-2,i,-1):
intronLen += int(starts[j])-int(ends[j-1])
else:
#Closer to the left
for j in range(i):
intronLen += int(starts[j+1])-int(ends[j])
dist = min(gEnd-end,start-gStart)-intronLen
if '@'+cols[3] in exonHit:
exonHit['@'+cols[3]] = min(dist,exonHit['@'+cols[3]])
else:
exonHit['@'+cols[3]] = dist
inF.close()
print "Total hitting exon: " + str(len(exonHit.keys()))
for k in exonHit.keys():
insertSize.append(exonHit[k])
# print k +" " +str(exonHit[k])
print "Mean distance to end of gene: " + str(numpy.mean(insertSize))
print "Median distance to end of gene: " + str(numpy.median(insertSize))
print "Total hitting UTR's: " + str(len(utrHit.keys()))
genesOut = open("exon_genes.out","w")
for k in exonGeneDict.keys():
genesOut.write("%s\t%d\n"%(k,exonGeneDict[k]))
genesOut.close()
allgenesOut = open("all_genes.out","w")
for k in GeneDict.keys():
allgenesOut.write("%s\t%d\n"%(k,GeneDict[k]))
allgenesOut.close()
print "Total different genes: " + str(len(GeneDict.keys()))
print "Total diff genes hit on exon: " +str(len(exonGeneDict.keys()))
return nameToGene
def main(mapWithGene):
#Find all mapped!
nameToGene = getAllMappedReads(mapWithGene)
print "Total hits: " + str(len(nameToGene.keys()))
if __name__ == "__main__":
#Should have two inputs.
if len(sys.argv) != 2:
usage()
sys.exit(1)
main(sys.argv[1])
```
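countCoughtTranscripts.py leans on the closed-form overlap of two half-open intervals, `max(0, min(end1, end2) - max(start1, start2))`, for both the UTR and exon checks. A tiny standalone sketch of that calculation with made-up coordinates:
```python
def interval_overlap(start1, end1, start2, end2):
    """Length of the overlap between [start1, end1) and [start2, end2); 0 if disjoint."""
    return max(0, min(end1, end2) - max(start1, start2))

# A read spanning [100, 150) overlaps an exon spanning [120, 200) by 30 bases.
print(interval_overlap(100, 150, 120, 200))  # 30
# Disjoint intervals yield 0.
print(interval_overlap(100, 150, 200, 300))  # 0
```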
#### File: st_feature_classification/st_feature_classification/util.py
```python
from collections import Counter
from itertools import permutations
import random
import numpy as np
from pymongo import Connection
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
def merge(x, i):
return (x / i) * i + i / 2 + 1
def load_tissue_df(coll, merge_dist=1):
records = {}
tissue_labels = {}
if merge_dist <= 1:
merger = lambda x: x
else:
merger = lambda x: merge(x, merge_dist)
for doc in coll.find():
loc = tuple(map(merger, doc['loc']))
if loc in records:
records[loc] += Counter(dict(zip(doc['genes'], doc['expression'])))
else:
records[loc] = Counter(dict(zip(doc['genes'], doc['expression'])))
tissue_labels[loc] = doc['tissue']
df = pd.DataFrame.from_records(records.values())
df.index = pd.MultiIndex.from_tuples(zip(tissue_labels.values(), tissue_labels.keys()), names=['tissue', 'feature'])
return df
def feature_class_visualizer(coll):
feature_labels = {}
for doc in coll.find():
feature_labels[tuple(doc['loc'])] = hash(doc['tissue'])
def vis_feature_classes(df, c, s=100):
plt.scatter(*zip(*feature_labels.keys()), c=feature_labels.values(), marker=',', edgecolor='none', cmap=cm.Dark2_r, alpha=0.5);
plt.scatter(*zip(*[i[1] for i in df.index]), c=c, edgecolor='none', s=s, cmap=cm.gist_rainbow, marker='s', alpha=0.66)
plt.ylim(450, 180)
plt.xlim(90, 370);
plt.axis('off');
return vis_feature_classes
def integer_labels(df, equivalence_classes=None):
n2i = {n: i for i, n in enumerate(df.index.levels[0])}
if equivalence_classes:
for e_class in equivalence_classes:
label = min(n2i[e] for e in e_class)
for e in e_class:
n2i[e] = label
relabel = {i[1]: i[0] for i in enumerate(set(n2i.values()))}
for name in n2i.keys():
n2i[name] = relabel[n2i[name]]
y = [n2i[i[0]] for i in df.index]
return np.array(y)
def compare_clusters(clusters, truth):
max_similarity = 0
for p in permutations(set(truth)):
pc = np.array(map(lambda i: p[i], clusters))
similarity = (pc == truth).sum()
if similarity > max_similarity:
max_similarity = similarity
mpc = pc
return float(max_similarity) / len(clusters), mpc
def weighted_choice(choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w > r:
return c
upto += w
assert False, "Shouldn't get here"
def weighted_random_classes(y, choices):
return np.array([weighted_choice(choices) for _ in range(len(y))])
def weighted_random_score(y):
choices = Counter(y).items()
score = []
for i in range(128):
c = weighted_random_classes(y, choices)
score.append(compare_clusters(c, y)[0])
return np.mean(score)
def split_df(df, r=0.25):
rows = pd.MultiIndex.from_tuples(random.sample(df.index, int(df.shape[0] * r)))
df_train = df.ix[rows]
df_test = df.drop(rows)
y_train = integer_labels(df_train)
return df_train, y_train, df_test
def load_reads_per_feature(csv_file):
reads = np.zeros((500, 500))
for line in open(csv_file):
l = line.strip().split(" ")
x, y = map(int, l[-1].split())
reads[y, x] = int(l[0])
return reads
def merged_reads_per_feature(reads_matrix, size):
merged_reads = np.zeros((600, 600))
for i in range(500):
for j in range(500):
merged_reads[merge(j, size), merge(i, size)] += reads_matrix[i, j]
return merged_reads
```
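For reference, the `merge(x, i)` helper above bins an integer spot coordinate into buckets of width `i` (Python 2 integer division) so that neighbouring features can be pooled by `load_tissue_df` and `merged_reads_per_feature`. A small illustration of the same formula, written with explicit floor division and made-up coordinates:
```python
def merge(x, i):
    # Same formula as util.merge, with explicit floor division
    return (x // i) * i + i // 2 + 1
# With merge_dist = 5, coordinates 0..9 collapse onto two representative points.
print([merge(x, 5) for x in range(10)])  # [3, 3, 3, 3, 3, 8, 8, 8, 8, 8]
```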
#### File: st_toolbox/scripts/kmer_read_classifier.py
```python
import argparse
from collections import defaultdict
from itertools import tee, izip
import os
import sys
from st_toolbox.data import readfq
def prc_format(x, pos=0):
return '{:.1%}'.format(x)
def histogram(args):
""" Make a histogram over kmer overlaps for reads over reference, to
evaluate a proper cutoff value.
"""
import matplotlib.pylab as plt
from matplotlib.ticker import FuncFormatter
with open(args.reference) as fh:
fa = readfq(fh)
contams = [t[1] for t in fa]
k = args.k
kmers = set()
for contam in contams:
kmers |= set(str(contam[i:i + k]) for i in xrange(len(contam) - k))
kmers = frozenset(kmers)
fh = open(args.fastq)
fq = readfq(fh)
overlap = defaultdict(int)
for _, seq, _ in fq:
read_kmers = frozenset(seq[i:i + k] for i in xrange(len(seq) - k))
overlap[round(100 * float(len(read_kmers.intersection(kmers))) / len(seq), 0)] += 1
fh.close()
out_file = os.path.basename(args.fastq).replace('.fastq', '_kmer_histogram.png')
keys = sorted(overlap.keys())
nreads = sum(overlap.values())
percentages = [float(v) / nreads for v in overlap.values()]
cumulative = [float(overlap[keys[0]]) / nreads]
for key in keys[1:]:
cumulative.append(cumulative[-1] + float(overlap[key]) / nreads)
plt.bar(overlap.keys(), percentages, ec='none', label='histogram')
plt.plot(keys, cumulative, label='cumulative')
plt.gca().yaxis.set_major_formatter(FuncFormatter(prc_format))
plt.grid(True)
title = \
"""kmer overlap
k = {}
number of reads: {}"""
plt.title(title.format(k, nreads))
    plt.xlabel('Overlap (%)')
plt.ylabel('% of reads')
plt.legend()
plt.tight_layout()
plt.savefig(out_file)
def split(args):
""" Split the given fastq file to two different fastq files depending on
the read being contaminated or not.
"""
with open(args.reference) as fh:
fa = readfq(fh)
contams = [t[1] for t in fa]
k = args.k
cutoff = args.overlap_cutoff
kmers = set()
for contam in contams:
kmers |= set(str(contam[i:i + k]) for i in xrange(len(contam) - k))
kmers = frozenset(kmers)
fh = open(args.fastq)
fq = readfq(fh)
fq_format = '@{header}\n{sequence}\n+\n{quality}\n'
fname = os.path.basename(args.fastq)
contam_fh = open(fname.replace('.fastq', '_contam.fastq'), 'w')
clean_fh = open(fname.replace('.fastq', '_clean.fastq'), 'w')
for head, seq, qual in fq:
read_kmers = frozenset(seq[i:i + k] for i in xrange(len(seq) - k))
overlapment = 100 * float(len(read_kmers.intersection(kmers))) / len(seq)
if overlapment > cutoff:
contam_fh.write(fq_format.format(header=head, sequence=seq, quality=qual))
else:
clean_fh.write(fq_format.format(header=head, sequence=seq, quality=qual))
clean_fh.close()
contam_fh.close()
fh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('reference', help='reference fasta file')
parser.add_argument('fastq', help='fastq file with reads to classify')
parser.add_argument('--mode', help='script mode, either "histogram" or "split"', \
default='histogram')
parser.add_argument('--k', help='kmer length, default 15', default=15, type=int)
parser.add_argument('--overlap_cutoff', help='kmer overlap cutoff, default 20', \
default=20, type=int)
args = parser.parse_args()
if args.mode == 'histogram':
histogram(args)
elif args.mode == 'split':
split(args)
```
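Both the `histogram` and `split` modes above score a read by the number of its k-mers that also occur in the reference k-mer set, normalised by read length. A minimal sketch of that scoring step, with a made-up reference and read instead of real FASTA/FASTQ input:
```python
k = 15
reference = "ACGTTGCAACGGTACCATGCAGTTACGGATCCATTGCAACGT"  # hypothetical contaminant sequence
read = reference[5:35]                                     # hypothetical read drawn from it
ref_kmers = frozenset(reference[i:i + k] for i in range(len(reference) - k))
read_kmers = frozenset(read[i:i + k] for i in range(len(read) - k))
# Same score as in the script: shared k-mers relative to read length.
overlap = 100.0 * len(read_kmers & ref_kmers) / len(read)
print(overlap)  # high when the read comes from the reference, so it would be flagged as contaminated
```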
#### File: st_toolbox/st_toolbox/optics.py
```python
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.metrics import pairwise_distances
def hierarchical_extraction(ordering, reachability_distances, min_cluster_size,
significant_ratio=0.75, similarity_ratio=0.4):
"""
Constructs a tree structure from an OPTICS ordering and a set of
reachability distances and extracts clusters from this structure.
Parameters
----------
ordering : array [n_samples]
Indices of the samples in the order generated by OPTICS.
reachability_distances : array [n_samples]
Reachability distance for each sample.
min_cluster_size : int
The minimum size of a cluster in number of samples.
significant_ratio : float
The ratio for the reachability score of a local maximum
compared to its neighbors to be considered significant.
similarity_ratio : float
The ratio for the reachability score of a split point
compared to the parent split point for it to be considered
similar.
Returns
-------
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
References
----------
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"Automatic extraction of clusters from hierarchical clustering
representations." Advances in Knowledge Discovery and Data Mining (2003):
567-567.
"""
R = np.asarray([reachability_distances[i] for i in ordering])
n = len(ordering)
    # Find local maxima
L = []
for i in xrange(0, min_cluster_size):
if np.argmax(R[0:i + min_cluster_size + 1]) == i:
L.append(i)
if np.argmax(R[n - 2 * min_cluster_size + i:n]) == i:
L.append(n - min_cluster_size + i)
for i in xrange(min_cluster_size, n - min_cluster_size):
if np.argmax(R[i - min_cluster_size:i + min_cluster_size + 1]) == min_cluster_size:
L.append(i)
    # Sort local maxima in order of their reachability
L.sort(key=lambda x: R[x])
class Node:
def __init__(self, left, right):
self.left = left
self.right = right
self.children = []
leaves = []
def cluster_tree(node, parent, L):
if not L:
leaves.append(node)
return
s = node.split = L.pop()
child_left = Node(node.left, s)
child_right = Node(s + 1, node.right)
L_left = [L[i] for i in np.where(np.asarray(L) < s)[0]]
L_right = [L[i] for i in np.where(np.asarray(L) > s)[0]]
R_left = R[child_left.left:child_left.right]
R_right = R[child_right.left:child_right.right]
if R_left.size > 0:
avg_reach_left = np.mean(R_left)
else:
avg_reach_left = 0
if R_right.size > 0:
avg_reach_right = np.mean(R_right)
else:
avg_reach_right = 0
if avg_reach_left <= significant_ratio * R[s] >= avg_reach_right:
children = []
if child_left.right - child_left.left >= min_cluster_size:
children.append((child_left, L_left))
if child_right.right - child_right.left >= min_cluster_size:
children.append((child_right, L_right))
if not children:
return
if parent and R[s] / R[parent.split] >= similarity_ratio:
for child, L in children:
parent.children.append(child)
parent.children.remove(node)
p = parent
else:
for child, L in children:
node.children.append(child)
p = node
for (child, L) in children:
cluster_tree(child, p, L)
else:
cluster_tree(node, parent, L)
root = Node(0, n)
cluster_tree(root, None, L)
labels = -np.ones(n)
count = 0
for leaf in leaves:
for i in xrange(leaf.left, leaf.right):
labels[ordering[i]] = count
count += 1
return labels
EXTRACTION_FUNCTIONS = {
'hierarchical': hierarchical_extraction,
}
def optics(X, eps=float('inf'), min_samples=5, metric='euclidean',
extraction='hierarchical', ext_kwargs=dict()):
"""
Perform OPTICS clustering from vector array or distance matrix.
Parameters
----------
X : array [n_samples, n_samples] or [n_samples, n_features]
Array of distances between samples, or a feature array.
The array is treated as a feature array unless the metric is given as
'precomputed'.
eps : float, optional
The generating distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples in a neighborhood for a point to be considered
as a core point.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
extraction : string, optional
The extraction method used to generate clusters from the ordering of
points returned by the OPTICS algorithm.
ext_kwargs : dict
Keyword arguments to be supplied to the extraction function.
Returns
-------
core_distances : array [n_samples]
Core distance for each sample.
ordering : array [n_samples]
Indices of the samples in the order generated by OPTICS.
reachability_distances : array [n_samples]
Reachability distance for each sample.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_optics.py for an example.
References
----------
Ankerst, Mihael, <NAME>, <NAME>, and <NAME>.
"OPTICS: ordering points to identify the clustering structure." ACM SIGMOD
Record 28, no. 2 (1999): 49-60.
"""
X = np.asarray(X)
n = X.shape[0]
# Calculate the pairwise distances
D = pairwise_distances(X, metric=metric)
ordering = []
# Initiate reachability distances to infinity
reachability_distances = float('inf') * np.ones(n)
# Calculate core distance for each sample
core_distances = np.asarray([np.sort(row)[min_samples] for row in D])
seeds = range(n)
i = 0
while len(seeds) > 1:
# Mark current point as processed
seeds.remove(i)
# Add current point to the ordering
ordering.append(i)
core_dist = core_distances[i]
if core_dist <= eps:
seeds_array = np.asarray(seeds)
# Get the neighbors of the current point
neighbors = seeds_array[np.where(D[i][seeds] <= eps)[0]]
cd = core_dist * np.ones(neighbors.size)
d = D[i][neighbors]
# Set the new reachability distances to
# max(core_distance, distance)
new_reach_dists = np.maximum(cd, d)
reachability_distances[neighbors] = new_reach_dists
i = seeds[np.argmin(reachability_distances[seeds])]
else:
i = seeds[0]
# Add last point
ordering.append(seeds[0])
# Set reachability for first point
reachability_distances[0] = 0
if type(extraction) is str:
estr = extraction.lower()
if estr in EXTRACTION_FUNCTIONS:
func = EXTRACTION_FUNCTIONS[estr]
labels = func(ordering, reachability_distances, min_samples,
**ext_kwargs)
else:
raise ValueError('Unknown Extraction Method: %s' % estr)
else:
raise TypeError('Extraction Method must be a string.')
return core_distances, ordering, reachability_distances, labels
class OPTICS(BaseEstimator, ClusterMixin):
"""
Perform OPTICS clustering from vector array or distance matrix.
Parameters
----------
X : array [n_samples, n_samples] or [n_samples, n_features]
Array of distances between samples, or a feature array.
The array is treated as a feature array unless the metric is given as
'precomputed'.
eps : float, optional
The generating distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples in a neighborhood for a point to be considered
as a core point.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
extraction : string, optional
The extraction method used to generate clusters from the ordering of
points returned by the OPTICS algorithm.
ext_kwargs : dict
Keyword arguments to be supplied to the extraction function.
Attributes
----------
`core_distances_` : array [n_samples]
Core distance for each sample.
`ordering_` : array [n_samples]
Indices of the samples in the order generated by OPTICS.
`reachability_distances_` : array [n_samples]
Reachability distance for each sample.
`labels_` : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_optics.py for an example.
References
----------
Ankerst, Mihael, <NAME>, <NAME>, and <NAME>.
"OPTICS: ordering points to identify the clustering structure." ACM SIGMOD
Record 28, no. 2 (1999): 49-60.
"""
def __init__(self, eps=float('inf'), min_samples=5, metric='euclidean',
extraction='hierarchical', ext_kwargs=dict()):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.extraction = extraction
self.ext_kwargs = ext_kwargs
def fit(self, X):
"""
Perform OPTICS clustering from vector array or distance matrix.
Parameters
----------
X : array [n_samples, n_samples] or [n_samples, n_features]
Array of distances between samples, or a feature array.
The array is treated as a feature array unless the metric is
given as 'precomputed'.
params : dict
Overwrite keywords from __init__.
"""
clust = optics(X, **self.get_params())
(self.core_distances_, self.ordering_, self.reachability_distances_,
self.labels_) = clust
return self
```
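A minimal usage sketch of the `OPTICS` estimator defined above, on synthetic data; the parameter values are arbitrary and this assumes the module is importable as `st_toolbox.optics` (per the file path above):
```python
import numpy as np
from st_toolbox.optics import OPTICS
# Two well-separated blobs of 2-D points.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 0.2, size=(30, 2)),
               rng.normal(5.0, 0.2, size=(30, 2))])
clust = OPTICS(eps=1.0, min_samples=5, metric='euclidean',
               extraction='hierarchical').fit(X)
print(clust.labels_)                      # cluster label per sample, -1 means noise
print(clust.ordering_[:5])                # first samples in the OPTICS ordering
print(clust.reachability_distances_[:5])  # reachability distance per sample
```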
#### File: sysadmin_scripts/mongodb_data_model_3/updateS3.py
```python
import boto3
import botocore
import os
import sys
import subprocess
import glob
import tempfile
import shutil
BUCKET_NAME = "featuresdev"
def run_command(command, out=subprocess.PIPE):
try:
print "running command: {}".format(" ".join(x for x in command).rstrip())
proc = subprocess.Popen(command,
stdout=out, stderr=subprocess.PIPE,
close_fds=True, shell=False)
(stdout, errmsg) = proc.communicate()
except Exception as e:
print str(e)
raise e
print "Starting conversion of data in S3 from model 2 to model 3"
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
print "Using temp directory " + tmp_dir
# Create S3 object
s3 = boto3.resource('s3')
# Print out all bucket names
print "S3 buckets available"
for bucket in s3.buckets.all():
print(bucket.name)
print "Using S3 bucket " + BUCKET_NAME
# Download the feature's bucket files
file_ids = list()
for object in s3.Bucket(BUCKET_NAME).objects.all():
print "Downloading " + object.key
try:
s3.Bucket(BUCKET_NAME).download_file(object.key, object.key)
file_ids.append(object.key)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print "The file does not exist or it is no valid"
else:
print "Unknown error downloading file"
for file in file_ids:
try:
run_command(["gunzip", "-f", file])
except Exception as e:
print "Error gunziping file " + str(file) + " " + str(e)
continue
for file in glob.glob("*"):
if file.find(".gz") != -1:
print "Converting JSON to TSV, skipping file " + str(file)
continue
try:
# We need to add the JSON extension so the conversion script works
json_file_name = file + ".json"
os.rename(file, json_file_name)
new_file_name = file + "_stdata.tsv"
run_command(["json_to_matrix.py", "--json-file", json_file_name, "--outfile", new_file_name])
except Exception as e:
print "Error converting JSON to TSV " + str(file) + " " + str(e)
continue
for file in glob.glob("*.tsv"):
try:
run_command(["gzip", "-f", file])
except Exception as e:
print "Error gzipping file " + str(file)
continue
for file in glob.glob("*.gz"):
key = file.split("_stdata")[0] + "/" + file
print "Uploading to S3 " + key
data = open(file, 'rb')
try:
s3.Bucket(BUCKET_NAME).put_object(Key=key, Body=data)
data.close()
except Exception as e:
print "Error uploading file to S3 " + str(e)
data.close()
for file in file_ids:
print "Removing file from S3 " + file
try:
        s3.Object(BUCKET_NAME, file).delete()  # delete the original object via the S3 Object resource
except Exception as e:
print "Error deleting file in S3 " + str(e)
print "Done! removing temp folder"
shutil.rmtree(tmp_dir)
```
|
{
"source": "jfnavarro/st_ts",
"score": 3
}
|
#### File: st_ts/scripts/tag_clusters_to_matrix.py
```python
import argparse
import sys
from collections import defaultdict
import os
def main(input_files, outfile):
if len(input_files) != 2:
sys.stderr.write("Error, input file not present or invalid format\n")
sys.exit(1)
st_bed_file = input_files[1]
tag_clusters_file = input_files[0]
if not os.path.isfile(st_bed_file) or not os.path.isfile(tag_clusters_file):
sys.stderr.write("Error, input file not present or invalid format\n")
sys.exit(1)
if outfile is None:
outfile = "output_table_ctts.txt"
# load all the original barcode - gene coordinates
map_original_clusters = defaultdict(list)
with open(st_bed_file, "r") as filehandler:
for line in filehandler.readlines():
if line.find("#") != -1:
continue
tokens = line.split()
assert(len(tokens) == 9)
chromosome = tokens[0]
start_site = int(tokens[1])
end_site = int(tokens[2])
strand = tokens[5]
# gene = tokens[6]
x = float(tokens[7])
y = float(tokens[8])
map_original_clusters[(chromosome,strand)].append((x,y,start_site,end_site))
# loads all the clusters
map_clusters = defaultdict(int)
clusters = set()
barcodes = set()
with open(tag_clusters_file, "r") as filehandler:
for line in filehandler.readlines():
if line.find("#") != -1:
continue
tokens = line.split()
assert(len(tokens) == 8)
chromosome = tokens[0]
start = int(tokens[2])
strand = tokens[1]
end = int(tokens[3])
            # doing a full search of intersections over all barcodes (similar to bed intersect);
            # if we could rely on no barcodes being missing after the clustering, we could use
            # a faster approach that iterates over a single barcode instead of all of them
            # this intersection method is probably overcounting
for x, y, start_orig, end_orig in map_original_clusters[chromosome, strand]:
if strand == "-": start_orig = (end_orig - 1)
if (start_orig >= start and start_orig < end):
map_clusters[(x,y,chromosome,strand,start,end)] += 1
barcodes.add((x,y))
clusters.add((chromosome,strand,start,end))
# write cluster count for each barcode
with open(outfile, "w") as filehandler:
clusters_string = "\t".join("%s:%s-%s,%s" % cluster for cluster in clusters)
filehandler.write(clusters_string + "\n")
for x,y in barcodes:
filehandler.write("{0}x{1}".format(x,y))
for chro,strand,star,end in clusters:
count = map_clusters[(x,y,chro,strand,star,end)]
filehandler.write("\t{}".format(count))
filehandler.write("\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input_files', nargs=2,
help="The tab delimited file containing the tag clusters and the ST original BED file")
parser.add_argument("--outfile", default=None, help="Name of the output file")
args = parser.parse_args()
main(args.input_files, args.outfile)
```
|
{
"source": "jfnavarro/TACA",
"score": 3
}
|
#### File: taca/utils/config.py
```python
import ConfigParser
import os
import yaml
CONFIG = {}
def load_config(config_file=None):
"""Loads a configuration file.
By default it assumes ~/.taca/taca.yaml
"""
try:
if not config_file:
config_file = os.path.join(os.environ.get('HOME'), '.taca', 'taca.yaml')
config = ConfigParser.SafeConfigParser()
with open(config_file) as f:
config.readfp(f)
return config
except IOError:
raise IOError(("There was a problem loading the configuration file. "
"Please make sure that ~/.taca/taca.conf exists and that you have "
"read permissions"))
def load_yaml_config(config_file):
"""Load YAML config file
:param str config_file: The path to the configuration file.
:returns: A dict of the parsed config file.
:rtype: dict
:raises IOError: If the config file cannot be opened.
"""
if type(config_file) is file:
CONFIG.update(yaml.load(config_file) or {})
return CONFIG
else:
try:
with open(config_file, 'r') as f:
content = yaml.load(f)
CONFIG.update(content)
return content
except IOError as e:
e.message = "Could not open configuration file \"{}\".".format(config_file)
raise e
```
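A minimal usage sketch of the YAML loader above; the configuration path and the `log` key are hypothetical and only illustrate the call pattern:
```python
from taca.utils.config import CONFIG, load_yaml_config
# Hypothetical config file; load_yaml_config returns the parsed dict
# and also updates the module-level CONFIG as a side effect.
content = load_yaml_config("/home/user/.taca/taca.yaml")
print(content.get("log"))
print(CONFIG.get("log"))
```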
|
{
"source": "jfnavarro/taggd",
"score": 3
}
|
#### File: taggd/core/demultiplex.py
```python
import os
import time
import multiprocessing as mp
import argparse
import taggd.io.barcode_utils as bu
import taggd.core.demultiplex_core_functions as core
import taggd.core.demultiplex_sub_functions as sub
import taggd.core.demultiplex_search_functions as srch
def main(argv=None):
"""
Main application.
    Starts a timer, creates the argument parser, parses the parameters
    and runs all the steps of the demultiplexing.
"""
start_time = time.time()
# Create a parser
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
# Needed parameters
parser.add_argument('barcodes_infile',
metavar='barcodes-infile',
help="The file with true barcode IDs and other properties.")
parser.add_argument('reads_infile',
metavar='reads-infile',
help="The FASTQ, FASTA, SAM or BAM file with reads.")
parser.add_argument('outfile_prefix',
metavar='outfile-prefix', help="The output files prefix.")
# Optional arguments.
parser.add_argument('--no-matched-output',
help='Do not output matched reads',
default=False, action='store_true')
parser.add_argument('--no-ambiguous-output',
help='Do not output ambiguous reads',
default=False, action='store_true')
parser.add_argument('--no-unmatched-output',
help='Do not output unmatched reads',
default=False, action='store_true')
parser.add_argument('--no-results-output',
help='Do not output a tab-separated results file with stats on the reads',
default=False, action='store_true')
parser.add_argument('--start-position',
type=int,
help='The start position for barcodes in reads (default: %(default)d)',
default=0, metavar="[int]")
parser.add_argument('--k',
type=int,
help='The kmer length (default: %(default)d)',
default=6, metavar="[int]")
parser.add_argument('--max-edit-distance',
type=int,
help='The max edit distance for allowing hits (default: %(default)d)',
default=2, metavar="[int]")
parser.add_argument('--metric',
help= "Distance metric: Subglobal, Levenshtein or Hamming (default: %(default)s)",
default="Subglobal", metavar="[string]")
parser.add_argument('--ambiguity-factor',
type=float,
help='Top matches within this factor from the best match are considered ambiguous,\n'
'for instance with factor=1.5, having one match with distance 2 and two matches\n'
'with distance 4 yields all three matches as ambiguous hits. Perfect hits are always\n'
'considered non-ambiguous, irrespective of factor. (default: %(default)d)',
default=1.0, metavar="[int]")
parser.add_argument('--slider-increment',
type=int, help="Space between kmer searches, " \
"0 yields kmer length (default: %(default)d)",
default=0, metavar="[int]")
parser.add_argument('--overhang',
type=int,
help="Additional flanking bases around read barcode\n" \
"to allow for insertions when matching (default: %(default)d)",
default=2, metavar="[int]")
parser.add_argument('--seed',
help="Random number generator seed for shuffling ambiguous hits (default: %(default)s)",
default=None, metavar="[string]")
parser.add_argument('--homopolymer-filter',
type=int,
help="If set, excludes reads where the barcode part contains\n" \
"a homopolymer of the given length,\n" \
"0 means no filter (default: %(default)d)",
default=8, metavar="[int]")
parser.add_argument('--subprocesses',
type=int,
help="Number of subprocesses started (default: 0, yielding number of machine cores - 1)",
default=0, metavar="[int]")
parser.add_argument('--estimate-min-edit-distance',
type=int,
help="If set, estimates the min edit distance among true\n" \
"barcodes by comparing the specified number of pairs,\n" \
"0 means no estimation (default: %(default)d)",
default=0, metavar="[int]")
parser.add_argument('--no-offset-speedup',
help="Turns off an offset speedup routine.\n" \
"Increases running time but may yield more hits.",
default=False, action='store_true')
parser.add_argument('--multiple-hits-keep-one',
help="When multiple kmer hits are found for a record\n" \
"keep one as unambiguous and the rest as ambiguous",
default=False, action='store_true')
parser.add_argument('--trim-sequences', nargs='+', type=int, default=None,
help="Trims from the barcodes in the input file\n" \
"The bases given in the list of tuples as START END START END .. where\n" \
"START is the integer position of the first base (0 based) and END is the integer\n" \
"position of the last base.\nTrimmng sequences can be given several times.")
parser.add_argument('--barcode-tag',
type=str,
help='Use the sequence in specified tag instead of the read sequence for the barcode demultiplexing.\n' \
'The tag must be a two-letter string and be present for all records in the input file.\n' \
'Can only be used with SAM or BAM formatted input files.',
default=None, metavar="[str]")
parser.add_argument('--version', action='version', version='%(prog)s ' + "0.3.2")
# Parse
    if argv is None:
options = parser.parse_args()
else:
options = parser.parse_args(argv)
# Validate all options.
if not os.path.isfile(options.barcodes_infile) :
raise ValueError("Invalid true barcodes input file path.")
if not os.path.isfile(options.reads_infile) :
raise ValueError("Invalid reads input file path.")
if not (options.reads_infile.upper().endswith(".FASTQ") or \
options.reads_infile.upper().endswith(".FQ") or \
options.reads_infile.upper().endswith(".SAM") or \
options.reads_infile.upper().endswith(".FASTA") or \
options.reads_infile.upper().endswith(".FA") or \
options.reads_infile.upper().endswith(".BAM")):
raise ValueError("Invalid reads input file format: must be FASTQ, " \
"FASTA, SAM or BAM format and file end with .fq, fastq, .fa, .fasta, .sam or .bam")
if options.outfile_prefix is None or options.outfile_prefix == "":
raise ValueError("Invalid output file prefix.")
if options.k <= 0:
raise ValueError("Invalid kmer length. Must be > 0.")
if options.max_edit_distance < 0:
raise ValueError("Invalid max edit distance. Must be >= 0.")
if options.metric not in ("Subglobal", "Levenshtein", "Hamming"):
raise ValueError("Invalid metric. Must be Subglobal, Levenshtein or Hamming.")
if options.slider_increment < 0:
raise ValueError("Invalid slider increment. Must be >= 0.")
if options.slider_increment == 0:
options.slider_increment = int(options.k)
if options.start_position < 0:
raise ValueError("Invalid start position. Must be >= 0.")
if options.overhang < 0:
raise ValueError("Invalid overhang. Must be >= 0.")
if options.metric == "Hamming" and options.overhang > 0:
raise ValueError("Invalid overhang. Must be 0 for Hamming metric.")
if options.subprocesses < 0:
raise ValueError("Invalid no. of subprocesses. Must be >= 0.")
if options.ambiguity_factor < 1.0:
raise ValueError("Invalid ambiguity factor. Must be >= 1.")
# Check the the trimming sequences given are valid
    if options.trim_sequences is not None \
            and (len(options.trim_sequences) % 2 != 0 or min(options.trim_sequences) < 0):
raise ValueError("Invalid trimming sequences given " \
"The number of positions given must be even and they must fit into the barcode length.")
if options.barcode_tag:
if len(options.barcode_tag) != 2:
raise ValueError("Invalid the \"--barcode-tag\" option must specify a two-letter string, current length is "+str(len(options.barcode_tag))+" letters (\""+options.barcode_tag+"\").\n")
if not (options.reads_infile.upper().endswith(".SAM") or options.reads_infile.upper().endswith(".BAM")):
raise ValueError("Invalid the \"--barcode-tag\" option can only be used with SAM or BAM formatted input files.\n")
# Read barcodes file
true_barcodes = bu.read_barcode_file(options.barcodes_infile)
# Paths
frmt = options.reads_infile.split(".")[-1]
fn_bc = os.path.abspath(options.barcodes_infile)
fn_reads = os.path.abspath(options.reads_infile)
fn_prefix = os.path.abspath(options.outfile_prefix)
fn_matched = None if options.no_matched_output else fn_prefix + "_matched." + frmt
fn_ambig = None if options.no_ambiguous_output else fn_prefix + "_ambiguous." + frmt
fn_unmatched = None if options.no_unmatched_output else fn_prefix + "_unmatched." + frmt
fn_results = None if options.no_results_output else fn_prefix + "_results.tsv"
# Subprocesses
if options.subprocesses == 0:
options.subprocesses = mp.cpu_count() - 1
print("# Options: " + str(options).split("Namespace")[-1])
print("# Barcodes input file: " + str(fn_bc))
print("# Reads input file: " + str(fn_reads))
print("# Matched output file: " + str(fn_matched))
print("# Ambiguous output file: " + str(fn_ambig))
print("# Unmatched output file: " + str(fn_unmatched))
print("# Results output file: " + str(fn_results))
print("# Number of barcodes in input: " + str(len(true_barcodes)))
lngth = len(list(true_barcodes.keys())[0])
print("# Barcode length: " + str(lngth))
print("# Barcode length when overhang added: " + \
str(lngth + min(options.start_position, options.overhang) + options.overhang))
# Check barcodes file.
if options.estimate_min_edit_distance > 0:
min_dist = estimate_min_edit_distance(true_barcodes, options.estimate_min_edit_distance)
if min_dist <= options.max_edit_distance:
raise ValueError("Invalid max edit distance: exceeds or equal " \
"to estimated minimum edit distance among true barcodes.")
print("# Estimate of minimum edit distance between true barcodes (may be less): " + str(min_dist))
else:
print("# Estimate of minimum edit distance between true barcodes (may be less): Not estimated")
# Make the input trim coordinates a list of tuples
trim_sequences = None
if options.trim_sequences is not None:
trim_sequences = list()
for i in range(len(options.trim_sequences) - 1):
if i % 2 == 0:
trim_sequences.append((options.trim_sequences[i],
options.trim_sequences[i+1]))
# Initialize main components
sub.init(true_barcodes,
options.start_position,
min(options.start_position, options.overhang),
options.overhang,
options.max_edit_distance,
options.homopolymer_filter,
options.seed,
options.multiple_hits_keep_one,
trim_sequences,
options.barcode_tag)
srch.init(true_barcodes,
options.k,
options.max_edit_distance,
options.metric,
options.slider_increment,
min(options.start_position, options.overhang),
options.overhang,
options.ambiguity_factor,
options.no_offset_speedup)
# Demultiplex
print("# Starting demultiplexing...")
stats = core.demultiplex(fn_reads,
fn_matched,
fn_ambig,
fn_unmatched,
fn_results,
options.subprocesses)
print("# ...finished demultiplexing")
print("# Wall time in secs: " + str(time.time() - start_time))
print(str(stats))
```
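Since `main()` accepts an explicit argument list, the demultiplexer can be driven programmatically as well as from the command line. A hypothetical invocation (the input files are made up; the options are the ones defined by the parser above):
```python
from taggd.core.demultiplex import main
# barcodes-infile, reads-infile and outfile-prefix are the three positional arguments.
main([
    "barcodes.tsv",
    "reads.fastq",
    "out/demuxed",
    "--k", "6",
    "--max-edit-distance", "2",
    "--metric", "Subglobal",
    "--subprocesses", "4",
])
```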
|
{
"source": "jfneis/bbvisa2ofx",
"score": 3
}
|
#### File: bbvisa2ofx/bbvisa2ofx/txtparser.py
```python
import re
from datetime import datetime
from dateutil.relativedelta import relativedelta
class TxtParser:
'''
    Class responsible for parsing the txt credit card statement file
    provided by Banco do Brasil.
    Looking at the provided file, the lines of interest are the ones that
    start with a date in the dd/mm format.
    For each line matching this pattern, the information is extracted as follows:
    date: first 5 characters
    desc: characters 8 to 49
    value: split of the end of the line from character 51 onwards, first item
'''
items = None
    cardTitle = None #card title, defined by "Modalidade" in the txt
    cardNumber = None #card number, defined by Nr.Cartao in the txt
    txtFile = None
    dueDate = None #due date, defined by "Vencimento" in the txt
def __init__(self,txtFile):
'''
Constructor
'''
self.items = []
self.txtFile = txtFile
self.exchangeRate = 0.0
def parse(self):
f = self.txtFile
lines = f.readlines()
for line in lines:
self.parseDueDate(line)
self.parseExchangeRateLine(line)
self.parseCardTitleLine(line)
self.parseCardNumberLine(line)
        #now with the exchangeRate and dueDate populated, we can parse all transaction lines
for line in lines:
self.parseTransactionLine(line)
def parseDueDate(self, line):
'''
        Populates dueDate if this is the line that represents the statement due date;
        this information is used to guess the year of the purchases.
        FIXME: it can still cause problems when a purchase was made in the previous year but appears to belong to the current year
'''
if(line.lstrip().startswith("Vencimento")):
print "Due date line found. %s" % line
self.dueDate = datetime.strptime(line.split(":")[1].strip(),'%d.%m.%Y')
print "Due date is: %s" % self.dueDate
def parseExchangeRateLine(self, line):
'''
        Populates exchangeRate if this is the line with the summary of expenses
        in dollars. This line is used to extract the exchange rate used to
        convert dollars to reais. What distinguishes it from the line with the
        summary of expenses in reais is the presence of a multiplication sign
        (represented by an X)
'''
if (re.match('^\s+\S+\s+-\s+\S+\s+\+\s+\S+\s+=\s+\S+\s+X', line)):
print "Echange Rate line found: "+ line
rate = re.findall('X\s+(\S+)', line)[0]
rate = rate.replace(',','.')
self.exchangeRate = float(rate)
print "Exchange Rate value: "+str(self.exchangeRate)
return self.exchangeRate
return 0.0
def parseCardTitleLine(self, line):
'''
        The card title line starts with "Modalidade"
'''
if(line.lstrip().startswith("Modalidade")):
print "Card title line found. %s" % line
# Fix issue #3 changing from lstrip to strip
self.cardTitle = line.split(":")[1].strip()
print "The card title is: %s" % self.cardTitle
def parseCardNumberLine(self, line):
'''
        The card number line starts with "Nr.Cart"
'''
if(line.lstrip().startswith("Nr.Cart")):
print "Card number line found. %s" % line
# Fix issue #3 changing from lstrip to strip
self.cardNumber = line.split(":")[1].strip()
print "The card number is: %s" % self.cardNumber
def parseTransactionLine(self, line):
'''
        Transaction lines start with a date in the "dd/mm " format
        (the trailing space must be checked because there are lines in the dd/mm/yyyy format that are not transactions).
        If this is a transaction line, an object is appended to the self.items list.
        This object contains the following fields:
        date: transaction date
        desc: description
        value: amount in BRL
'''
if(re.match("^\d\d\/\d\d\ $", line[:6]) != None):
brlValue = ''
usdValue = ''
obj = {}
obj['value'] = self.parseValueFromTransactionLine(line)
obj['date'] = self.parseDateFromTransactionLine(line)
obj['desc'] = line[9:48].lstrip().replace('*','')
obj['fitid'] = (obj['date'] + str(obj['value']) + obj['desc']).replace(' ','')
print "Line parsed: "+ str(obj)
            # Update the date of installment purchase transactions
self.updateDateFromInstallmentTransactionLine(obj)
self.items.append(obj)
return obj
def parseValueFromTransactionLine(self, line):
'''
        Extracts the value from the line, starting at character 51.
        If the amount is in dollars, convert it to reais using the exchange rate.
        Special thanks to Rodrigo, who contributed this via code.google
'''
value = 0.0
arr = line[51:].split()
brlValue = float(arr[0].replace('.','').replace(',','.'))
usdValue = float(arr[1].replace('.','').replace(',','.'))
if brlValue != 0.0:
value = brlValue
else:
value = usdValue * self.exchangeRate
        value = value * -1 #invert the sign of the value
return value
def parseDateFromTransactionLine(self, line):
'''
        Extracts the date from the transaction line.
        Since BB removed the year from the dates, we need to "guess" the correct year
        based on the due date:
        the due date's year is used by default, but if that would make the transaction
        date later than the due date, the previous year is assumed to be the correct one.
        FIXME: transactions made more than 12 months ago will get the wrong year, but
        no better solution comes to mind at the moment.
        Special thanks to <NAME>, who sent this contribution by email. :)
'''
transactionDate = datetime.strptime(line[:5],'%d/%m')
transactionDate = transactionDate.replace(self.dueDate.year)
if transactionDate >= self.dueDate:
transactionDate = transactionDate.replace(transactionDate.year-1)
return transactionDate.strftime('%Y%m%d')
def updateDateFromInstallmentTransactionLine(self, obj):
'''
        Checks whether this is an installment purchase transaction line (e.g. PARC 01/04) and,
        if so, pushes the transaction date X months forward (X = installment number - 1)
'''
regex = re.search("PARC\s\d\d/\d\d", obj['desc']);
if regex != None:
installmentNumber = int(regex.group()[5:7])
originalDate = datetime.strptime(obj['date'], '%Y%m%d')
newDate = originalDate + relativedelta(months=installmentNumber-1)
obj['date'] = newDate.strftime('%Y%m%d')
obj['desc'] = obj['desc'] + " DT ORIG: " + originalDate.strftime('%d/%m')
print 'Updated installment transaction date. Installment Number: {0} Original: {1} Updated: {2}'.format(installmentNumber, originalDate, newDate)
```
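A minimal usage sketch of `TxtParser`; the statement file name is hypothetical, and the fields printed (`date`, `desc`, `value`) are the ones populated by `parseTransactionLine` above:
```python
from bbvisa2ofx.txtparser import TxtParser
# Hypothetical Banco do Brasil Visa statement exported as txt.
with open("fatura_visa.txt") as statement:
    parser = TxtParser(statement)
    parser.parse()
print("%s %s due %s" % (parser.cardTitle, parser.cardNumber, parser.dueDate))
for item in parser.items:
    print("%s  %-40s %10.2f" % (item["date"], item["desc"], item["value"]))
```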
|
{
"source": "jfng/amaranth-soc",
"score": 3
}
|
#### File: amaranth_soc/test/test_memory.py
```python
import unittest
from ..memory import _RangeMap, ResourceInfo, MemoryMap
class RangeMapTestCase(unittest.TestCase):
def test_insert(self):
range_map = _RangeMap()
range_map.insert(range(0,10), "a")
range_map.insert(range(20,21), "c")
range_map.insert(range(15,16), "b")
range_map.insert(range(16,20), "q")
self.assertEqual(range_map._keys, [
range(0,10), range(15,16), range(16,20), range(20,21)
])
def test_overlaps(self):
range_map = _RangeMap()
range_map.insert(range(10,20), "a")
self.assertEqual(range_map.overlaps(range(5,15)), ["a"])
self.assertEqual(range_map.overlaps(range(15,25)), ["a"])
self.assertEqual(range_map.overlaps(range(5,25)), ["a"])
self.assertEqual(range_map.overlaps(range(0,3)), [])
self.assertEqual(range_map.overlaps(range(0,5)), [])
self.assertEqual(range_map.overlaps(range(25,30)), [])
def test_insert_wrong_overlap(self):
range_map = _RangeMap()
range_map.insert(range(0,10), "a")
with self.assertRaises(AssertionError):
range_map.insert(range(5,15), "b")
def test_get(self):
range_map = _RangeMap()
range_map.insert(range(5,15), "a")
self.assertEqual(range_map.get(0), None)
self.assertEqual(range_map.get(5), "a")
self.assertEqual(range_map.get(10), "a")
self.assertEqual(range_map.get(14), "a")
self.assertEqual(range_map.get(15), None)
class ResourceInfoTestCase(unittest.TestCase):
def test_simple(self):
info = ResourceInfo("a", name=("foo", "bar"), start=0, end=1, width=8)
self.assertEqual(info.name, ("foo", "bar"))
self.assertEqual(info.start, 0)
self.assertEqual(info.end, 1)
self.assertEqual(info.width, 8)
def test_name_cast(self):
info = ResourceInfo("a", name="foo", start=0, end=1, width=8)
self.assertEqual(info.name, ("foo",))
def test_wrong_name(self):
with self.assertRaisesRegex(TypeError,
r"Name must be a non-empty sequence of non-empty strings, not \(1,\)"):
ResourceInfo("a", name=(1,), start=0, end=1, width=8)
with self.assertRaisesRegex(TypeError,
r"Name must be a non-empty sequence of non-empty strings, not \(\)"):
ResourceInfo("a", name=(), start=0, end=1, width=8)
with self.assertRaisesRegex(TypeError,
r"Name must be a non-empty sequence of non-empty strings, not \('foo', ''\)"):
ResourceInfo("a", name=("foo", ""), start=0, end=1, width=8)
def test_wrong_start_addr(self):
with self.assertRaisesRegex(TypeError,
r"Start address must be a non-negative integer, not 'foo'"):
ResourceInfo("a", name="b", start="foo", end=1, width=8)
with self.assertRaisesRegex(TypeError,
r"Start address must be a non-negative integer, not -1"):
ResourceInfo("a", name="b", start=-1, end=1, width=8)
def test_wrong_end_addr(self):
with self.assertRaisesRegex(TypeError,
r"End address must be an integer greater than the start address, not 'foo'"):
ResourceInfo("a", name="b", start=0, end="foo", width=8)
with self.assertRaisesRegex(TypeError,
r"End address must be an integer greater than the start address, not 0"):
ResourceInfo("a", name="b", start=0, end=0, width=8)
def test_wrong_width(self):
with self.assertRaisesRegex(TypeError,
r"Width must be a non-negative integer, not 'foo'"):
ResourceInfo("a", name="b", start=0, end=1, width="foo")
with self.assertRaisesRegex(TypeError,
r"Width must be a non-negative integer, not -1"):
ResourceInfo("a", name="b", start=0, end=1, width=-1)
class MemoryMapTestCase(unittest.TestCase):
def test_name(self):
memory_map_0 = MemoryMap(addr_width=1, data_width=8)
memory_map_1 = MemoryMap(addr_width=1, data_width=8, name=None)
memory_map_2 = MemoryMap(addr_width=1, data_width=8, name="foo")
self.assertEqual(memory_map_0.name, None)
self.assertEqual(memory_map_1.name, None)
self.assertEqual(memory_map_2.name, "foo")
def test_wrong_name(self):
with self.assertRaisesRegex(ValueError,
r"Name must be a non-empty string, not 1"):
MemoryMap(addr_width=1, data_width=8, name=1)
with self.assertRaisesRegex(ValueError,
r"Name must be a non-empty string, not ''"):
MemoryMap(addr_width=1, data_width=8, name="")
def test_wrong_addr_width(self):
with self.assertRaisesRegex(ValueError,
r"Address width must be a positive integer, not -1"):
MemoryMap(addr_width=-1, data_width=8)
def test_wrong_data_width(self):
with self.assertRaisesRegex(ValueError,
r"Data width must be a positive integer, not -1"):
MemoryMap(addr_width=16, data_width=-1)
def test_wrong_alignment(self):
with self.assertRaisesRegex(ValueError,
r"Alignment must be a non-negative integer, not -1"):
MemoryMap(addr_width=16, data_width=8, alignment=-1)
def test_set_addr_width_wrong(self):
with self.assertRaisesRegex(ValueError,
r"Address width must be a positive integer, not -1"):
memory_map = MemoryMap(addr_width=1, data_width=8)
memory_map.addr_width = -1
def test_set_addr_width_wrong_shrink(self):
with self.assertRaisesRegex(ValueError,
r"Address width 1 must not be less than its previous value 2, "
r"because resources that were previously added may not fit anymore"):
memory_map = MemoryMap(addr_width=2, data_width=8)
memory_map.addr_width = 1
def test_set_addr_width_wrong_frozen(self):
with self.assertRaisesRegex(ValueError,
r"Memory map has been frozen. Address width cannot be extended "
r"further"):
memory_map = MemoryMap(addr_width=1, data_width=8)
memory_map.freeze()
memory_map.addr_width = 2
def test_add_resource(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
self.assertEqual(memory_map.add_resource("a", name="foo", size=1), (0, 1))
self.assertEqual(memory_map.add_resource(resource="b", name="bar", size=2), (1, 3))
def test_add_resource_map_aligned(self):
memory_map = MemoryMap(addr_width=16, data_width=8, alignment=1)
self.assertEqual(memory_map.add_resource("a", name="foo", size=1), (0, 2))
self.assertEqual(memory_map.add_resource("b", name="bar", size=2), (2, 4))
def test_add_resource_explicit_aligned(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
self.assertEqual(memory_map.add_resource("a", name="foo", size=1), (0, 1))
self.assertEqual(memory_map.add_resource("b", name="bar", size=1, alignment=1), (2, 4))
self.assertEqual(memory_map.add_resource("c", name="baz", size=2), (4, 6))
def test_add_resource_addr(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
self.assertEqual(memory_map.add_resource("a", name="foo", size=1, addr=10), (10, 11))
self.assertEqual(memory_map.add_resource("b", name="bar", size=2), (11, 13))
def test_add_resource_extend(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
self.assertEqual(memory_map.add_resource("a", name="foo", size=1, addr=0x10000,
extend=True),
(0x10000, 0x10001))
self.assertEqual(memory_map.addr_width, 17)
def test_add_resource_size_zero(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
self.assertEqual(memory_map.add_resource("a", name="foo", size=0), (0, 1))
self.assertEqual(memory_map.add_resource("b", name="bar", size=0), (1, 2))
def test_add_resource_wrong_frozen(self):
memory_map = MemoryMap(addr_width=2, data_width=8)
memory_map.freeze()
with self.assertRaisesRegex(ValueError,
r"Memory map has been frozen. Cannot add resource 'a'"):
memory_map.add_resource("a", name="foo", size=0)
def test_add_resource_wrong_name(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
with self.assertRaisesRegex(TypeError, r"Name must be a non-empty string, not 1"):
memory_map.add_resource("a", name=1, size=0)
with self.assertRaisesRegex(TypeError, r"Name must be a non-empty string, not ''"):
memory_map.add_resource("a", name="", size=0)
def test_add_resource_wrong_name_conflict(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
memory_map.add_resource("a", name="foo", size=0)
with self.assertRaisesRegex(ValueError, r"Name foo is already used by 'a'"):
memory_map.add_resource("b", name="foo", size=0)
def test_add_resource_wrong_address(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Address must be a non-negative integer, not -1"):
memory_map.add_resource("a", name="foo", size=1, addr=-1)
def test_add_resource_wrong_address_unaligned(self):
memory_map = MemoryMap(addr_width=16, data_width=8, alignment=1)
with self.assertRaisesRegex(ValueError,
r"Explicitly specified address 0x1 must be a multiple of 0x2 bytes"):
memory_map.add_resource("a", name="foo", size=1, addr=1)
def test_add_resource_wrong_size(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Size must be a non-negative integer, not -1"):
memory_map.add_resource("a", name="foo", size=-1)
def test_add_resource_wrong_alignment(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Alignment must be a non-negative integer, not -1"):
memory_map.add_resource("a", name="foo", size=1, alignment=-1)
def test_add_resource_wrong_out_of_bounds(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Address range 0x10000\.\.0x10001 out of bounds for memory map spanning "
r"range 0x0\.\.0x10000 \(16 address bits\)"):
memory_map.add_resource("a", name="foo", addr=0x10000, size=1)
with self.assertRaisesRegex(ValueError,
r"Address range 0x0\.\.0x10001 out of bounds for memory map spanning "
r"range 0x0\.\.0x10000 \(16 address bits\)"):
memory_map.add_resource("a", name="foo", size=0x10001)
def test_add_resource_wrong_overlap(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
memory_map.add_resource("a", name="foo", size=16)
with self.assertRaisesRegex(ValueError,
r"Address range 0xa\.\.0xb overlaps with resource 'a' at 0x0\.\.0x10"):
memory_map.add_resource("b", name="bar", size=1, addr=10)
def test_add_resource_wrong_twice(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
memory_map.add_resource("a", name="foo", size=16)
with self.assertRaisesRegex(ValueError,
r"Resource 'a' is already added at address range 0x0..0x10"):
memory_map.add_resource("a", name="bar", size=16)
def test_iter_resources(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
memory_map.add_resource("a", name="foo", size=1)
memory_map.add_resource("b", name="bar", size=2)
self.assertEqual(list(memory_map.resources()), [
("a", "foo", (0, 1)),
("b", "bar", (1, 3)),
])
def test_add_window(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
self.assertEqual(memory_map.add_resource("a", name="foo", size=1), (0, 1))
self.assertEqual(memory_map.add_window(MemoryMap(addr_width=10, data_width=8)),
(0x400, 0x800, 1))
self.assertEqual(memory_map.add_resource("b", name="bar", size=1), (0x800, 0x801))
def test_add_window_sparse(self):
memory_map = MemoryMap(addr_width=16, data_width=32)
self.assertEqual(memory_map.add_window(MemoryMap(addr_width=10, data_width=8),
sparse=True),
(0, 0x400, 1))
def test_add_window_dense(self):
memory_map = MemoryMap(addr_width=16, data_width=32)
self.assertEqual(memory_map.add_window(MemoryMap(addr_width=10, data_width=8),
sparse=False),
(0, 0x100, 4))
def test_add_window_extend(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
self.assertEqual(memory_map.add_window(MemoryMap(addr_width=17, data_width=8),
extend=True),
(0, 0x20000, 1))
self.assertEqual(memory_map.addr_width, 18)
def test_add_window_wrong_frozen(self):
memory_map = MemoryMap(addr_width=2, data_width=8)
memory_map.freeze()
with self.assertRaisesRegex(ValueError,
r"Memory map has been frozen. Cannot add window "
r"<amaranth_soc\.memory\.MemoryMap object at .+?>"):
memory_map.add_window(MemoryMap(addr_width=1, data_width=8))
def test_add_window_wrong_window(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(TypeError,
r"Window must be a MemoryMap, not 'a'"):
memory_map.add_window(window="a")
def test_add_window_wrong_wider(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Window has data width 16, and cannot be added to a memory map "
r"with data width 8"):
memory_map.add_window(MemoryMap(addr_width=10, data_width=16))
def test_add_window_wrong_no_mode(self):
memory_map = MemoryMap(addr_width=16, data_width=16)
with self.assertRaisesRegex(ValueError,
r"Address translation mode must be explicitly specified when adding "
r"a window with data width 8 to a memory map with data width 16"):
memory_map.add_window(MemoryMap(addr_width=10, data_width=8))
def test_add_window_wrong_ratio(self):
memory_map = MemoryMap(addr_width=16, data_width=16)
with self.assertRaisesRegex(ValueError,
r"Dense addressing cannot be used because the memory map data width "
r"16 is not an integer multiple of window data width 7"):
memory_map.add_window(MemoryMap(addr_width=10, data_width=7), sparse=False)
def test_add_window_wrong_out_of_bounds(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Address range 0x0\.\.0x20000 out of bounds for memory map spanning "
r"range 0x0\.\.0x10000 \(16 address bits\)"):
memory_map.add_window(MemoryMap(addr_width=17, data_width=8))
def test_add_window_wrong_overlap(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
memory_map.add_window(MemoryMap(addr_width=10, data_width=8))
with self.assertRaisesRegex(ValueError,
r"Address range 0x200\.\.0x600 overlaps with window "
r"<amaranth_soc\.memory\.MemoryMap object at .+?> at 0x0\.\.0x400"):
memory_map.add_window(MemoryMap(addr_width=10, data_width=8), addr=0x200)
def test_add_window_wrong_twice(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
window = MemoryMap(addr_width=10, data_width=8)
memory_map.add_window(window)
with self.assertRaisesRegex(ValueError,
r"Window <amaranth_soc\.memory\.MemoryMap object at .+?> is already added "
r"at address range 0x0\.\.0x400"):
memory_map.add_window(window)
def test_add_window_wrong_name_conflict(self):
memory_map = MemoryMap(addr_width=2, data_width=8)
memory_map.add_resource("a", name="foo", size=0)
window = MemoryMap(addr_width=1, data_width=8, name="foo")
with self.assertRaisesRegex(ValueError, r"Name foo is already used by 'a'"):
memory_map.add_window(window)
def test_add_window_wrong_name_conflict_subordinate(self):
memory_map = MemoryMap(addr_width=2, data_width=8)
memory_map.add_resource("a", name="foo", size=0)
memory_map.add_resource("b", name="bar", size=0)
window = MemoryMap(addr_width=1, data_width=8, name=None)
window.add_resource("c", name="foo", size=0)
window.add_resource("d", name="bar", size=0)
with self.assertRaisesRegex(ValueError,
r"The following names are already used: "
r"bar is used by 'b'; "
r"foo is used by 'a'"):
memory_map.add_window(window)
def test_iter_windows(self):
memory_map = MemoryMap(addr_width=16, data_width=16)
window_1 = MemoryMap(addr_width=10, data_width=8)
memory_map.add_window(window_1, sparse=False)
window_2 = MemoryMap(addr_width=12, data_width=16)
memory_map.add_window(window_2)
self.assertEqual(list(memory_map.windows()), [
(window_1, (0, 0x200, 2)),
(window_2, (0x1000, 0x2000, 1)),
])
def test_iter_window_patterns(self):
memory_map = MemoryMap(addr_width=16, data_width=16)
window_1 = MemoryMap(addr_width=10, data_width=8)
memory_map.add_window(window_1, sparse=False)
window_2 = MemoryMap(addr_width=12, data_width=16)
memory_map.add_window(window_2)
self.assertEqual(list(memory_map.window_patterns()), [
(window_1, ("000000----------", 2)),
(window_2, ("0001------------", 1)),
])
def test_iter_window_patterns_covered(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
window = MemoryMap(addr_width=16, data_width=8)
memory_map.add_window(window)
self.assertEqual(list(memory_map.window_patterns()), [
(window, ("----------------", 1)),
])
def test_align_to(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
self.assertEqual(memory_map.add_resource("a", name="foo", size=1), (0, 1))
self.assertEqual(memory_map.align_to(10), 0x400)
self.assertEqual(memory_map.add_resource("b", name="bar", size=16), (0x400, 0x410))
def test_align_to_wrong(self):
memory_map = MemoryMap(addr_width=16, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Alignment must be a non-negative integer, not -1"):
memory_map.align_to(alignment=-1)
class MemoryMapDiscoveryTestCase(unittest.TestCase):
def setUp(self):
self.root = MemoryMap(addr_width=32, data_width=32)
self.res1 = "res1"
self.root.add_resource(self.res1, name="name1", size=16)
self.win1 = MemoryMap(addr_width=16, data_width=32)
self.res2 = "res2"
self.win1.add_resource(self.res2, name="name2", size=32)
self.res3 = "res3"
self.win1.add_resource(self.res3, name="name3", size=32)
self.root.add_window(self.win1)
self.res4 = "res4"
self.root.add_resource(self.res4, name="name4", size=1)
self.win2 = MemoryMap(addr_width=16, data_width=8)
self.res5 = "res5"
self.win2.add_resource(self.res5, name="name5", size=16)
self.root.add_window(self.win2, sparse=True)
self.win3 = MemoryMap(addr_width=16, data_width=8, name="win3")
self.res6 = "res6"
self.win3.add_resource(self.res6, name="name6", size=16)
self.root.add_window(self.win3, sparse=False)
def test_iter_all_resources(self):
res_info = list(self.root.all_resources())
self.assertIs(res_info[0].resource, self.res1)
self.assertEqual(res_info[0].name, ("name1",))
self.assertEqual(res_info[0].start, 0x00000000)
self.assertEqual(res_info[0].end, 0x00000010)
self.assertEqual(res_info[0].width, 32)
self.assertIs(res_info[1].resource, self.res2)
self.assertEqual(res_info[1].name, ("name2",))
self.assertEqual(res_info[1].start, 0x00010000)
self.assertEqual(res_info[1].end, 0x00010020)
self.assertEqual(res_info[1].width, 32)
self.assertIs(res_info[2].resource, self.res3)
self.assertEqual(res_info[2].name, ("name3",))
self.assertEqual(res_info[2].start, 0x00010020)
self.assertEqual(res_info[2].end, 0x00010040)
self.assertEqual(res_info[2].width, 32)
self.assertIs(res_info[3].resource, self.res4)
self.assertEqual(res_info[3].name, ("name4",))
self.assertEqual(res_info[3].start, 0x00020000)
self.assertEqual(res_info[3].end, 0x00020001)
self.assertEqual(res_info[3].width, 32)
self.assertIs(res_info[4].resource, self.res5)
self.assertEqual(res_info[4].name, ("name5",))
self.assertEqual(res_info[4].start, 0x00030000)
self.assertEqual(res_info[4].end, 0x00030010)
self.assertEqual(res_info[4].width, 8)
self.assertIs(res_info[5].resource, self.res6)
self.assertEqual(res_info[5].name, ("win3", "name6"))
self.assertEqual(res_info[5].start, 0x00040000)
self.assertEqual(res_info[5].end, 0x00040004)
self.assertEqual(res_info[5].width, 32)
def test_find_resource(self):
for res_info in self.root.all_resources():
other = self.root.find_resource(res_info.resource)
self.assertIs(other.resource, res_info.resource)
self.assertEqual(other.name, res_info.name)
self.assertEqual(other.start, res_info.start)
self.assertEqual(other.end, res_info.end)
self.assertEqual(other.width, res_info.width)
def test_find_resource_wrong(self):
with self.assertRaises(KeyError) as error:
self.root.find_resource("resNA")
self.assertEqual(error.exception.args, ("resNA",))
def test_decode_address(self):
for res_info in self.root.all_resources():
self.assertEqual(self.root.decode_address(res_info.start), res_info.resource)
self.assertEqual(self.root.decode_address(res_info.end - 1), res_info.resource)
def test_decode_address_missing(self):
self.assertIsNone(self.root.decode_address(address=0x00000100))
```
#### File: amaranth_soc/test/test_periph.py
```python
import unittest
from ..periph import *
from ..memory import MemoryMap
from .. import event
class ConstantBoolTestCase(unittest.TestCase):
def test_init(self):
a = ConstantBool(True)
b = ConstantBool(False)
self.assertTrue(a.value)
self.assertFalse(b.value)
def test_value_wrong(self):
with self.assertRaisesRegex(TypeError, r"Value must be a bool, not 'foo'"):
ConstantBool("foo")
def test_repr(self):
self.assertEqual(repr(ConstantBool(True)), "ConstantBool(True)")
class ConstantIntTestCase(unittest.TestCase):
def test_init(self):
c = ConstantInt(5, width=8, signed=True)
self.assertEqual(c.value, 5)
self.assertEqual(c.width, 8)
self.assertEqual(c.signed, True)
def test_init_default(self):
c = ConstantInt(5)
self.assertEqual(c.value, 5)
self.assertEqual(c.width, 3)
self.assertEqual(c.signed, False)
def test_value_wrong(self):
with self.assertRaisesRegex(TypeError, r"Value must be an integer, not 'foo'"):
ConstantInt("foo")
def test_width_wrong(self):
with self.assertRaisesRegex(TypeError, r"Width must be an integer, not 'foo'"):
ConstantInt(5, width="foo")
def test_width_overflow(self):
with self.assertRaisesRegex(ValueError,
r"Width must be greater than or equal to the number of bits needed to represent 5"):
ConstantInt(5, width=1)
def test_signed_wrong(self):
with self.assertRaisesRegex(TypeError, r"Signedness must be a bool, not 'foo'"):
ConstantInt(5, signed="foo")
def test_repr(self):
self.assertEqual(
repr(ConstantInt(-5, width=8, signed=True)),
"ConstantInt(-5, width=8, signed=True)"
)
class ConstantMapTestCase(unittest.TestCase):
def test_init(self):
constant_map = ConstantMap(A=5, B=True, C=ConstantBool(False))
self.assertEqual(
repr(constant_map), "ConstantMap(["
"('A', ConstantInt(5, width=3, signed=False)), "
"('B', ConstantBool(True)), "
"('C', ConstantBool(False))])",
)
def test_init_wrong_value(self):
with self.assertRaisesRegex(TypeError,
r"Constant value must be an instance of ConstantValue, not \('foo', 'bar'\)"):
ConstantMap(A=("foo", "bar"))
def test_getitem(self):
a = ConstantInt(1)
b = ConstantBool(False)
constant_map = ConstantMap(A=a, B=b)
self.assertIs(constant_map["A"], a)
self.assertIs(constant_map["B"], b)
def test_iter(self):
a = ConstantInt(1)
b = ConstantBool(False)
constant_map = ConstantMap(B=b, A=a)
self.assertEqual(list(constant_map.items()), [
("B", b),
("A", a),
])
def test_len(self):
a = ConstantInt(1)
b = ConstantBool(False)
constant_map = ConstantMap(B=b, A=a)
self.assertEqual(len(constant_map), 2)
class PeripheralInfoTestCase(unittest.TestCase):
def test_memory_map(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
info = PeripheralInfo(memory_map=memory_map)
self.assertIs(info.memory_map, memory_map)
def test_memory_map_frozen(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
info = PeripheralInfo(memory_map=memory_map)
with self.assertRaisesRegex(ValueError,
r"Memory map has been frozen. Cannot add resource 'a'"):
memory_map.add_resource("a", name="foo", size=3, extend=True)
def test_memory_map_wrong(self):
with self.assertRaisesRegex(TypeError,
r"Memory map must be an instance of MemoryMap, not 'foo'"):
info = PeripheralInfo(memory_map="foo")
def test_irq(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
irq = event.Source()
info = PeripheralInfo(memory_map=memory_map, irq=irq)
self.assertIs(info.irq, irq)
def test_irq_none(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
info = PeripheralInfo(memory_map=memory_map, irq=None)
with self.assertRaisesRegex(NotImplementedError,
r"Peripheral info does not have an IRQ line"):
info.irq
def test_irq_default(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
info = PeripheralInfo(memory_map=memory_map)
with self.assertRaisesRegex(NotImplementedError,
r"Peripheral info does not have an IRQ line"):
info.irq
def test_irq_wrong(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
with self.assertRaisesRegex(TypeError,
r"IRQ line must be an instance of event.Source, not 'foo'"):
info = PeripheralInfo(memory_map=memory_map, irq="foo")
def test_constant_map(self):
constant_map = ConstantMap()
memory_map = MemoryMap(addr_width=1, data_width=8)
info = PeripheralInfo(memory_map=memory_map, constant_map=constant_map)
self.assertIs(info.constant_map, constant_map)
def test_constant_map_none(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
info = PeripheralInfo(memory_map=memory_map, constant_map=None)
self.assertIsInstance(info.constant_map, ConstantMap)
self.assertEqual(info.constant_map, {})
def test_constant_map_default(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
info = PeripheralInfo(memory_map=memory_map)
self.assertIsInstance(info.constant_map, ConstantMap)
self.assertEqual(info.constant_map, {})
def test_constant_map_wrong(self):
memory_map = MemoryMap(addr_width=1, data_width=8)
with self.assertRaisesRegex(TypeError,
r"Constant map must be an instance of ConstantMap, not 'foo'"):
info = PeripheralInfo(memory_map=memory_map, constant_map="foo")
```
#### File: amaranth_soc/test/test_wishbone_bus.py
```python
import unittest
from amaranth import *
from amaranth.hdl.rec import *
from amaranth.back.pysim import *
from ..wishbone.bus import *
from ..memory import MemoryMap
class InterfaceTestCase(unittest.TestCase):
def test_simple(self):
iface = Interface(addr_width=32, data_width=8)
self.assertEqual(iface.addr_width, 32)
self.assertEqual(iface.data_width, 8)
self.assertEqual(iface.granularity, 8)
self.assertEqual(iface.layout, Layout.cast([
("adr", 32, DIR_FANOUT),
("dat_w", 8, DIR_FANOUT),
("dat_r", 8, DIR_FANIN),
("sel", 1, DIR_FANOUT),
("cyc", 1, DIR_FANOUT),
("stb", 1, DIR_FANOUT),
("we", 1, DIR_FANOUT),
("ack", 1, DIR_FANIN),
]))
def test_granularity(self):
iface = Interface(addr_width=30, data_width=32, granularity=8)
self.assertEqual(iface.addr_width, 30)
self.assertEqual(iface.data_width, 32)
self.assertEqual(iface.granularity, 8)
self.assertEqual(iface.layout, Layout.cast([
("adr", 30, DIR_FANOUT),
("dat_w", 32, DIR_FANOUT),
("dat_r", 32, DIR_FANIN),
("sel", 4, DIR_FANOUT),
("cyc", 1, DIR_FANOUT),
("stb", 1, DIR_FANOUT),
("we", 1, DIR_FANOUT),
("ack", 1, DIR_FANIN),
]))
def test_features(self):
iface = Interface(addr_width=32, data_width=32,
features={"rty", "err", "stall", "lock", "cti", "bte"})
self.assertEqual(iface.layout, Layout.cast([
("adr", 32, DIR_FANOUT),
("dat_w", 32, DIR_FANOUT),
("dat_r", 32, DIR_FANIN),
("sel", 1, DIR_FANOUT),
("cyc", 1, DIR_FANOUT),
("stb", 1, DIR_FANOUT),
("we", 1, DIR_FANOUT),
("ack", 1, DIR_FANIN),
("err", 1, DIR_FANIN),
("rty", 1, DIR_FANIN),
("stall", 1, DIR_FANIN),
("lock", 1, DIR_FANOUT),
("cti", CycleType, DIR_FANOUT),
("bte", BurstTypeExt, DIR_FANOUT),
]))
def test_wrong_addr_width(self):
with self.assertRaisesRegex(ValueError,
r"Address width must be a non-negative integer, not -1"):
Interface(addr_width=-1, data_width=8)
def test_wrong_data_width(self):
with self.assertRaisesRegex(ValueError,
r"Data width must be one of 8, 16, 32, 64, not 7"):
Interface(addr_width=0, data_width=7)
def test_wrong_granularity(self):
with self.assertRaisesRegex(ValueError,
r"Granularity must be one of 8, 16, 32, 64, not 7"):
Interface(addr_width=0, data_width=32, granularity=7)
def test_wrong_granularity_wide(self):
with self.assertRaisesRegex(ValueError,
r"Granularity 32 may not be greater than data width 8"):
Interface(addr_width=0, data_width=8, granularity=32)
def test_wrong_features(self):
with self.assertRaisesRegex(ValueError,
r"Optional signal\(s\) 'foo' are not supported"):
Interface(addr_width=0, data_width=8, features={"foo"})
def test_get_map_wrong(self):
iface = Interface(addr_width=0, data_width=8)
with self.assertRaisesRegex(NotImplementedError,
r"Bus interface \(rec iface adr dat_w dat_r sel cyc stb we ack\) does "
r"not have a memory map"):
iface.memory_map
def test_get_map_frozen(self):
iface = Interface(addr_width=1, data_width=8)
iface.memory_map = MemoryMap(addr_width=1, data_width=8)
with self.assertRaisesRegex(ValueError,
r"Memory map has been frozen\. Address width cannot be extended "
r"further"):
iface.memory_map.addr_width = 2
def test_set_map_wrong(self):
iface = Interface(addr_width=0, data_width=8)
with self.assertRaisesRegex(TypeError,
r"Memory map must be an instance of MemoryMap, not 'foo'"):
iface.memory_map = "foo"
def test_set_map_wrong_data_width(self):
iface = Interface(addr_width=30, data_width=32, granularity=8)
with self.assertRaisesRegex(ValueError,
r"Memory map has data width 32, which is not the same as bus "
r"interface granularity 8"):
iface.memory_map = MemoryMap(addr_width=32, data_width=32)
def test_set_map_wrong_addr_width(self):
iface = Interface(addr_width=30, data_width=32, granularity=8)
with self.assertRaisesRegex(ValueError,
r"Memory map has address width 30, which is not the same as bus "
r"interface address width 32 \(30 address bits \+ 2 granularity bits\)"):
iface.memory_map = MemoryMap(addr_width=30, data_width=8)
class DecoderTestCase(unittest.TestCase):
def setUp(self):
self.dut = Decoder(addr_width=31, data_width=32, granularity=16)
def test_add_align_to(self):
sub_1 = Interface(addr_width=15, data_width=32, granularity=16)
sub_1.memory_map = MemoryMap(addr_width=16, data_width=16)
sub_2 = Interface(addr_width=15, data_width=32, granularity=16)
sub_2.memory_map = MemoryMap(addr_width=16, data_width=16)
self.assertEqual(self.dut.add(sub_1), (0x00000000, 0x00010000, 1))
self.assertEqual(self.dut.align_to(18), 0x000040000)
self.assertEqual(self.dut.align_to(alignment=18), 0x000040000)
self.assertEqual(self.dut.add(sub_2), (0x00040000, 0x00050000, 1))
def test_add_extend(self):
sub = Interface(addr_width=31, data_width=32, granularity=16)
sub.memory_map = MemoryMap(addr_width=32, data_width=16)
self.assertEqual(self.dut.add(sub, addr=1, extend=True), (1, 0x100000001, 1))
self.assertEqual(self.dut.bus.addr_width, 32)
def test_add_wrong(self):
with self.assertRaisesRegex(TypeError,
r"Subordinate bus must be an instance of wishbone\.Interface, not 'foo'"):
self.dut.add(sub_bus="foo")
def test_add_wrong_granularity(self):
with self.assertRaisesRegex(ValueError,
r"Subordinate bus has granularity 32, which is greater than "
r"the decoder granularity 16"):
self.dut.add(Interface(addr_width=15, data_width=32, granularity=32))
def test_add_wrong_width_dense(self):
with self.assertRaisesRegex(ValueError,
r"Subordinate bus has data width 16, which is not the same as decoder "
r"data width 32 \(required for dense address translation\)"):
self.dut.add(Interface(addr_width=15, data_width=16, granularity=16))
def test_add_wrong_granularity_sparse(self):
with self.assertRaisesRegex(ValueError,
r"Subordinate bus has data width 64, which is not the same as subordinate "
r"bus granularity 16 \(required for sparse address translation\)"):
self.dut.add(Interface(addr_width=15, data_width=64, granularity=16), sparse=True)
def test_add_wrong_optional_output(self):
with self.assertRaisesRegex(ValueError,
r"Subordinate bus has optional output 'err', but the decoder does "
r"not have a corresponding input"):
self.dut.add(Interface(addr_width=15, data_width=32, granularity=16, features={"err"}))
def test_add_wrong_out_of_bounds(self):
sub = Interface(addr_width=31, data_width=32, granularity=16)
sub.memory_map = MemoryMap(addr_width=32, data_width=16)
with self.assertRaisesRegex(ValueError,
r"Address range 0x1\.\.0x100000001 out of bounds for memory map spanning "
r"range 0x0\.\.0x100000000 \(32 address bits\)"):
self.dut.add(sub, addr=1)
class DecoderSimulationTestCase(unittest.TestCase):
def test_simple(self):
dut = Decoder(addr_width=30, data_width=32, granularity=8,
features={"err", "rty", "stall", "lock", "cti", "bte"})
sub_1 = Interface(addr_width=14, data_width=32, granularity=8)
sub_1.memory_map = MemoryMap(addr_width=16, data_width=8)
dut.add(sub_1, addr=0x10000)
sub_2 = Interface(addr_width=14, data_width=32, granularity=8,
features={"err", "rty", "stall", "lock", "cti", "bte"})
sub_2.memory_map = MemoryMap(addr_width=16, data_width=8)
dut.add(sub_2)
def sim_test():
yield dut.bus.adr.eq(0x10400 >> 2)
yield dut.bus.cyc.eq(1)
yield dut.bus.stb.eq(1)
yield dut.bus.sel.eq(0b11)
yield dut.bus.dat_w.eq(0x12345678)
yield dut.bus.lock.eq(1)
yield dut.bus.cti.eq(CycleType.INCR_BURST)
yield dut.bus.bte.eq(BurstTypeExt.WRAP_4)
yield sub_1.ack.eq(1)
yield sub_1.dat_r.eq(0xabcdef01)
yield sub_2.dat_r.eq(0x5678abcd)
yield Delay(1e-6)
self.assertEqual((yield sub_1.adr), 0x400 >> 2)
self.assertEqual((yield sub_1.cyc), 1)
self.assertEqual((yield sub_2.cyc), 0)
self.assertEqual((yield sub_1.stb), 1)
self.assertEqual((yield sub_1.sel), 0b11)
self.assertEqual((yield sub_1.dat_w), 0x12345678)
self.assertEqual((yield dut.bus.ack), 1)
self.assertEqual((yield dut.bus.err), 0)
self.assertEqual((yield dut.bus.rty), 0)
self.assertEqual((yield dut.bus.dat_r), 0xabcdef01)
yield dut.bus.adr.eq(0x20400 >> 2)
yield sub_1.ack.eq(0)
yield sub_2.err.eq(1)
yield sub_2.rty.eq(1)
yield sub_2.stall.eq(1)
yield Delay(1e-6)
self.assertEqual((yield sub_2.adr), 0x400 >> 2)
self.assertEqual((yield sub_1.cyc), 0)
self.assertEqual((yield sub_2.cyc), 1)
self.assertEqual((yield sub_1.stb), 1)
self.assertEqual((yield sub_1.sel), 0b11)
self.assertEqual((yield sub_1.dat_w), 0x12345678)
self.assertEqual((yield sub_2.lock), 1)
self.assertEqual((yield sub_2.cti), CycleType.INCR_BURST.value)
self.assertEqual((yield sub_2.bte), BurstTypeExt.WRAP_4.value)
self.assertEqual((yield dut.bus.ack), 0)
self.assertEqual((yield dut.bus.err), 1)
self.assertEqual((yield dut.bus.rty), 1)
self.assertEqual((yield dut.bus.stall), 1)
self.assertEqual((yield dut.bus.dat_r), 0x5678abcd)
sim = Simulator(dut)
sim.add_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
def test_addr_translate(self):
class AddressLoopback(Elaboratable):
def __init__(self, **kwargs):
self.bus = Interface(**kwargs)
def elaborate(self, platform):
m = Module()
for index, sel_bit in enumerate(self.bus.sel):
with m.If(sel_bit):
segment = self.bus.dat_r.word_select(index, self.bus.granularity)
m.d.comb += segment.eq(self.bus.adr + index)
return m
dut = Decoder(addr_width=20, data_width=32, granularity=16)
loop_1 = AddressLoopback(addr_width=7, data_width=32, granularity=16)
loop_1.bus.memory_map = MemoryMap(addr_width=8, data_width=16)
self.assertEqual(dut.add(loop_1.bus, addr=0x10000),
(0x10000, 0x10100, 1))
loop_2 = AddressLoopback(addr_width=6, data_width=32, granularity=8)
loop_2.bus.memory_map = MemoryMap(addr_width=8, data_width=8)
self.assertEqual(dut.add(loop_2.bus, addr=0x20000),
(0x20000, 0x20080, 2))
loop_3 = AddressLoopback(addr_width=8, data_width=16, granularity=16)
loop_3.bus.memory_map = MemoryMap(addr_width=8, data_width=16)
self.assertEqual(dut.add(loop_3.bus, addr=0x30000, sparse=True),
(0x30000, 0x30100, 1))
loop_4 = AddressLoopback(addr_width=8, data_width=8, granularity=8)
loop_4.bus.memory_map = MemoryMap(addr_width=8, data_width=8)
self.assertEqual(dut.add(loop_4.bus, addr=0x40000, sparse=True),
(0x40000, 0x40100, 1))
def sim_test():
yield dut.bus.cyc.eq(1)
yield dut.bus.adr.eq(0x10010 >> 1)
yield dut.bus.sel.eq(0b11)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x00090008)
yield dut.bus.sel.eq(0b01)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x00000008)
yield dut.bus.sel.eq(0b10)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x00090000)
yield dut.bus.adr.eq(0x20010 >> 1)
yield dut.bus.sel.eq(0b11)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x13121110)
yield dut.bus.sel.eq(0b01)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x00001110)
yield dut.bus.sel.eq(0b10)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x13120000)
yield dut.bus.adr.eq(0x30010 >> 1)
yield dut.bus.sel.eq(0b11)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x0008)
yield dut.bus.sel.eq(0b01)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x0008)
yield dut.bus.sel.eq(0b10)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x0000)
yield dut.bus.adr.eq(0x30012 >> 1)
yield dut.bus.sel.eq(0b11)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x0009)
yield dut.bus.adr.eq(0x40010 >> 1)
yield dut.bus.sel.eq(0b11)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x08)
yield dut.bus.sel.eq(0b01)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x08)
yield dut.bus.sel.eq(0b10)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x00)
yield dut.bus.adr.eq(0x40012 >> 1)
yield dut.bus.sel.eq(0b11)
yield Delay(1e-6)
self.assertEqual((yield dut.bus.dat_r), 0x09)
m = Module()
m.submodules += dut, loop_1, loop_2, loop_3, loop_4
sim = Simulator(m)
sim.add_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
def test_coarse_granularity(self):
dut = Decoder(addr_width=3, data_width=32)
sub = Interface(addr_width=2, data_width=32)
sub.memory_map = MemoryMap(addr_width=2, data_width=32)
dut.add(sub)
def sim_test():
yield dut.bus.cyc.eq(1)
yield dut.bus.adr.eq(0x0)
yield Delay(1e-6)
self.assertEqual((yield sub.cyc), 1)
yield dut.bus.adr.eq(0x4)
yield Delay(1e-6)
self.assertEqual((yield sub.cyc), 0)
sim = Simulator(dut)
sim.add_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
class ArbiterTestCase(unittest.TestCase):
def setUp(self):
self.dut = Arbiter(addr_width=31, data_width=32, granularity=16,
features={"err"})
def test_add_wrong(self):
with self.assertRaisesRegex(TypeError,
r"Initiator bus must be an instance of wishbone\.Interface, not 'foo'"):
self.dut.add(intr_bus="foo")
def test_add_wrong_addr_width(self):
with self.assertRaisesRegex(ValueError,
r"Initiator bus has address width 15, which is not the same as arbiter "
r"address width 31"):
self.dut.add(Interface(addr_width=15, data_width=32, granularity=16))
def test_add_wrong_granularity(self):
with self.assertRaisesRegex(ValueError,
r"Initiator bus has granularity 8, which is lesser than "
r"the arbiter granularity 16"):
self.dut.add(Interface(addr_width=31, data_width=32, granularity=8))
def test_add_wrong_data_width(self):
with self.assertRaisesRegex(ValueError,
r"Initiator bus has data width 16, which is not the same as arbiter "
r"data width 32"):
self.dut.add(Interface(addr_width=31, data_width=16, granularity=16))
def test_add_wrong_optional_output(self):
with self.assertRaisesRegex(ValueError,
r"Arbiter has optional output 'err', but the initiator bus does "
r"not have a corresponding input"):
self.dut.add(Interface(addr_width=31, data_width=32, granularity=16))
class ArbiterSimulationTestCase(unittest.TestCase):
def test_simple(self):
dut = Arbiter(addr_width=30, data_width=32, granularity=8,
features={"err", "rty", "stall", "lock", "cti", "bte"})
intr_1 = Interface(addr_width=30, data_width=32, granularity=8,
features={"err", "rty"})
dut.add(intr_1)
intr_2 = Interface(addr_width=30, data_width=32, granularity=16,
features={"err", "rty", "stall", "lock", "cti", "bte"})
dut.add(intr_2)
def sim_test():
yield intr_1.adr.eq(0x7ffffffc >> 2)
yield intr_1.cyc.eq(1)
yield intr_1.stb.eq(1)
yield intr_1.sel.eq(0b1111)
yield intr_1.we.eq(1)
yield intr_1.dat_w.eq(0x12345678)
yield dut.bus.dat_r.eq(0xabcdef01)
yield dut.bus.ack.eq(1)
yield dut.bus.err.eq(1)
yield dut.bus.rty.eq(1)
yield Delay(1e-7)
self.assertEqual((yield dut.bus.adr), 0x7ffffffc >> 2)
self.assertEqual((yield dut.bus.cyc), 1)
self.assertEqual((yield dut.bus.stb), 1)
self.assertEqual((yield dut.bus.sel), 0b1111)
self.assertEqual((yield dut.bus.we), 1)
self.assertEqual((yield dut.bus.dat_w), 0x12345678)
self.assertEqual((yield dut.bus.lock), 0)
self.assertEqual((yield dut.bus.cti), CycleType.CLASSIC.value)
self.assertEqual((yield dut.bus.bte), BurstTypeExt.LINEAR.value)
self.assertEqual((yield intr_1.dat_r), 0xabcdef01)
self.assertEqual((yield intr_1.ack), 1)
self.assertEqual((yield intr_1.err), 1)
self.assertEqual((yield intr_1.rty), 1)
yield intr_1.cyc.eq(0)
yield intr_2.adr.eq(0xe0000000 >> 2)
yield intr_2.cyc.eq(1)
yield intr_2.stb.eq(1)
yield intr_2.sel.eq(0b10)
yield intr_2.we.eq(1)
yield intr_2.dat_w.eq(0x43218765)
yield intr_2.lock.eq(0)
yield intr_2.cti.eq(CycleType.INCR_BURST)
yield intr_2.bte.eq(BurstTypeExt.WRAP_4)
yield Tick()
yield dut.bus.stall.eq(0)
yield Delay(1e-7)
self.assertEqual((yield dut.bus.adr), 0xe0000000 >> 2)
self.assertEqual((yield dut.bus.cyc), 1)
self.assertEqual((yield dut.bus.stb), 1)
self.assertEqual((yield dut.bus.sel), 0b1100)
self.assertEqual((yield dut.bus.we), 1)
self.assertEqual((yield dut.bus.dat_w), 0x43218765)
self.assertEqual((yield dut.bus.lock), 0)
self.assertEqual((yield dut.bus.cti), CycleType.INCR_BURST.value)
self.assertEqual((yield dut.bus.bte), BurstTypeExt.WRAP_4.value)
self.assertEqual((yield intr_2.dat_r), 0xabcdef01)
self.assertEqual((yield intr_2.ack), 1)
self.assertEqual((yield intr_2.err), 1)
self.assertEqual((yield intr_2.rty), 1)
self.assertEqual((yield intr_2.stall), 0)
sim = Simulator(dut)
sim.add_clock(1e-6)
sim.add_sync_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
def test_lock(self):
dut = Arbiter(addr_width=30, data_width=32, features={"lock"})
intr_1 = Interface(addr_width=30, data_width=32, features={"lock"})
dut.add(intr_1)
intr_2 = Interface(addr_width=30, data_width=32, features={"lock"})
dut.add(intr_2)
def sim_test():
yield intr_1.cyc.eq(1)
yield intr_1.lock.eq(1)
yield intr_2.cyc.eq(1)
yield dut.bus.ack.eq(1)
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 1)
self.assertEqual((yield intr_2.ack), 0)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 1)
self.assertEqual((yield intr_2.ack), 0)
yield intr_1.lock.eq(0)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 0)
self.assertEqual((yield intr_2.ack), 1)
yield intr_2.cyc.eq(0)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 1)
self.assertEqual((yield intr_2.ack), 0)
yield intr_1.stb.eq(1)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 1)
self.assertEqual((yield intr_2.ack), 0)
yield intr_1.stb.eq(0)
yield intr_2.cyc.eq(1)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 0)
self.assertEqual((yield intr_2.ack), 1)
sim = Simulator(dut)
sim.add_clock(1e-6)
sim.add_sync_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
def test_stall(self):
dut = Arbiter(addr_width=30, data_width=32, features={"stall"})
intr_1 = Interface(addr_width=30, data_width=32, features={"stall"})
dut.add(intr_1)
intr_2 = Interface(addr_width=30, data_width=32, features={"stall"})
dut.add(intr_2)
def sim_test():
yield intr_1.cyc.eq(1)
yield intr_2.cyc.eq(1)
yield dut.bus.stall.eq(0)
yield Delay(1e-6)
self.assertEqual((yield intr_1.stall), 0)
self.assertEqual((yield intr_2.stall), 1)
yield dut.bus.stall.eq(1)
yield Delay(1e-6)
self.assertEqual((yield intr_1.stall), 1)
self.assertEqual((yield intr_2.stall), 1)
sim = Simulator(dut)
sim.add_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
def test_stall_compat(self):
dut = Arbiter(addr_width=30, data_width=32)
intr_1 = Interface(addr_width=30, data_width=32, features={"stall"})
dut.add(intr_1)
intr_2 = Interface(addr_width=30, data_width=32, features={"stall"})
dut.add(intr_2)
def sim_test():
yield intr_1.cyc.eq(1)
yield intr_2.cyc.eq(1)
yield Delay(1e-6)
self.assertEqual((yield intr_1.stall), 1)
self.assertEqual((yield intr_2.stall), 1)
yield dut.bus.ack.eq(1)
yield Delay(1e-6)
self.assertEqual((yield intr_1.stall), 0)
self.assertEqual((yield intr_2.stall), 1)
sim = Simulator(dut)
sim.add_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
def test_roundrobin(self):
dut = Arbiter(addr_width=30, data_width=32)
intr_1 = Interface(addr_width=30, data_width=32)
dut.add(intr_1)
intr_2 = Interface(addr_width=30, data_width=32)
dut.add(intr_2)
intr_3 = Interface(addr_width=30, data_width=32)
dut.add(intr_3)
def sim_test():
yield intr_1.cyc.eq(1)
yield intr_2.cyc.eq(0)
yield intr_3.cyc.eq(1)
yield dut.bus.ack.eq(1)
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 1)
self.assertEqual((yield intr_2.ack), 0)
self.assertEqual((yield intr_3.ack), 0)
yield intr_1.cyc.eq(0)
yield intr_2.cyc.eq(0)
yield intr_3.cyc.eq(1)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 0)
self.assertEqual((yield intr_2.ack), 0)
self.assertEqual((yield intr_3.ack), 1)
yield intr_1.cyc.eq(1)
yield intr_2.cyc.eq(1)
yield intr_3.cyc.eq(0)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 1)
self.assertEqual((yield intr_2.ack), 0)
self.assertEqual((yield intr_3.ack), 0)
yield intr_1.cyc.eq(0)
yield intr_2.cyc.eq(1)
yield intr_3.cyc.eq(1)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 0)
self.assertEqual((yield intr_2.ack), 1)
self.assertEqual((yield intr_3.ack), 0)
yield intr_1.cyc.eq(1)
yield intr_2.cyc.eq(0)
yield intr_3.cyc.eq(1)
yield Tick()
yield Delay(1e-7)
self.assertEqual((yield intr_1.ack), 0)
self.assertEqual((yield intr_2.ack), 0)
self.assertEqual((yield intr_3.ack), 1)
sim = Simulator(dut)
sim.add_clock(1e-6)
sim.add_sync_process(sim_test)
with sim.write_vcd(vcd_file=open("test.vcd", "w")):
sim.run()
```
|
{
"source": "jfnielsen/AutoDiffPulses",
"score": 2
}
|
#### File: AutoDiffPulses/adpulses/metrics.py
```python
from typing import Optional
from torch import Tensor
def err_null(Mr_: Tensor, Md_: Tensor, w_: Optional[Tensor] = None) -> Tensor:
"""
*INPUTS*
- `Mr_` (1, nM, xyz)
- `Md_` (1, nM, xyz)
*OPTIONALS*
- `w_` (1, nM)
*OUTPUTS*
- `err` (1,)
"""
return Mr_.new_zeros([])
def err_l2z(Mr_: Tensor, Md_: Tensor, w_: Optional[Tensor] = None) -> Tensor:
"""
*INPUTS*
- `Mr_` (1, nM, xyz)
- `Md_` (1, nM, xyz)
*OPTIONALS*
- `w_` (1, nM)
*OUTPUTS*
- `err` (1,)
"""
Me_ = (Mr_[..., 2] - Md_[..., 2]) # (1, nM)
err = (Me_ if w_ is None else Me_*w_).norm()**2
return err
def err_l2xy(Mr_: Tensor, Md_: Tensor, w_: Optional[Tensor] = None) -> Tensor:
"""
*INPUTS*
- `Mr_` (1, nM, xyz)
- `Md_` (1, nM, xyz)
*OPTIONALS*
- `w_` (1, nM)
*OUTPUTS*
- `err` (1,)
"""
Me_ = (Mr_[..., :2] - Md_[..., :2])
err = (Me_ if w_ is None else Me_*w_[..., None]).norm()**2
return err
# hijacking ml2xy for prephasing problem
def err_ml2xy(Mr_: Tensor, Md_: Tensor, w_: Optional[Tensor] = None) -> Tensor:
"""
*INPUTS*
- `Mr_` (1, nM, xyz)
- `Md_` (1, nM, xyz)
*OPTIONALS*
- `w_` (1, nM)
*OUTPUTS*
- `err` (1,)
"""
lam1 = 10.0 # 12/6/21: 1.0
lam2 = 1.0 # 12/6/21: 2.0
Me_ = Mr_[..., :2].norm(dim=-1) - Md_[..., :2].norm(dim=-1)
errmag = (Me_ if w_ is None else Me_*w_).norm()**2
Me_ = (Mr_[..., :2] - Md_[..., :2])
errcplx = (Me_ if w_ is None else Me_*w_[..., None]).norm()**2
err = lam1 * errmag + lam2 * errcplx
return err
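# --- Illustrative usage sketch (not part of the original module; the shapes and
# values below are hypothetical). The metrics take batched magnetization tensors
# of shape (1, nM, xyz) plus optional weights of shape (1, nM) and return a
# scalar tensor.
if __name__ == "__main__":
    import torch
    Mr_ = torch.rand(1, 4, 3)      # "realized" magnetization at 4 spatial points
    Md_ = torch.rand(1, 4, 3)      # "desired" magnetization
    w_ = torch.ones(1, 4)          # per-location weights
    print(err_l2xy(Mr_, Md_, w_))  # weighted transverse (x, y) l2 error
    print(err_l2z(Mr_, Md_))       # longitudinal (z) l2 error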
```
#### File: +adpulses/+opt/parctanAD.py
```python
import torch
import adpulses
from adpulses import io, optimizers, metrics, penalties
if __name__ == "__main__":
import sys
if len(sys.argv) <= 1: # mode DEBUG
import os
os.chdir(os.path.dirname(os.path.abspath(__file__)))
m2pName = ('m2p.mat' if len(sys.argv) <= 1 else sys.argv[1])
p2mName = ('p2m.mat' if len(sys.argv) <= 2 else sys.argv[2])
gpuID = ('0' if len(sys.argv) <= 3 else sys.argv[3])
# %% load
if gpuID == '-1':
dkw = {'device': torch.device('cpu'), 'dtype': torch.float32}
else:
dkw = {'device': torch.device('cuda:'+gpuID), 'dtype': torch.float32}
target, cube, pulse, arg = io.m2p(m2pName, **dkw)
def dflt_arg(k, v, fn):
return (fn(k) if ((k in arg.keys()) and (arg[k].size > 0)) else v)
arg['doRelax'] = dflt_arg('doRelax', True, lambda k: bool(arg[k].item()))
    # f_tensor / f_c2r_np below are conversion helpers (complex numpy array ->
    # real-valued tensor) that the original script expects to be in scope here.
    arg['b1Map_'] = dflt_arg('b1Map_', None,
                             lambda k: f_tensor(f_c2r_np(arg[k], -2)))
arg['niter'] = dflt_arg('niter', 10, lambda k: arg[k].item())
arg['niter_gr'] = dflt_arg('niter_gr', 2, lambda k: arg[k].item())
arg['niter_rf'] = dflt_arg('niter_rf', 2, lambda k: arg[k].item())
arg['nB'] = dflt_arg('nB', 100, lambda k: arg[k].item())
arg['isHead'] = dflt_arg('isHead', True, lambda k: bool(arg[k].item()))
eta = dflt_arg('eta', 4, lambda k: float(arg[k].item()))
print('eta: ', eta)
err_meth = dflt_arg('err_meth', 'l2xy', lambda k: arg[k].item())
pen_meth = dflt_arg('pen_meth', 'l2', lambda k: arg[k].item())
err_hash = {'null': metrics.err_null,
'l2xy': metrics.err_l2xy, 'ml2xy': metrics.err_ml2xy,
'l2z': metrics.err_l2z}
pen_hash = {'null': penalties.pen_null, 'l2': penalties.pen_l2}
fn_err, fn_pen = err_hash[err_meth], pen_hash[pen_meth]
# %% pulse design
kw = {k: arg[k] for k in ('b1Map_', 'niter', 'niter_gr', 'niter_rf', 'nB',
'isHead', 'doRelax')}
pulse, optInfos = optimizers.parctanLBFGS(target, cube, pulse,
fn_err, fn_pen, eta=eta, **kw)
# %% saving
io.p2m(p2mName, pulse, optInfos)
```
|
{
"source": "jfoerderer/lda-topic-modeling",
"score": 3
}
|
#### File: jfoerderer/lda-topic-modeling/description tm.py
```python
import csv
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora
import gensim
import os
import re
from nltk.tokenize import RegexpTokenizer
# SET PATH: folder that contains the input CSV and the name of the CSV file
# (left blank here; fill in before running).
path = r''
inputname = ""
def remove_html_tags(text):
"""Remove html tags from a string"""
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
#setup
tokenizer = RegexpTokenizer(r'\w+')
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
fn = os.path.join(path, inputname)
doc_set = []
# Read the description column (row[1]) of the CSV, dropping HTML tags and any
# non-alphabetic characters before adding each entry to the document set.
with open(fn, encoding="utf8") as f:
csv_f = csv.reader(f)
for i, row in enumerate(csv_f):
if i > 1 and len(row) > 1 :
temp=remove_html_tags(row[1])
temp = re.sub("[^a-zA-Z ]","", temp)
doc_set.append(temp)
texts = []
# Lower-case and tokenize each non-empty document; documents with more than
# five tokens are kept, with English stop words removed.
for i in doc_set:
if i.strip():
raw = i.lower()
tokens = tokenizer.tokenize(raw)
if len(tokens)>5:
stopped_tokens = [i for i in tokens if not i in en_stop]
texts.append(stopped_tokens)
# Build the gensim dictionary and bag-of-words corpus, then fit an LSI model
# (5 topics) and an LDA model (20 topics, 20 passes) on the same corpus.
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary, num_topics=5)
print (lsi.print_topics(num_topics=3, num_words=3))
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=20, id2word = dictionary, passes=20)
print(ldamodel.print_topics(num_topics=20, num_words=5))
K = ldamodel.num_topics
topicWordProbMat = ldamodel.print_topics(K)
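# --- Illustrative inference sketch (an assumption, not part of the original
# script; the example text is hypothetical). After training, the topic mixture
# of a new document is obtained by mapping it into the same bag-of-words space:
new_doc = "students discuss the course project deadline"
new_tokens = [t for t in tokenizer.tokenize(new_doc.lower()) if t not in en_stop]
new_bow = dictionary.doc2bow(new_tokens)
print(ldamodel.get_document_topics(new_bow))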
```
|
{
"source": "jfoley-yw/scrabble",
"score": 3
}
|
#### File: scrabble/scrabbler/ABStrategy.py
```python
from scrabbler.ABgamestate import ABGamestate
from scrabbler.strategy import Strategy
import copy
import math
from collections import defaultdict
class ABStrategy(Strategy):
    """A strategy that chooses moves via depth-limited minimax search with alpha-beta pruning."""
def __init__(self, reduce_opponent=True):
self.dictionary = None
self.reduce_opponent = reduce_opponent
self.search_depth = 2
self.nodes_visited = 0
self.total_nodes = []
    def choose_move(self, game, rack, score_diff, opponent_rack, dictionary):
        """Choose the best move by running a depth-limited alpha-beta search from the current state."""
self.nodes_visited = 0
self.dictionary = dictionary
board = copy.deepcopy(game.board)
gamestate = ABGamestate(board, rack, opponent_rack, 0, score_diff, [])
value, move = self.minimax(gamestate, self.search_depth, False, -math.inf, math.inf)
self.total_nodes.append(self.nodes_visited)
return move
    def minimax(self, cur_state, max_depth, is_player_minimizer, alpha, beta):
        """Recursively search the game tree; return (value, first move along the best line found)."""
self.nodes_visited += 1
best_move = None
first_move = None
if max_depth == 0 or cur_state.is_end_state():
return cur_state.score_diff, cur_state.moves[0]
moves = cur_state.find_next_moves(self.dictionary)
if len(moves) == 0:
return cur_state.score_diff, None
if is_player_minimizer:
value = math.inf
if self.reduce_opponent:
min_moves = [moves[0]]
else:
min_moves = moves
for move in min_moves:
# get the next rack for the minimizing player
next_rack = cur_state.get_next_rack(move)
# update the board with the new move, update score differential, and get new gamestate
next_board = cur_state.get_next_board(move, self.dictionary)
game_over = (len(next_rack) == 0)
leaf = max_depth - 1 == 0
next_score_diff = cur_state.new_score_diff(move.score, game_over, leaf, next_rack)
moves_to_get_here = cur_state.update_moves(move)
# create the next state
next_state = ABGamestate(next_board, cur_state.player_rack, next_rack, is_player_minimizer=False, score_diff=next_score_diff, moves=moves_to_get_here)
evaluation, first_move = self.minimax(next_state, max_depth - 1, False, alpha , beta)
value = min(value, evaluation)
beta = min(beta, evaluation)
if beta <= alpha:
break
return value, first_move
if not is_player_minimizer:
value = -math.inf
for move in moves:
                # get the next rack for the maximizing player
next_rack = cur_state.get_next_rack(move)
# update the board with the new move
next_board = cur_state.get_next_board(move, self.dictionary)
# update the score diff
game_over = (len(next_rack) == 0)
leaf = max_depth - 1 == 0
next_score_diff = cur_state.new_score_diff(move.score, game_over, leaf, next_rack)
# update the moves to get to the gamestate
moves_to_get_here = cur_state.update_moves(move)
# create the next state
next_state = ABGamestate(next_board, next_rack, cur_state.opponent_rack, is_player_minimizer=True, score_diff=next_score_diff, moves=moves_to_get_here)
evaluation, moves_to_here = self.minimax(next_state, max_depth - 1, True, alpha, beta)
value = max(value, evaluation)
if max(value, evaluation) is evaluation:
value = evaluation
best_move = moves_to_here
alpha = max(alpha, evaluation)
if beta <= alpha:
break
return value, best_move
```
#### File: scrabble/scrabbler/MCTSgamestate.py
```python
class MCTSgamestate:
def __init__(self, board_state, rack_state, is_my_turn, score_differential):
# board_state is a tuple of size rows * columns where each entry is the tile on the board
# or None if there is no tile.
self.board_state = board_state
# rack_state is a tuple of size 27 where each entry is number of corresponding letters
# e.g. "ABBEF?" would be (1,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)
self.rack_state = rack_state
# True if it's my turn, False otherwise
self.is_my_turn = is_my_turn
# my score minus other player's score
self.score_differential = score_differential
def __members(self):
return self.board_state, self.rack_state, self.is_my_turn, self.score_differential
def __eq__(self, other):
if type(other) is type(self):
return self.__members() == other.__members()
else:
return False
def __hash__(self):
return hash(self.__members())
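# --- Illustrative helper (an assumption, not part of the original class) ---
# Sketch of how a rack string such as "ABBEF?" could be encoded into the
# 27-entry rack_state tuple described above: indices 0-25 count the letters
# 'A'-'Z' and index 26 counts blank tiles ('?').
def rack_to_state(rack):
    counts = [0] * 27
    for tile in rack:
        if tile == "?":
            counts[26] += 1
        else:
            counts[ord(tile) - ord("A")] += 1
    return tuple(counts)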
```
#### File: scrabble/scrabbler/ShortMonteCarloSimStrategy.py
```python
import copy
import random
import math
from scrabbler.strategy import Strategy
from collections import defaultdict
''' Represents an implementation of an algorithm based on the Maven and Scrabble bots for the
Scrabble midgame. It utilizes a static evaluation function with several heuristics, simulation
2-ply deep, and pruning of moves based on standard deviation of each move's average simulated
future-game score differentials.
'''
class ShortMonteCarloSimStrategy(Strategy):
# Relative letter utilities calculated based off of simulating thousands of games. Once a letter
# was placed in a game, the future score differential was tracked and used to calculate utilities,
# as described in Maven implementation.
ESTIMATED_LETTER_UTILITY = {"A": 15.75, "B": 17.41, "C": 16.64, "D": 15.54, "E": 15.10, "F": 17.22,
"G": 15.66, "H": 15.81, "I": 15.92, "J": 21.71, "K": 17.95, "L": 13.50,
"M": 17.20, "N": 15.17, "O": 16.76, "P": 18.26, "Q": 20.26, "R": 13.28,
"S": 13.74, "T": 15.38, "U": 16.40, "V": 16.39, "W": 16.66, "X": 21.08,
"Y": 16.29, "Z": 23.14}
# Used in calculation of the heuristic for ratio of vowels to consonants
VOWELS = ["A", "E", "I", "O", "U", "Y"]
LETTERS = ("AAAAAB"
"BCDEEE"
"EEE"
"FGGHIIII"
"IIJKL"
"LMNNO"
"OOOPQ"
"RRSS"
"TTUU"
"VWXYZ")
RACK_SIZE = 7
''' Initialize strategy with the number of rollouts to do, the exploration weight, map of
states->child states, map of tiles->value, and an empty list of the current best moves.
'''
def __init__(self, num_rollouts = 1000, exploration_weight=1):
self.num_rollouts = num_rollouts
self.next_states = {}
self.dictionary = None
self.tiles = {'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1,
'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8,
'Y': 4, 'Z': 10}
self.num_best_moves = 0
self.best_moves = []
''' Choose the estimated best move by using a static evaluation function and performing simulations.
'''
def choose_move(self, game, rack, current_score_differential, other_rack, dictionary):
self.board = copy.deepcopy(game.board)
print('Starting Rollouts: ')
self.dictionary = dictionary
self.my_rack = copy.copy(rack)
self.move_to_score_differential = {}
self.move_to_visits = defaultdict(int)
moves = game.find_valid_moves(self.my_rack)
if not moves:
return None
num_moves = min(len(moves), 30)
self.best_moves = self.static_evaluation(moves[0:num_moves], self.my_rack)
return self.do_rollouts(game)
''' Determine the best moves from all possible moves based on several heuristics.
'''
def static_evaluation(self, moves, rack):
static_value = defaultdict(int)
highest_score = moves[0].score
for move in moves:
# Heuristic 1: Score
static_value[move] += move.score
# Heuristic 2: leftover rack value (leftover tiles values + potential tile values in bag)
leftover_rack = copy.deepcopy(rack)
start_square = move.start_square
start_row = start_square[0]
start_column = start_square[1]
if move.direction == "across":
for i in range(start_column, start_column + len(move.word)):
if self.board.square(start_row, i).tile is None:
leftover_rack.remove(move.word[i - start_column])
else:
for i in range(start_row, start_row + len(move.word)):
if self.board.square(i, start_column).tile is None:
leftover_rack.remove(move.word[i - start_row])
for char in leftover_rack:
static_value[move] += ShortMonteCarloSimStrategy.ESTIMATED_LETTER_UTILITY[char] * highest_score * 0.0003
static_value[move] += self.unseen_tiles_value(move, rack) * highest_score * 0.03
            # Heuristic 3: vowels:consonants ratio closer to 1
vowels = 0
for tile in leftover_rack:
if tile in ShortMonteCarloSimStrategy.VOWELS:
vowels += 1
if vowels == 0 or vowels == len(leftover_rack):
ratio = 0
elif len(leftover_rack) < 2 * vowels:
ratio = vowels / (len(leftover_rack) - vowels)
else:
ratio = (len(leftover_rack) - vowels) / vowels
static_value[move] += math.sqrt(ratio) * highest_score * 0.004
# Heuristic 4: U-with-Q-unseen
if self.is_Q_in_unseen(move, rack) and "U" in leftover_rack:
static_value[move] += highest_score * 0.002
# Heuristic 5: First turn places fewer tiles
if self.board.empty:
static_value[move] += (1 / len(move.word)) * highest_score * 0.02
sorted_moves = sorted(moves, key= lambda move: static_value[move], reverse=True)
num_moves = min(12, len(moves))
return sorted_moves[0:num_moves]
''' Check to see if there might be a Q tile in the bag. It cannot be known for sure if a Q tile
is in the opponent's rack or in the bag, just that it has a chance of being in the bag.
'''
def is_Q_in_unseen(self, move, rack):
used_tiles = list(rack) + [square.tile for square in self.board.get_board()]
used_tiles_dict = defaultdict(int)
for tile in used_tiles:
used_tiles_dict[tile] += 1
all_tiles = defaultdict(int)
for tile in ShortMonteCarloSimStrategy.LETTERS:
all_tiles[tile] += 1
potential_letters = []
for tile in all_tiles:
for i in range(all_tiles[tile] - used_tiles_dict[tile]):
potential_letters.append(tile)
if "Q" in potential_letters:
            return True
        return False
''' Calculate the average value of tiles that might be in the bag, for the purposes of estimating
how good a leftover rack might be.
'''
def unseen_tiles_value(self, move, rack):
used_tiles = list(rack) + [square.tile for square in self.board.get_board()]
used_tiles_dict = defaultdict(int)
for tile in used_tiles:
used_tiles_dict[tile] += 1
all_tiles = defaultdict(int)
for tile in ShortMonteCarloSimStrategy.LETTERS:
all_tiles[tile] += 1
potential_letters = []
for tile in all_tiles:
for i in range(all_tiles[tile] - used_tiles_dict[tile]):
potential_letters.append(tile)
unseen_value = 0
for tile in potential_letters:
unseen_value += ShortMonteCarloSimStrategy.ESTIMATED_LETTER_UTILITY[tile]
average_val = 0.01 * unseen_value / len(potential_letters)
return average_val * min(ShortMonteCarloSimStrategy.RACK_SIZE - len(move.word), len(potential_letters))
    ''' Simulate 2-ply into the future to estimate the average score differential associated with a
    move. 17 iterations are performed for each move; then, for subsequent iterations, any moves
    with a score differential not within 2 standard deviations of the highest score differential are
pruned from the list of moves being considered. Simulations are carried out until one move
statistically distances itself from all other moves or until the allotted number of rollouts is
reached.
'''
def do_rollouts(self, game):
# after 17 iterations through each move, start pruning anything not within 2 standard deviations
rollouts_left = self.num_rollouts
moves_under_consideration = self.best_moves
if len(moves_under_consideration) == 1:
return moves_under_consideration[0]
is_after_17_rollouts_each = False
current_move_index = 0
for i in range(self.num_rollouts):
self.move_to_visits[moves_under_consideration[current_move_index]] += 1
if moves_under_consideration[current_move_index] not in self.move_to_score_differential:
self.move_to_score_differential[moves_under_consideration[current_move_index]] = 0
self.simulate(moves_under_consideration[current_move_index])
self.board = copy.deepcopy(game.board)
current_move_index = (current_move_index + 1) % len(moves_under_consideration)
if not is_after_17_rollouts_each and (i / len(moves_under_consideration)) >= 17:
is_after_17_rollouts_each = True
if current_move_index == 0 and is_after_17_rollouts_each:
moves_under_consideration = self.remove_poor_words(moves_under_consideration)
if len(moves_under_consideration) == 1:
return moves_under_consideration[0]
return max(moves_under_consideration, key = lambda move: self.move_to_score_differential[move] / self.move_to_visits[move])
''' Remove any words whose average future score differential is not within 2 standard deviations of
the highest future score differential.
'''
def remove_poor_words(self, moves):
new_moves = []
current_max = float("-inf")
total_points = 0
all_same_score = True
for move in moves:
if not (self.move_to_score_differential[moves[0]] == self.move_to_score_differential[move]):
all_same_score = False
break
if all_same_score:
return [moves[0]]
for move in moves:
total_points += self.move_to_score_differential[move]
# Calculate standard deviation
average = total_points / len(moves)
diff_summation = 0
for move in moves:
diff_summation += (self.move_to_score_differential[move] - average) ** 2
standard_deviation = math.sqrt(diff_summation / len(moves))
for move in moves:
current_max = max(current_max, self.move_to_score_differential[move])
for move in moves:
if current_max - self.move_to_score_differential[move] < (2 * standard_deviation):
new_moves.append(move)
return new_moves
''' Create a random rack for the opponent during simulation that could plausibly exist given
one's own rack and the tiles that have already been played on the board.
'''
def find_plausible_opponent_rack(self, my_rack):
used_tiles = list(my_rack) + [square.tile for square in self.board.get_board()]
used_tiles_dict = defaultdict(int)
for tile in used_tiles:
used_tiles_dict[tile] += 1
all_tiles = defaultdict(int)
for tile in ShortMonteCarloSimStrategy.LETTERS:
all_tiles[tile] += 1
potential_letters = []
for tile in all_tiles:
for i in range(all_tiles[tile] - used_tiles_dict[tile]):
potential_letters.append(tile)
opponent_rack = []
for i in range(min(ShortMonteCarloSimStrategy.RACK_SIZE, len(potential_letters))):
added_tile = random.choice(potential_letters)
opponent_rack += added_tile
potential_letters.remove(added_tile)
return opponent_rack
''' Refill agent's own rack after a move with tiles that are potentially in the bag based on
what tiles have already been played.
'''
def get_refilled_rack(self, my_rack):
used_tiles = list(my_rack) + [square.tile for square in self.board.get_board()]
used_tiles_dict = defaultdict(int)
for tile in used_tiles:
used_tiles_dict[tile] += 1
all_tiles = defaultdict(int)
for tile in ShortMonteCarloSimStrategy.LETTERS:
all_tiles[tile] += 1
potential_letters = []
for tile in all_tiles:
for i in range(all_tiles[tile] - used_tiles_dict[tile]):
potential_letters.append(tile)
next_rack = []
for i in range(min(ShortMonteCarloSimStrategy.RACK_SIZE, len(potential_letters))):
added_tile = random.choice(potential_letters)
next_rack += added_tile
potential_letters.remove(added_tile)
return next_rack
''' Start the simulation from a move to 2-ply forward in the game. The move is placed on the board,
the agent's rack is refilled, and we move on to the opponent's simualted move.
'''
def simulate(self, move):
self.current_initial_move = move
start_square = move.start_square
word = move.word
direction = move.direction
# Remove tiles from my rack and then refill
my_rack = copy.deepcopy(self.my_rack)
start_row = start_square[0]
start_column = start_square[1]
if move.direction == "across":
for i in range(start_column, start_column + len(move.word)):
if self.board.square(start_row, i).tile is None:
if move.word[i - start_column] not in my_rack:
my_rack.remove('?')
else:
my_rack.remove(move.word[i - start_column])
else:
for i in range(start_row, start_row + len(move.word)):
if self.board.square(i, start_column).tile is None:
if move.word[i - start_row] not in my_rack:
my_rack.remove('?')
else:
my_rack.remove(move.word[i - start_row])
# place move on board
self.board.place_word(start_square, word, direction)
# update affected cross sets
self.board.update_cross_set(start_square, direction, self.dictionary)
other_direction = "across" if direction == "down" else "down"
coordinate = start_square
for _ in word:
self.board.update_cross_set(coordinate, other_direction, self.dictionary)
coordinate = self.board.offset(coordinate, direction, 1)
self.move_to_score_differential[self.current_initial_move] += move.score
my_new_rack = self.get_refilled_rack(my_rack)
self.simulate_opponent_move(my_new_rack)
''' Simulate the opponent's move given that the agent has just placed a word on the board and
refilled their rack. '''
def simulate_opponent_move(self, my_rack):
# set up opponent's plausible next rack
opponent_rack = self.find_plausible_opponent_rack(my_rack)
across_moves = self.board.find_best_moves(opponent_rack, "across", self.dictionary, self.tiles)
down_moves = self.board.find_best_moves(opponent_rack, "down", self.dictionary, self.tiles)
moves = across_moves + down_moves
if not moves:
return
move = moves[0] # Not random simulation but logical move instead
start_square = move.start_square
word = move.word
direction = move.direction
# Remove used letters from opponent rack
start_row = start_square[0]
start_column = start_square[1]
if move.direction == "across":
for i in range(start_column, start_column + len(move.word)):
if self.board.square(start_row, i).tile is None:
if move.word[i - start_column] not in opponent_rack:
opponent_rack.remove('?')
else:
opponent_rack.remove(move.word[i - start_column])
else:
for i in range(start_row, start_row + len(move.word)):
if self.board.square(i, start_column).tile is None:
if move.word[i - start_row] not in opponent_rack:
opponent_rack.remove('?')
else:
opponent_rack.remove(move.word[i - start_row])
# place move on board
self.board.place_word(start_square, word, direction)
# update affected cross sets
self.board.update_cross_set(start_square, direction, self.dictionary)
other_direction = "across" if direction == "down" else "down"
coordinate = start_square
for _ in word:
self.board.update_cross_set(coordinate, other_direction, self.dictionary)
coordinate = self.board.offset(coordinate, direction, 1)
# Since opponent made move, subtract their word's score from our score differential
self.move_to_score_differential[self.current_initial_move] -= move.score
self.simulate_my_final_move(my_rack)
''' Simulate agent's final move and add move's score to score differential for the associated
original move.
'''
def simulate_my_final_move(self, my_rack):
across_moves = self.board.find_best_moves(my_rack, "across", self.dictionary, self.tiles)
down_moves = self.board.find_best_moves(my_rack, "down", self.dictionary, self.tiles)
moves = across_moves + down_moves
if not moves:
return
move = moves[0] # Not random simulation but logical move instead
self.move_to_score_differential[self.current_initial_move] += move.score
''' Return the best move from the current game state. The best move has the highest average
score differential from its simulations.
'''
def get_best_move(self):
return max(self.move_to_score_differential.keys(), key = lambda move: self.move_to_score_differential[move] / self.move_to_visits[move])
```
#### File: scrabble/scrabbler/strategy.py
```python
import random
class Strategy:
'''
    Base class for a player's strategy. Concrete strategies (such as the three
    search algorithms) subclass this class and override the choose_move method.
'''
def choose_move(self, game, rack, score_diff, opponents_rack, dictionary):
raise Exception('Method not defined!')
class BaselineStrategy(Strategy):
def choose_move(self, game, rack, score_diff, opponents_rack, dictionary):
""" method that chooses a move based on this strategy"""
# return the valid move with the highest score
valid_moves = game.find_valid_moves(rack)
if len(valid_moves) == 0:
return None
return valid_moves[0]
class RandomStrategy(Strategy):
""" Strategy Class that chooses a random move instead of the move resulting in the highest score """
def choose_move(self, game, rack, score_diff, opponents_rack, dictionary):
        # return a uniformly random choice among the valid moves
valid_moves = game.find_valid_moves(rack)
if len(valid_moves) == 0:
return None
return random.choice(valid_moves)
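# --- Illustrative sketch (an assumption, not part of the original module) ---
# A new strategy only has to subclass Strategy and override choose_move(). For
# example, a hypothetical strategy that prefers the shortest playable word:
class ShortestWordStrategy(Strategy):
    def choose_move(self, game, rack, score_diff, opponents_rack, dictionary):
        valid_moves = game.find_valid_moves(rack)
        if len(valid_moves) == 0:
            return None
        return min(valid_moves, key=lambda move: len(move.word))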
```
|
{
"source": "Jfool97/API",
"score": 3
}
|
#### File: login/CaptchaVtopBeta/parser.py
```python
from PIL import Image
import os
def CaptchaParse(img):
    """Parse a 6-character VTOP captcha by template-matching each 30x32
    character cell against the reference glyphs stored in the Chars folder."""
    captcha=""
    dirs=os.listdir("Chars")
    # Convert to grayscale, then clean up: isolated one-pixel black specks are
    # removed and any remaining grey pixel is forced to white.
    img=img.convert('L')
    pix=img.load()
    for y in range(1,44):
for x in range(1,179):
if pix[x,y-1]==255 and pix[x,y]==0 and pix[x,y+1]==255:
pix[x,y]=255
if pix[x-1,y]==255 and pix[x,y]==0 and pix[x+1,y]==255:
pix[x,y]=255
if pix[x,y]!=255 and pix[x,y]!=0:
pix[x,y]=255
    # Slice the captcha into six 30-pixel-wide character cells; each cell is
    # compared with every reference glyph, and a glyph whose black pixels are
    # matched at least 80% of the time becomes a candidate for that position.
    for j in range(30,181,30):
        ch=img.crop((j-30,12,j,44))
pix1=ch.load()
matches={}
for i in dirs:
match=0
black=0
pixx=0
im2=Image.open("Chars/"+i)
im2=im2.convert('L')
pix2=im2.load()
for y in range(0,32):
for x in range(0,30):
## if pix1[x,y]==pix2[x,y] and pix2[x,y]==(0,0,0):
## match+=1
## if pix2[x,y]==(0,0,0):
## black+=1
## if pix1[x,y]==(0,0,0):
## pixx+=1
if pix1[x,y]==pix2[x,y] and pix2[x,y]==0:
match+=1
if pix2[x,y]==0:
black+=1
if pix1[x,y]==0:
pixx+=1
if float(match)/float(black)>=0.80:
perc=float(match)/float(black)
matches.update({perc:i[0].upper()})
        # Keep the best-scoring candidate; if no glyph matched, fall back to "0".
        try:
            captcha+=matches[max(matches.keys())]
        except ValueError:
            captcha+="0"
## img.save("testcaptcha\\"+captcha+".png")
return captcha
##img=Image.open("2.png")
##print CaptchaParse(img)
```
|
{
"source": "jfoote/fuzzy-stack-hash-analysis",
"score": 3
}
|
#### File: jfoote/fuzzy-stack-hash-analysis/bugs.py
```python
import time, sys
from launchpadlib.launchpad import Launchpad
def has_stack_trace(bug):
'''
Returns True if this bug report has an Apport-style stack trace attachment.
Returns False otherwise.
'''
for attachment in bug.attachments:
if attachment.title == "Stacktrace.txt":
return True
return False
launchpad = Launchpad.login_anonymously('hello-world', 'production')
bfile = file("bugs.csv", "at")
project_names = file("projects.txt", "rt").read().split("\n")
total_projects = len(project_names)
i = 0
start_time = time.time()
# iterate over all projects and log Apport crash bug report info
for project_name in project_names:
try:
print project_name
pillar = launchpad.projects[project_name]
bugs = pillar.searchTasks(status=["Fix Committed", "Fix Released"])
# ^ note: omit_duplicates is set to True by default
for bug in bugs:
# verify bug report was produced by Apport, w/ stack trace
if not "crashed with" in bug.title.lower():
continue
bug_id_str = str(bug).split("/")[-1]
bug = launchpad.bugs[bug_id_str]
if not has_stack_trace(bug):
continue
# collect duplication info for bug
count = 1
if getattr(bug, "number_of_duplicates", False):
count += bug.number_of_duplicates
if bug.duplicate_of:
dupe_of = str(bug.duplicate_of).split("/")[-1]
else:
dupe_of = "None"
            # log bug to csv file: project name, project URL, bug id, bug URL,
            # number of fixed bug tasks found for the project, duplicate count,
            # and the id of the bug this one duplicates (or "None")
bfile.write(",".join([str(pillar).split("/")[-1], str(pillar),
bug_id_str, str(bug), str(len(bugs)), str(count), dupe_of]) + "\n")
i += 1
print "%d/%d, %f sec remaining" % (i, total_projects,
(time.time()-start_time)/i * (total_projects-i))
sys.stdout.flush()
except Exception as e:
print "%s: %s" % (project_name, str(e))
print "done"
```
|
{
"source": "jfoote/trollbox",
"score": 2
}
|
#### File: jfoote/trollbox/trollbox.py
```python
import sys
from functools import partial
import PySide
from PySide.QtGui import *
from PySide.QtCore import Slot, Qt
from argparse import ArgumentParser
from trollbox.image_picker import ImagePicker
from trollbox.image_downloader import ImageDownloader
from trollbox.wordlogger import get_wordlogger
class MainWindow(QMainWindow):
def __init__(self, parent=None):
'''
Define widgets, their layout, and signal/slot interactions
'''
QMainWindow.__init__(self, parent)
self.resize(800,600)
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
# Define widgets
self.tagEdit = QLineEdit(centralWidget)
self.tagEdit.setPlaceholderText("Current Image Tags")
self.urlEdit = QLineEdit(centralWidget)
self.urlEdit.setPlaceholderText("Current Image URL")
self.urlEdit.setDisabled(True)
saveButton = QPushButton("Save Selection", centralWidget)
deleteButton = QPushButton("Delete Selection", centralWidget)
self.searchEdit = QLineEdit(centralWidget)
self.searchEdit.setPlaceholderText("Image Tag Search")
self.liveCheckBox = ClearableCheckBox("WordLogger", centralWidget)
self.clearSearchButton = QPushButton("Clear Filter", centralWidget)
self.imagePicker = ImagePicker(centralWidget)
self.getUrlEdit = QLineEdit(centralWidget)
self.getUrlEdit.setPlaceholderText("Download Image URL")
self.getUrlButton = QPushButton("Download URL", centralWidget)
self.pasteUrlButton = QPushButton("Paste for Download", centralWidget)
# Set layout
layout = QGridLayout(centralWidget)
layout.addWidget(self.imagePicker, 0, 0, 3, 5)
layout.addWidget(QLabel("Selection:"), 5, 0)
layout.addWidget(self.urlEdit, 5, 1)
layout.addWidget(self.tagEdit, 5, 2)
layout.addWidget(saveButton, 5, 3)
layout.addWidget(deleteButton, 5, 4)
layout.addWidget(QLabel("Search:"), 6, 0)
layout.addWidget(self.searchEdit, 6, 1, 1, 2)
layout.addWidget(self.liveCheckBox, 6, 3, 1, 1)
layout.addWidget(self.clearSearchButton, 6, 4, 1, 1)
layout.addWidget(QLabel("Download:"), 7, 0)
layout.addWidget(self.getUrlEdit, 7, 1, 1, 2)
layout.addWidget(self.getUrlButton, 7, 3, 1, 1)
layout.addWidget(self.pasteUrlButton, 7, 4, 1, 1)
# Let user search by tag
self.searchEdit.textChanged.connect(self.imagePicker.setFilterTagsString)
self.clearSearchButton.clicked.connect(self.clearSearch)
# Reflect selection in tags and URL boxes
self.imagePicker.selectedTagsStringChanged.connect(self.tagEdit.setText)
self.imagePicker.selectedUrlChanged.connect(self.urlEdit.setText)
# Enabling saving changes of tags to model
saveButton.clicked.connect(self.saveTags)
self.tagEdit.returnPressed.connect(saveButton.clicked)
# Enable deleting selections from model
deleteButton.clicked.connect(self.imagePicker.deleteSelected)
self.imagePicker.preDelete.connect(self.tagEdit.clear)
self.imagePicker.preDelete.connect(self.urlEdit.clear)
self.imagePicker.preDelete.connect(self.showDeletedMessage)
# Set up image downloading
self.getUrlButton.clicked.connect(self.downloadImage)
self.pasteUrlButton.clicked.connect(self.pasteUrl)
# Enable wordlogging support
self.wordlogger = get_wordlogger()
if self.wordlogger:
self.liveCheckBox.stateChanged.connect(self.toggleWordLogging)
else:
self.liveCheckBox.setEnabled(False)
# Enable fast URL copying (this might be annoying)
self.imagePicker.clicked.connect(self.copyUrl)
self.statusBar().showMessage("Click an image to copy its URL")
def showDeletedMessage(self):
self.statusBar().showMessage("Deleted image")
def clearSearch(self):
self.searchEdit.setText("")
self.statusBar().showMessage("Cleared search")
def copyUrl(self):
if self.imagePicker.selectedIndexes():
url = self.urlEdit.text()
QApplication.clipboard().setText(url)
print "copied URL to clipboard:", url
self.statusBar().showMessage("Copied '%s'" % url)
def pasteUrl(self):
data = QApplication.clipboard().mimeData()
if data.hasText():
self.getUrlEdit.setText(data.text())
else:
self.statusBar().showMessage("Paste URL: No text on clipboard")
print "Paste URL: No text on clipboard"
return
self.statusBar().showMessage("Pasted URL")
def toggleWordLogging(self, new_state):
if (new_state == Qt.Checked) and not self.wordlogger.is_active():
# clicking into UI elements disables live search
self.searchEdit.textEdited.connect(self.liveCheckBox.clear)
self.urlEdit.textEdited.connect(self.liveCheckBox.clear)
self.tagEdit.textEdited.connect(self.liveCheckBox.clear)
self.getUrlEdit.textEdited.connect(self.liveCheckBox.clear)
self.getUrlButton.clicked.connect(self.liveCheckBox.clear)
self.pasteUrlButton.clicked.connect(self.liveCheckBox.clear)
self.imagePicker.clicked.connect(self.liveCheckBox.clear)
# start word logger thread
self.wordlogger.start()
self.wordlogger.wordEntered.connect(self.searchEdit.setText)
else:
# stop word logger thread
self.wordlogger.stop()
self.wordlogger.wordEntered.disconnect(self.searchEdit.setText)
# disable connections to UI elements
self.searchEdit.textEdited.disconnect(self.liveCheckBox.clear)
self.urlEdit.textEdited.disconnect(self.liveCheckBox.clear)
self.tagEdit.textEdited.disconnect(self.liveCheckBox.clear)
self.getUrlEdit.textEdited.disconnect(self.liveCheckBox.clear)
self.getUrlButton.clicked.disconnect(self.liveCheckBox.clear)
self.pasteUrlButton.clicked.disconnect(self.liveCheckBox.clear)
self.imagePicker.clicked.disconnect(self.liveCheckBox.clear)
self.statusBar().showMessage("Toggled word logger")
def downloadImage(self):
downloader = ImageDownloader(self)
downloader.failure.connect(self.showDownloadError)
downloader.success.connect(self.imagePicker.addImage)
downloader.success.connect(self.searchEdit.clear)
url = self.getUrlEdit.text()
downloader.get(url, self.imagePicker.getLocalFilepath(url))
self.statusBar().showMessage("Downloading %s" % url)
def showDownloadError(self, message):
mb = QMessageBox()
mb.setText(message)
mb.exec_()
def saveTags(self):
self.imagePicker.setTagsString(self.tagEdit.text())
self.statusBar().showMessage("Saved tags")
class ClearableCheckBox(QCheckBox):
@Slot()
def clear(self):
self.setChecked(False)
if __name__ == "__main__":
parser = ArgumentParser(description="A searchable database of images")
parser.add_argument("-a", "--alwaysontop", action='store_true',
help="Window always on top")
args = parser.parse_args()
app = QApplication(sys.argv)
window = MainWindow()
if args.alwaysontop:
window.setWindowFlags(Qt.WindowStaysOnTopHint)
window.show()
sys.exit(app.exec_())
```
#### File: trollbox/test/test_image_model.py
```python
from unittest import TestCase
from trollbox.image_model import ImageModel
from PySide.QtGui import QApplication
from PySide.QtCore import Qt
import json, tempfile, os, subprocess
app = None
class Test_ImageModel(TestCase):
def setUp(self):
global app
if not app:
app = QApplication([])
def test_instantiation(self):
temp_dir = tempfile.mkdtemp()
model = ImageModel(troll_dir=temp_dir)
self.assertEqual(model.rowCount(), 0)
subprocess.call(["rm", "-rf", temp_dir])
def test_load(self):
file_dir = os.path.dirname(os.path.realpath(__file__))
sample_path = os.path.join(file_dir, "data", "0")
model = ImageModel(troll_dir=sample_path)
index = model.index(0, 0)
self.assertEqual(model.data(index, Qt.DisplayRole), "http://foo.bar")
self.assertEqual(model.data(index, ImageModel.TagRole), ["tng", "book"])
index = model.index(1, 0)
self.assertEqual(model.data(index, ImageModel.TagRole), ["koop", "postmaster"])
def test_addImage(self):
# create a troll box in a temp dir and add an image from the "0" test
# data set
file_dir = os.path.dirname(os.path.realpath(__file__))
sample_path = os.path.join(file_dir, "data", "0", "images", "book.jpg")
temp_dir = tempfile.mkdtemp()
try:
model = ImageModel(troll_dir=temp_dir)
model.addImage("http://foo.bar", ["tng"], sample_path)
index = model.index(0, 0)
self.assertEqual(model.data(index, Qt.DisplayRole), "http://foo.bar")
finally:
subprocess.call(["rm", "-rf", temp_dir])
def test_save(self):
# create a troll box in a temp dir and add an image from the "0" test
# data set, save it, and verify another model picks up the changes
file_dir = os.path.dirname(os.path.realpath(__file__))
sample_path = os.path.join(file_dir, "data", "0", "images", "book.jpg")
temp_dir = tempfile.mkdtemp()
try:
model = ImageModel(troll_dir=temp_dir)
model.addImage("http://foo.bar", ["tng"], sample_path)
model_b = ImageModel(troll_dir=temp_dir)
self.assertEqual(model_b.data(model.index(0, 0), Qt.DisplayRole), "http://foo.bar")
finally:
subprocess.call(["rm", "-rf", temp_dir])
def test_setData(self):
# create a troll box in a temp dir, add an image from the "0" test
# data set, and modify it via setData
file_dir = os.path.dirname(os.path.realpath(__file__))
sample_path = os.path.join(file_dir, "data", "0", "images", "book.jpg")
temp_dir = tempfile.mkdtemp()
try:
model = ImageModel(troll_dir=temp_dir)
model.addImage("http://foo.bar", ["tng"], sample_path)
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), "http://foo.bar")
model.setData(model.index(0, 0), "http://bar.bar", Qt.DisplayRole)
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), "http://bar.bar")
sample_path = os.path.join(file_dir, "data", "0", "images", "koop.jpg")
model.setData(model.index(0, 0), sample_path, Qt.DecorationRole)
# no good way to check icon currently
finally:
subprocess.call(["rm", "-rf", temp_dir])
def test_delete(self):
# copy "0" test troll box to temp dir, delete from it
file_dir = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.join(file_dir, "data", "0")
temp_dir = tempfile.mkdtemp()
temp_model_dir = os.path.join(temp_dir, "0")
sample_path = os.path.join(temp_model_dir, "images", "book.jpg")
try:
print data_path, temp_dir, sample_path
subprocess.check_call(["cp", "-R", data_path, temp_dir])
model = ImageModel(troll_dir=temp_model_dir)
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), "http://foo.bar")
print subprocess.check_output(["file", sample_path])
self.assertTrue(os.path.exists(sample_path))
# make sure image has been removed from model and file is removed from disk
model.deleteImage(model.index(0,0))
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), "http://foo.baz")
self.assertFalse(os.path.exists(sample_path))
finally:
subprocess.call(["rm", "-rf", temp_dir])
```
|
{
"source": "jfoox/epiQC",
"score": 3
}
|
#### File: epiQC/bin/combine_methylation_outputs.py
```python
import gzip
import sys
sample_outname = sys.argv[1]
biscov_files = sys.argv[2:]
combined_data = {}
def merger(infile):
for line in infile:
if 'track' not in line:
line = line.strip().split('\t')
locus = line[0]+'.'+line[1]+'.'+line[2]
CpG_values = (int(line[4]), int(line[5]))
if locus not in combined_data:
combined_data[locus] = CpG_values
else:
combined_data[locus] = tuple(map(sum, zip(combined_data[locus], CpG_values)))
# process
for biscov in biscov_files:
print('incorporating ' + biscov + ' ...')
if biscov.endswith('.gz'):
with gzip.open(biscov, 'rt') as infile:
merger(infile)
else:
with open(biscov, 'r') as infile:
merger(infile)
# output
with gzip.open(sample_outname+'.bedGraph.gz', 'wt') as outfile:
for locus in combined_data:
try:
pct = round(float(combined_data[locus][0]) / (float(combined_data[locus][0]) + float(combined_data[locus][1])) * 100.0, 3)
outfile.write(locus.replace('.','\t') +'\t'+ str(pct) +'\t'+ str(combined_data[locus][0]) +'\t'+ str(combined_data[locus][1]) +'\n')
except ZeroDivisionError:
pass
```
|
{
"source": "jfoox/venninator",
"score": 2
}
|
#### File: jfoox/venninator/settings.py
```python
def init():
global user_evalues
global allvsallfile
global similarity
global numthreads
global filelist
global species_id
global evalues
global outblastp_splitfiles
global flatfiles_and_foldernums
global flatfiles_foldernumber
global flatfiles_added
global current_numclusters
user_evalues = ''
allvsallfile = ''
similarity = 70
numthreads = 8
species_id = {}
filelist = []
evalues = []
outblastp_splitfiles = []
flatfiles_and_foldernums = {}
flatfiles_foldernumber = 0
flatfiles_added = 0
current_numclusters = 0
```
|
{
"source": "jforand/sqlalchemy-soft-delete",
"score": 2
}
|
#### File: migrations/versions/2f31d469cdc4_users_and_messages_tables.py
```python
revision = '2f<PASSWORD>cdc4'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('messages',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('message', sa.String(length=256), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('messages')
op.drop_table('users')
### end Alembic commands ###
```
|
{
"source": "J-Ford/car_control",
"score": 2
}
|
#### File: car_control/src/reset.py
```python
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import Empty
class CarReset(object):
def __init__(self):
rospy.init_node('reset_node', anonymous=False)
self.sub = rospy.Subscriber("reset", Empty, self._callback)
self.pub_speed = rospy.Publisher('throttle/setpoint', Float64, queue_size=5)
self.pub_direction = rospy.Publisher('steering/setpoint', Float64, queue_size=5)
def _callback(self, msg):
rospy.logwarn(rospy.get_caller_id() + ": Controller Setpoints Reset to 0")
self.pub_speed.publish(0.0)
self.pub_direction.publish(0.0)
if __name__ == '__main__':
car_reset = CarReset()
rospy.spin()
```
#### File: us_digital_encoders/src/encoder_interface.py
```python
import rospy
from us_digital_encoders.msg import USDigitalEncoders
from std_msgs.msg import Float64
class EncoderInterface(object):
def __init__(self, ticks_per_metre, ticks_per_degree):
rospy.init_node('encoder_interface')
self.ticks_per_metre = ticks_per_metre
self.ticks_per_degree = ticks_per_degree
self.last_msg_time = None
self.sub = rospy.Subscriber('encoders', USDigitalEncoders, self._callback)
self.pub_speed = rospy.Publisher('throttle/state/data', Float64, queue_size=5)
self.pub_direction = rospy.Publisher('steering/state/data', Float64, queue_size=5)
def _callback(self, msg):
speed_ticks = float(msg.ticks[0])
direction_ticks = float(msg.ticks[1])
timestamp = msg.header.stamp.to_sec()
if self.last_msg_time is None:
self.last_msg_time = timestamp
else:
speed = self._ticks_to_speed(speed_ticks, timestamp)
self.pub_speed.publish(speed)
direction = self._ticks_to_direction(direction_ticks)
self.pub_direction.publish(direction)
self.last_msg_time = timestamp
def _ticks_to_speed(self, ticks, timestamp):
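# Distance = ticks / ticks_per_metre; dividing by the time since the previous message gives speed in metres per second.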
delta_metres = ticks / self.ticks_per_metre
speed = delta_metres / (timestamp - self.last_msg_time)
return speed
def _ticks_to_direction(self, ticks):
direction = ticks * self.ticks_per_degree
return direction
if __name__ == '__main__':
encoder_interface = EncoderInterface(47363, 100)
rospy.spin()
```
|
{
"source": "JForden/pylint-errors",
"score": 3
}
|
#### File: pylint-errors/plerr/cli.py
```python
import argparse
import pathlib
import sys
from pygments import highlight
from pygments.lexers import MarkdownLexer
from pygments.formatters import TerminalFormatter
from . import __version__
def main():
"""Get a pylint error description by an error code."""
parser = argparse.ArgumentParser(
description=(
'Get a verbose description of a pylint error by an error code.'
)
)
parser.add_argument(
'code',
metavar='error code',
type=str,
help='a pylint error code either r1710 or R1710'
)
parser.add_argument(
'-v',
'--version',
action='version',
version=f'plerr v{__version__}'
)
args = parser.parse_args()
root = pathlib.Path(__file__).resolve().parent
try:
error = next(root.rglob(f'*{args.code.upper()}.md'))
content = error.read_bytes()
print(highlight(content, MarkdownLexer(), TerminalFormatter()))
sys.exit(0)
except StopIteration:
print(
f'Cannot find {args.code} pylint error by such error code.',
file=sys.stderr
)
sys.exit(1)
```
#### File: plerr/tests/test_cli_module.py
```python
import io
import pathlib
import sys
import unittest
from unittest.mock import patch
from plerr.cli import main
ROOT = pathlib.Path(__file__).resolve().parent
class TestPlErrModule(unittest.TestCase):
def setUp(self):
# Redirect stdout/stderr to mock objects to read later.
self.stdout = patch('plerr.cli.sys.stdout', new=io.StringIO())
self.stderr = patch('plerr.cli.sys.stderr', new=io.StringIO())
self.stdout.start()
self.stderr.start()
def tearDown(self):
self.stdout.stop()
self.stderr.stop()
@patch('plerr.cli.sys.argv', new=['plerr', 'r1710'])
def test_plerr_error_getter_with_lower_case_letter(self):
# Given: a command to get a description of a pylint error by an
# error code with a lower case letter.
# When: the command invokes.
with self.assertRaises(SystemExit) as err:
main()
expected_stdout = (ROOT / 'command_output_fixture.txt').read_bytes()
# Then: it produces a highlighted output to stdout of the given error
# with the exit code 0.
assert sys.stdout.getvalue().encode() == expected_stdout
assert not sys.stderr.getvalue().encode()
assert err.exception.code == 0
@patch('plerr.cli.sys.argv', new=['plerr', 'R1710'])
def test_plerr_error_getter_with_capital_letter(self):
# Given: a command to get a description of a pylint error by an
# error code with a capital letter.
# When: the command invokes.
with self.assertRaises(SystemExit) as err:
main()
expected_stdout = (ROOT / 'command_output_fixture.txt').read_bytes()
# Then: it produces a highlighted output to stdout of the given error
# with the exit code 0.
assert sys.stdout.getvalue().encode() == expected_stdout
assert not sys.stderr.getvalue().encode()
assert err.exception.code == 0
@patch('plerr.cli.sys.argv', new=['plerr', 'R0000'])
def test_plerr_non_existent_error(self):
# Given: a command to get a description of a pylint error with a
# non-existent error code.
# When: the command invokes.
with self.assertRaises(SystemExit) as err:
main()
expected_stderr = (
b'Cannot find R0000 pylint error by such error code.\n'
)
# Then: it produces an error message to stderr with the exit code 1.
assert sys.stderr.getvalue().encode() == expected_stderr
assert not sys.stdout.getvalue().encode()
assert err.exception.code == 1
```
|
{
"source": "jforge/blog-serverless-ping-pong",
"score": 2
}
|
#### File: blog-serverless-ping-pong/lambdas/common.py
```python
import boto3
import time
client = boto3.client('sns')
def _publish(region: str, account: str, topic: str, msg: str) -> None:
"publish a message to sns"
client.publish(TopicArn=f'arn:aws:sns:{region}:{account}:{topic}', Message=msg)
def publish(ctx, topic, msg) -> None:
"publish a message to sns"
region, account = determine_region_and_account(ctx)
delay(1)
_publish(region, account, topic, msg)
def delay(x: int) -> None:
"sleep for x seconds, to slow down the game a little"
time.sleep(x)
def determine_region_and_account(arn: str) -> (str, str):
"returns the region and account from a given arn"
xs = arn.split(':')
return xs[3], xs[4]
def dump_context(ctx) -> None:
"Logs the lambda context"
print(
f"""
function_name: {ctx.function_name}
function_version: {ctx.function_version}
invoked_function_arn: {ctx.invoked_function_arn}
memory_limit_in_mb: {ctx.memory_limit_in_mb}
aws_request_id: {ctx.aws_request_id}
log_group_name: {ctx.log_group_name}
log_stream_name: {ctx.log_stream_name}
identity: {ctx.identity}
""")
```
#### File: blog-serverless-ping-pong/lambdas/ping_function.py
```python
if __package__ == 'lambdas':
from lambdas.common import *
else:
from common import *
started = False
def handler(event, ctx) -> None:
global started
if event.get('detail-type'):
if not started:
print('Starting ping-pong')
started = True
publish(ctx.invoked_function_arn, 'ping-topic', 'ping')
else:
print('Already started ping-pong')
else:
print(f'Received: {event}, sending ping to ping-topic')
publish(ctx.invoked_function_arn, 'ping-topic', 'ping')
```
#### File: blog-serverless-ping-pong/lambdas/pong_function.py
```python
if __package__ == 'lambdas':
from lambdas.common import *
else:
from common import *
def handler(event, ctx):
print(f'Received event: {event}, sending pong to pong-topic')
publish(ctx.invoked_function_arn, 'pong-topic', 'pong')
```
#### File: blog-serverless-ping-pong/resolvers/s3_version.py
```python
from sceptre.resolvers import Resolver
from sceptre.resolvers.stack_output import StackOutput
class S3Version(Resolver):
NAME = "s3_version"
def __init__(self, *args, **kwargs):
super(S3Version, self).__init__(*args, **kwargs)
def determine_stack_output(self, arg: str):
return StackOutput(argument=arg,
connection_manager=self.connection_manager,
environment_config=self.environment_config,
stack_config=self.stack_config,
).resolve()
def determine_bucket_name(self, arg: str):
if '::' in arg:
return self.determine_stack_output(arg)
else:
return arg
def resolve(self):
"""
Resolves the latest version of an object
Usage: !s3_version stack_name::output_name/object_key
:return:
"""
if self.argument:
s3_bucket, s3_key = self.argument.split("/", 1)
s3_bucket = self.determine_bucket_name(s3_bucket)
print(f"[{self.NAME}] S3 bucket/key parsed from the argument -> s3_bucket={s3_bucket}, s3_key={s3_key}")
elif "sceptre_user_data" in self.stack_config:
code = self.stack_config.get("sceptre_user_data").get("Code", {})
s3_bucket, s3_key = [code.get("S3Bucket"), code.get("S3Key")]
s3_bucket = self.determine_bucket_name(s3_bucket)
print(f"[{self.NAME}] S3 bucket/key parsed from sceptre_user_data['Code'] -> s3_bucket={s3_bucket}, s3_key={s3_key}")
else:
raise Exception("S3 bucket/key could not be parsed nor from the argument, neither from sceptre_user_data['Code']")
try:
result = self.connection_manager.call(
service="s3",
command="head_object",
kwargs={"Bucket": s3_bucket, "Key": s3_key},
)
version_id = result.get("VersionId")
print("[{}] object s3://{}/{} latest version: {}".format(self.NAME, s3_bucket, s3_key, version_id))
return version_id
except Exception as e:
print(e)
return ''
```
#### File: blog-serverless-ping-pong/tests/test_common.py
```python
from lambdas.common import *
def test_determine_region_and_account():
arn = 'arn:aws:lambda:eu-west-1:612483924670:function:serverless-ping-pong-serverless-pingp-PingFunction-16DJONH1PWZLU'
assert determine_region_and_account(arn) == ('eu-west-1', '612483924670')
```
|
{
"source": "jforge/vaadin",
"score": 3
}
|
#### File: vaadin/scripts/GeneratePostPublishReport.py
```python
import argparse, requests
parser = argparse.ArgumentParser(description="Post-publish report generator")
parser.add_argument("version", type=str, help="Vaadin version that was just built")
parser.add_argument("teamcityUrl", type=str, help="Address to the teamcity server")
parser.add_argument("buildTypeId", type=str, help="The ID of this build step")
parser.add_argument("buildId", type=str, help="ID of the build to generate this report for")
parser.add_argument("projectId", type=str, help="The ID of this project")
args = parser.parse_args()
buildResultUrl = "http://{}/viewLog.html?buildId={}&tab=buildResultsDiv&buildTypeId={}".format(args.teamcityUrl, args.buildId, args.buildTypeId)
(major, minor, maintenance) = args.version.split(".", 2)
prerelease = "." in maintenance
def checkUrlStatus(url):
r = requests.get(url)
return r.status_code == 200
def createTableRow(*columns):
html = "<tr>"
for column in columns:
html += "<td>" + column + "</td>"
return html + "</tr>"
traffic_light = "<svg width=\"20px\" height=\"20px\" style=\"padding-right:5px\"><circle cx=\"10\" cy=\"10\" r=\"10\" fill=\"{color}\"/></svg>"
def getTrafficLight(b):
return traffic_light.format(color="green") if b else traffic_light.format(color="red")
def checkArchetypeMetaData(archetypeMetadataUrl, version):
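# Fetch the archetype metadata XML and check that it advertises the version that was just published.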
archetype_metadata_request = requests.get(archetypeMetadataUrl)
if archetype_metadata_request.status_code != 200:
return createTableRow(traffic_light.format(color="black"), "Check archetype metadata: <a href='{url}'>unable to retrieve metadata from {url}</a>".format(url=archetypeMetadataUrl))
else:
if "version=\"{version}\"".format(version=version) in archetype_metadata_request.content:
return createTableRow(traffic_light.format(color="green"), "Check archetype metadata: <a href='{url}'>metadata is correct for {url}</a>".format(url=archetypeMetadataUrl))
else:
return createTableRow(traffic_light.format(color="red"), "Check archetype metadata: <a href='{url}'>metadata seems to be incorrect for {url}</a>".format(url=archetypeMetadataUrl))
content = "<html><head></head><body><table>"
tagOk = checkUrlStatus("https://github.com/vaadin/framework/releases/tag/{ver}".format(ver=args.version))
content += createTableRow(getTrafficLight(tagOk), "Tag ok on github.com")
# Tag and pin build
content += createTableRow("", "<a href=\"{url}\">Tag and pin build</a>".format(url=buildResultUrl))
# Traffic light for archetype metadata
content += checkArchetypeMetaData("http://vaadin.com/download/eclipse-maven-archetypes.xml", args.version)
if prerelease:
content += checkArchetypeMetaData("http://vaadin.com/download/maven-archetypes-prerelease.xml", args.version)
content += createTableRow("", "Optionally check that <a href=\"http://vaadin.com/download/maven-archetypes.xml\">old Eclipse metadata</a> still refers to Vaadin 7")
content += createTableRow("", "Note that archetype metadata checks do not verify that the relevant sections are not commented out when changing from pre-release to stable and back!")
content += createTableRow("", "Build and deploy new sampler if necessary")
# Inform marketing and PO
content += createTableRow("", "Inform marketing and PO about the release")
# Link to version update in teamcity
content += createTableRow("", "<a href=\"http://{}/admin/editProject.html?projectId={}&tab=projectParams\">Update vaadin.version.latest and vaadin.version.next parameters in TeamCity</a>".format(args.teamcityUrl, args.projectId))
# Link to GH release notes
content += createTableRow("", "<a href=\"https://github.com/vaadin/framework/releases\">Finish and publish release notes in GH</a>")
content += "</table></body></html>"
with open("result/report.html", "wb") as f:
f.write(content)
```
#### File: vaadin/scripts/GeneratePublishReportPart1.py
```python
try:
import requests
except Exception as e:
print("GeneratePublishReportPart1 depends on requests library. Install it with `pip install requests`")
sys.exit(1)
import argparse, cgi, re, sys
from os.path import exists, isdir
from os import makedirs
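# Download metadata URLs and the version pattern each one is expected to match; '{ver}' is substituted with the release version below.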
metadataChecks = {
'https://vaadin.com/download/LATEST7': '^7\..*',
'https://vaadin.com/download/VERSIONS_7': '^7\..*',
'https://vaadin.com/download/release/7.7/LATEST': '^7\..*',
'https://vaadin.com/download/LATEST': '^6\..*',
'https://vaadin.com/download/LATEST8': '^8\.1\..*',
'https://vaadin.com/download/PRERELEASES': '^{ver}'
}
parser = argparse.ArgumentParser(description="Post-publish report generator")
parser.add_argument("version", type=str, help="Vaadin version that was just built")
parser.add_argument("teamcityUrl", type=str, help="Address to the teamcity server")
parser.add_argument("buildTypeId", type=str, help="The ID of this build step")
parser.add_argument("buildId", type=str, help="ID of the build to generate this report for")
args = parser.parse_args()
traffic_light = "<svg width=\"20px\" height=\"20px\" style=\"padding-right:5px\"><circle cx=\"10\" cy=\"10\" r=\"10\" fill=\"{color}\"/></svg>"
def getTrafficLight(b):
return traffic_light.format(color="green") if b else traffic_light.format(color="red")
resultPath = "result"
if not exists(resultPath):
makedirs(resultPath)
elif not isdir(resultPath):
print("Result path is not a directory.")
sys.exit(1)
(major, minor, maintenance) = args.version.split(".", 2)
prerelease = "." in maintenance
if prerelease:
maintenance = maintenance.split('.')[0]
def checkUrlContents(url, regexp):
r = requests.get(url)
return re.match(regexp, r.text) != None
def checkUrlStatus(url):
r = requests.get(url)
return r.status_code == 200
metadataOk = True
for url in metadataChecks:
metadataOk = metadataOk and checkUrlContents(url, metadataChecks[url].format(ver=args.version))
tagOk = checkUrlStatus("https://github.com/vaadin/framework/releases/tag/{ver}".format(ver=args.version))
if not prerelease:
downloadPageOk = checkUrlStatus("https://vaadin.com/download/release/{maj}.{min}/{ver}/".format(maj=major, min=minor, ver=args.version))
else:
downloadPageOk = checkUrlStatus("https://vaadin.com/download/prerelease/{maj}.{min}/{maj}.{min}.{main}/{ver}".format(maj=major, min=minor, main=maintenance, ver=args.version))
content = """<html>
<head></head>
<body>
<table>
<tr><td>{metadataOk}</td><td>Metadata ok on vaadin.com</td></tr>
<tr><td>{downloadPageOk}</td><td>Download folder on vaadin.com contains the version</td></tr>
""".format(metadataOk=getTrafficLight(metadataOk), downloadPageOk=getTrafficLight(downloadPageOk))
mavenUrl = ""
if not prerelease:
mavenUrl = "http://repo1.maven.org/maven2/com/vaadin/vaadin-server/"
content += "<tr><td></td><td><a href='{mvnUrl}'>Check {ver} is published to maven.org (might take a while)</td></tr>".format(ver=args.version, mvnUrl=mavenUrl)
else:
mavenUrl = "http://maven.vaadin.com/vaadin-prereleases/com/vaadin/vaadin-server/"
content += "<tr><td></td><td><a href='{mvnUrl}'>Check {ver} is published as prerelease to maven.vaadin.com</td></tr>".format(ver=args.version, mvnUrl=mavenUrl)
content += "<tr><td></td><td><a href=\"https://github.com/vaadin/framework/milestones\">Create milestone for next version in GitHub</a></td></tr>"
#content += """
#<tr><td></td><td><a href="http://test.vaadin.com/{version}/run/LabelModes?restartApplication">Verify uploaded to test.vaadin.com</a></td></tr>
#""".format(version=args.version)
if not prerelease:
content += '<tr><td></td><td><a href="http://vaadin.com/api">Verify API version list updated</a></td></tr>'
content += "<tr><td></td><td>Run the generated tag_repositories.sh script</td></tr>"
# close GitHub milestone
content += "<tr><td></td><td><a href=\"https://github.com/vaadin/framework/milestones\">Close GitHub Milestone and create one for next version</a></td></tr>"
# release notes
content += "<tr><td></td><td><a href=\"https://github.com/vaadin/framework/releases/new\">Prepare release notes in GH</a></td></tr>"
content += """
<tr><td></td><td><a href="http://{teamcityUrl}/viewLog.html?buildId={buildId}&buildTypeId={buildTypeId}&tab=dependencies"><h2>Start Post-Publish Release from dependencies tab</a></td></tr>
</table>
</body>
</html>""".format(teamcityUrl=args.teamcityUrl, buildTypeId=args.buildTypeId, buildId=args.buildId, version=args.version)
f = open("result/report.html", 'w')
f.write(content)
```
|
{
"source": "jforissier/burn-boot",
"score": 2
}
|
#### File: jforissier/burn-boot/hisi-idt.py
```python
import os
import os.path
import serial, time
import array
import sys, getopt
class bootdownload(object):
'''
Hisilicon boot downloader
>>> downloader = bootdownload()
>>> downloader.download(filename)
'''
# crctab calculated by <NAME>, Network Systems Corporation
crctable = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
startframe = {
'hi3716cv200':[0xFE,0x00,0xFF,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x02,0x01]
}
headframe = {
'hi3716cv200':[0xFE,0x00,0xFF,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x02,0x01]
}
bootheadaddress = {
'hi3716cv200':0xF9800800
}
bootdownloadaddress = {
'hi3716cv200':0x07000000
}
BOOT_HEAD_LEN = 0x4F00
MAX_DATA_LEN = 0x400
def __init__(self,chiptype,serialport):
try:
self.s = serial.Serial(port=serialport, baudrate=115200, timeout=1)
except serial.serialutil.SerialException:
#no serial connection
self.s = None
print("\nFailed to open serial!", serialport)
sys.exit(2)
self.chip = chiptype
def __del__(self):
if self.s != None:
self.s.close()
def calc_crc(self, data, crc=0):
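# CRC-16 with polynomial 0x1021 (XMODEM/CCITT) using the lookup table above; the final loop shifts in two zero bytes to flush the register.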
for char in data:
crc = ((crc << 8) | ord(char)) ^ self.crctable[(crc >> 8) & 0xff]
for i in range(0,2):
crc = ((crc << 8) | 0) ^ self.crctable[(crc >> 8) & 0xff]
return crc & 0xffff
def getsize(self, filename):
st = os.stat(filename)
return st.st_size
def sendframe(self, data, loop):
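# Write the frame and wait for a single 0xAA acknowledge byte, retrying up to 'loop' times before reporting failure.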
for i in range(1, loop):
self.s.flushOutput()
self.s.write(data)
self.s.flushInput()
try:
ack = self.s.read()
if len(ack) == 1:
if ack == chr(0xaa):
return None
except:
return None
print('failed')
def sendstartframe(self):
self.s.timeout =0.01
data = array.array('B', self.startframe[self.chip]).tostring()
crc = self.calc_crc(data)
data += chr((crc >> 8)&0xff)
data += chr(crc&0xff)
self.sendframe(data,10000)
def sendheadframe(self,length,address):
self.s.timeout = 0.03
self.headframe[self.chip][4] = (length>>24)&0xff
self.headframe[self.chip][5] = (length>>16)&0xff
self.headframe[self.chip][6] = (length>>8)&0xff
self.headframe[self.chip][7] = (length)&0xff
self.headframe[self.chip][8] = (address>>24)&0xff
self.headframe[self.chip][9] = (address>>16)&0xff
self.headframe[self.chip][10] = (address>>8)&0xff
self.headframe[self.chip][11] = (address)&0xff
data = array.array('B', self.headframe[self.chip]).tostring()
crc = self.calc_crc(data)
data += chr((crc >> 8)&0xff)
data += chr(crc&0xff)
self.sendframe(data,16)
def senddataframe(self,seq,data):
self.s.timeout = 0.15
head = chr(0xDA)
head += chr(seq&0xFF)
head += chr((~seq)&0xFF)
data = head + data
crc = self.calc_crc(data)
data += chr((crc >> 8)&0xff)
data += chr(crc&0xff)
self.sendframe(data,32)
def sendtailframe(self,seq):
data = chr(0xED)
data += chr(seq&0xFF)
data += chr((~seq)&0xFF)
crc = self.calc_crc(data)
data += chr((crc >> 8)&0xff)
data += chr(crc&0xff)
self.sendframe(data,16)
def senddata(self, data, address):
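# Transfer data to the given address: a head frame with length and address, numbered data frames of at most MAX_DATA_LEN bytes, then a tail frame.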
length=len(data)
self.sendheadframe(length,address)
seq=1
while length > self.MAX_DATA_LEN:
self.senddataframe(seq,data[(seq-1)*self.MAX_DATA_LEN:seq*self.MAX_DATA_LEN])
seq = seq+1
length = length-self.MAX_DATA_LEN
self.senddataframe(seq,data[(seq-1)*self.MAX_DATA_LEN:])
self.sendtailframe(seq+1)
def download(self, filename1, filename2):
f=open(filename1,"rb")
data = f.read()
f.close()
print('Sending', filename1, '...')
self.senddata(data,self.bootheadaddress[self.chip])
print('Done\n')
if filename2:
f=open(filename2,"rb")
data = f.read()
f.close()
print('Sending', filename2, '...')
self.senddata(data,self.bootdownloadaddress[self.chip])
print('Done\n')
def burnboot(chiptype, serialport, filename1, filename2=''):
downloader = bootdownload(chiptype, serialport)
downloader.download(filename1, filename2)
def startterm(serialport=0):
try:
miniterm = Miniterm(
serialport,
115200,
'N',
rtscts=False,
xonxoff=False,
echo=False,
convert_outgoing=2,
repr_mode=0,
)
except serial.SerialException as e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
miniterm.start()
miniterm.join(True)
miniterm.join()
def main(argv):
'''
img2 = 'fastboot2.img'
'''
img1 = 'fastboot1.img'
img2 = ''
dev = '';
dev1 = '/dev/serial/by-id/usb-䕇䕎䥎_㌲㔴㜶㤸-if00-port0'
dev2 = '/dev/serial/by-id/pci-䕇䕎䥎_㌲㔴㜶㤸-if00-port0'
try:
opts, args = getopt.getopt(argv,"hd:",["img1=","img2="])
except getopt.GetoptError:
print('hisi-idt.py -d device --img1 <fastboot1> --img2 <fastboot2>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('hisi-idt.py -d device --img1 <fastboot1> --img2 <fastboot2>')
sys.exit()
elif opt in ("-d"):
dev = arg
elif opt in ("--img1"):
img1 = arg
elif opt in ("--img2"):
img2 = arg
if dev == '':
if os.path.exists(dev1):
dev = dev1
elif os.path.exists(dev2):
dev = dev2
else:
print('Device not detected under /dev/serial/by-id/. Please use -d.')
sys.exit(3)
print('+----------------------+')
print(' Serial: ', dev)
print(' Image1: ', img1)
print(' Image2: ', img2)
print('+----------------------+\n')
if not os.path.isfile(img1):
print("Image don't exists:", img1)
sys.exit(1)
if (img2):
if not os.path.isfile(img2):
print("Image don't exists:", img2)
sys.exit(1)
burnboot('hi3716cv200', dev, img1, img2)
if __name__ == "__main__":
main(sys.argv[1:])
```
|
{
"source": "jforseth210/cattledatabase",
"score": 2
}
|
#### File: jforseth210/cattledatabase/main.py
```python
import json
import re
import subprocess
import socket
import contextlib
import io
import sys
import os
import platform
import time
import logging
import urllib.parse
from getpass import getpass
import argparse
# Trying to get this installed on Windows is agnonizing
# This might help: https://github.com/miniupnp/miniupnp/issues/159
import miniupnpc
import requests
from flask import Flask, render_template, request, redirect, Markup
from jinja2 import Environment, BaseLoader
from flask_simplelogin import SimpleLogin, login_required
import click
import werkzeug.security
from models import *
from search_functions import *
from setup_utils import *
from sensitive_data import SECRET_KEY
from api import api
from api_cows import api_cows
from api_events import api_events
from api_transactions import api_transactions
from cows import cows
from events import events
from transactions import transactions
if getattr(sys, 'frozen', False):
template_folder = os.path.join(sys._MEIPASS, 'templates')
static_folder = os.path.join(sys._MEIPASS, 'static')
app = Flask(__name__, template_folder=template_folder,
static_folder=static_folder)
else:
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cattle.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = SECRET_KEY
app.register_blueprint(api, url_prefix='/api')
app.register_blueprint(api_cows, url_prefix='/api/cows')
app.register_blueprint(api_events, url_prefix='/api/events')
app.register_blueprint(api_transactions, url_prefix='/api/transactions')
app.register_blueprint(cows, url_prefix='/cows')
app.register_blueprint(events, url_prefix='/events')
app.register_blueprint(transactions, url_prefix='/transactions')
db.init_app(app)
app.jinja_env.globals['COW_SEXES'] = COW_SEXES
app.jinja_env.globals['COW_SEXES_FEMALE'] = COW_SEXES_FEMALE
app.jinja_env.globals['COW_SEXES_FEMALE_POSSIBLE_PARENTS'] = COW_SEXES_FEMALE_POSSIBLE_PARENTS
app.jinja_env.globals['COW_SEXES_FEMALE_IMPOSSIBLE_PARENTS'] = COW_SEXES_FEMALE_IMPOSSIBLE_PARENTS
app.jinja_env.globals['COW_SEXES_MALE'] = COW_SEXES_MALE
app.jinja_env.globals['COW_SEXES_MALE_POSSIBLE_PARENTS'] = COW_SEXES_MALE_POSSIBLE_PARENTS
app.jinja_env.globals['COW_SEXES_MALE_IMPOSSIBLE_PARENTS'] = COW_SEXES_MALE_IMPOSSIBLE_PARENTS
def login_checker(provided_user):
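# Validate the submitted username and password against the hashed credentials stored in config.json.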
with open("config.json", "r") as file:
config_string = file.read()
config = json.loads(config_string)
users = config["users"]
for user in users:
if provided_user["username"] == user["username"] and werkzeug.security.check_password_hash(user["hashed_password"], provided_user["password"]):
return True
return False
messages = {
'login_success': 'Logged In',
'login_failure': 'Login Failed',
'is_logged_in': 'Logged In!',
'logout': 'Logged Out',
'login_required': 'Please log in first',
'access_denied': 'You don\'t have access to this page',
'auth_error': 'Something went wrong: {0}'
}
SimpleLogin(app, login_checker=login_checker, messages=messages)
@app.route("/")
@login_required
def home():
return redirect("/cows")
@app.route('/calendar/events/api')
def event_api():
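# Serialise all events as {title, start, id} objects for the calendar view.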
events = Event.query.all()
formatted_events = []
for event in events:
cow_string = ", ".join(cow.tag_number for cow in event.cows)
formatted_event = {
'title': event.name + ": " + cow_string,
'start': event.date,
'id': event.event_id
}
formatted_events.append(formatted_event)
return json.dumps(formatted_events)
@app.route("/search")
@login_required
def search():
# Arguments
query = request.args.get("q")
# What kind of things are we searching for?
types = determine_types(request)
argument_dict = {"types": request.args.getlist("type")}
if "Cow" in types:
argument_dict.update({
"tags": request.args.getlist("tag"),
"sexes": request.args.getlist("sex"),
"owners": request.args.getlist("owner"),
"sires": request.args.getlist("sire"),
"dams": request.args.getlist("dam")
})
if "Event" in types:
argument_dict.update({
"dates": request.args.getlist("date"),
"event_names": request.args.getlist("event_name")
})
if "Transaction" in types:
argument_dict.update({
"transaction_names": request.args.getlist("transaction_name"),
"prices": request.args.getlist("price")
})
unique_values = get_unique_values()
results = get_results(types, argument_dict, query)
# TODO: Fix this mess
if types == ["Transaction"]:
if argument_dict["prices"] == ["Low to High"]:
for i in results:
print(i.body)
results.sort(key=lambda x: float(
re.search("\$.\d+.\d+", x.body).group().strip("$")))
else:
results.sort(key=lambda x: float(
re.search("\$.\d+.\d+", x.body).group().strip("$")), reverse=True)
# Send it
return render_template("search.html", query=query, results=results, unique_values=unique_values, checked_values=argument_dict)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--headless", action="store_true",
help="Run the server without opening it in browser")
parser.add_argument("--show_log", action="store_true",
help="Show default flask server information")
args = parser.parse_args()
SHOW_SERVER = not args.show_log
app.debug = True
if getattr(sys, 'frozen', False) or SHOW_SERVER:
show_server(args.headless)
app.debug = False
# Silence server log
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
def secho(text, file=None, nl=None, err=None, color=None, **styles):
pass
def echo(text, file=None, nl=None, err=None, color=None, **styles):
pass
click.echo = echo
click.secho = secho
app.run(debug=app.debug, host="0.0.0.0")
```
#### File: jforseth210/cattledatabase/transactions.py
```python
from flask import Blueprint, render_template, request, redirect
from flask_simplelogin import login_required
from models import Transaction, Cow, Event, db, get_cow_from_tag
transactions = Blueprint('transactions', __name__, template_folder='templates')
@transactions.route("/")
@login_required
def show_transactions():
transactions = Transaction.query.all()
total = sum(
transaction.price * len(transaction.cows) for transaction in transactions
)
formatted_total = "${:,.2f}".format(total)
return render_template("transactions.html", transactions=transactions, formatted_total=formatted_total, unformatted_total=total)
@transactions.route("/transaction/<transaction_id>")
@login_required
def show_transaction(transaction_id):
transaction = Transaction.query.filter_by(
transaction_id=transaction_id).first()
if not transaction:
return redirect(request.referrer)
return render_template("transaction.html", transaction=transaction, all_cows=Cow.query.all())
@ transactions.route("/new", methods=["POST"])
@login_required
def new_transaction():
event_id = request.form.get('event_id')
event = Event.query.filter_by(event_id=event_id).first()
price = request.form.get('price')
name = request.form.get('name')
tofrom = request.form.get('tofrom')
description = request.form.get('description')
new_transaction_object = Transaction(
price=price,
name=name,
description=description,
event_id=event_id,
tofrom=tofrom,
cows=event.cows
)
db.session.add(new_transaction_object)
db.session.commit()
return redirect(request.referrer+"#transactions")
@transactions.route("/update_cows", methods=["POST"])
@login_required
def transaction_add_remove_cows():
all_cows = request.form.getlist("all_cows")
new_cow = request.form.get("new_cow")
transaction_id = request.form.get("transaction")
transaction = Transaction.query.filter_by(
transaction_id=transaction_id).first()
if all_cows:
transaction.cows = [get_cow_from_tag(cow) for cow in all_cows]
elif new_cow:
transaction.cows.append(get_cow_from_tag(new_cow))
db.session.commit()
return redirect(request.referrer)
@ transactions.route("/change_price", methods=["POST"])
@login_required
def transaction_change_price():
transaction_id = request.form.get("transaction_id")
price = request.form.get("price")
transaction = Transaction.query.filter_by(
transaction_id=transaction_id).first()
transaction.price = price
db.session.commit()
return redirect(request.referrer)
@ transactions.route("/change_description", methods=["POST"])
@login_required
def transaction_change_description():
transaction_id = request.form.get("transaction_id")
description = request.form.get("description")
transaction = Transaction.query.filter_by(
transaction_id=transaction_id).first()
transaction.description = description
db.session.commit()
return redirect(request.referrer)
@ transactions.route("/change_name", methods=["POST"])
@login_required
def transaction_change_name():
transaction_id = request.form.get("transaction_id")
name = request.form.get("name")
transaction = Transaction.query.filter_by(
transaction_id=transaction_id).first()
transaction.name = name
db.session.commit()
return redirect(request.referrer)
@ transactions.route("/change_to_from", methods=["POST"])
@login_required
def transaction_change_to_from():
transaction_id = request.form.get("transaction_id")
tofrom = request.form.get("tofrom")
transaction = Transaction.query.filter_by(
transaction_id=transaction_id).first()
transaction.tofrom = tofrom
db.session.commit()
return redirect(request.referrer)
@transactions.route("/delete", methods=["POST"])
@login_required
def delete_transaction():
transaction_id = request.form.get("transaction_id")
transaction = Transaction.query.filter_by(
transaction_id=transaction_id).first()
db.session.delete(transaction)
db.session.commit()
return redirect('/transactions')
```
|
{
"source": "jforv/prmgt",
"score": 2
}
|
#### File: doctype/unit_user/unit_user_dashboard.py
```python
from frappe import _
def get_data():
return {
'heatmap': False,
'heatmap_message': _('This is based on transactions against this Patient. See timeline below for details'),
# 'fieldname': 'tenant',
'transactions': [
{
'label': _('Facility and Request'),
'items': ['Facility Booking', 'Facility', 'Special Request']
},
{
'label': _('Sales Invoice and Complaint'),
'items': ['Sales Invoice', 'Item']
}
]
}
```
|
{
"source": "jforv/property",
"score": 2
}
|
#### File: doctype/meter_reading/meter_reading.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import msgprint, throw, _
class MeterReading(Document):
pass
def get_submeters(self):
r = frappe.db.sql("""SELECT u.name as unit, b.previous_reading as previous_reading, b.name as unit_charge """+
"""FROM `tabUnit Charge` b, tabUnit u, tabCharge c, `tabMain Meter` m"""+
" WHERE u.name = b.parent AND b.charge = c.name AND c.charge_type = 'Meter' AND b.main_meter = m.name AND m.name = %s",self.main_meter, as_dict = 1)
return r
def on_submit(self):
l = frappe.db.sql("""SELECT unit_charge, current_reading FROM `tabUnit Meter Reading` b, `tabMeter Reading` a """+
"""WHERE a.name = b.parent AND a.name = %s""", self.name,as_dict=1)
for d in l:
frappe.db.sql("""UPDATE `tabUnit Charge` SET previous_reading = %s """+
"""WHERE name = %s""", (d.current_reading, d.unit_charge))
```
|
{
"source": "jfowl/badge-2021",
"score": 3
}
|
#### File: badge-2021/software/game_of_life.py
```python
from emulator import SSD1306_I2C
BLACK = 0
WHITE = 1
def rng():
'Return a pseudo random byte value'
try:
# http://docs.micropython.org/en/latest/library/uos.html?highlight=uos#uos.urandom
import os
return os.urandom(1)[0]
except:
print('unable to use urandom. Trying other RNG')
try:
import random
return random.randbytes(1)[0]
except:
print('Unable to use randbytes')
class GameOfLife:
"""Conway's Game of Life"""
MAX_ITERATIONS = 100
INITIAL_CELL_COVERAGE = 0.2
def __init__(self):
self.iterations = 0
self.oled = SSD1306_I2C()
self.cells = []
for x in range(self.oled.width):
self.cells.append([0] * self.oled.height)
self.reset()
def reset(self):
self.iterations = 0
for i in range(
int(self.oled.width * self.oled.height * GameOfLife.INITIAL_CELL_COVERAGE)):
x = int(rng() / 255 * (self.oled.width-1))
y = int(rng() / 255 * (self.oled.height-1))
self.cells[x][y] = 1
def _iterate(self):
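# Build the next generation: live cells survive with 2 or 3 neighbours, dead cells come alive with exactly 3 (standard B3/S23 rules).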
new_board = []
for x in range(self.oled.width):
new_board.append([0] * self.oled.height)
for y in range(self.oled.height):
for x in range(self.oled.width):
num_neighbours = self._neighbours(x,y)
if self.cells[x][y]==1 and num_neighbours in (2, 3):
new_board[x][y] = 1
elif self.cells[x][y]==0 and num_neighbours==3:
new_board[x][y] = 1
else:
new_board[x][y] = 0
return new_board
def on_board(self, x, y):
return 0 <= x < self.oled.width and 0 <= y < self.oled.height
def _neighbours(self, x,y):
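# Count live cells among the eight surrounding positions, skipping coordinates that fall outside the board.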
r = [-1, 0, 1]
num_neighbours = 0
for dx,dy in [(i,j) for i in r for j in r if not i == j == 0]:
if self.on_board(x+dx,y+dy) and self.cells[x+dx][y+dy] == 1:
num_neighbours += 1
return num_neighbours
def prepare(self):
self.cells = self._iterate()
self.iterations += 1
if self.iterations > GameOfLife.MAX_ITERATIONS:
self.reset()
def handle_px(self, x, y):
if self.cells[x][y]==1:
color = WHITE
else:
color = BLACK
self.oled.pixel(x,y, color)
gol = GameOfLife()
while True:
gol.prepare()
for y in range(gol.oled.height):
for x in range(gol.oled.width):
gol.handle_px(x,y)
gol.oled.fill_rect(0,0, 90, 10, BLACK)
gol.oled.text(str(gol.iterations) + " - Game Of Life", 0,0, WHITE)
gol.oled.show()
```
|
{
"source": "jfozard/HEI10",
"score": 2
}
|
#### File: HEI10/plotting/kymo_tc.py
```python
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import matplotlib as mpl
mpl.rcParams.update({ #'figure.figsize': (6.0,4.0),
'figure.facecolor': 'none', #(1,1,1,0), # play nicely with white background in the Qt and notebook
'axes.facecolor': 'none',
'figure.edgecolor': 'none',
'font.size': 20, # 12pt labels get cutoff on 6x4 logplots, so use 10pt.
'figure.dpi': 72, # 72 dpi matches SVG/qtconsole
'figure.subplot.bottom' : .15, # 10pt still needs a little more room on the xlabel
'axes.labelsize':28,
'savefig.edgecolor': 'none',
'savefig.facecolor': 'none',
'svg.fonttype' : 'none',
})
# Simulation time-course plotting HEI10 amount at each RI over time
def main():
u = []
with open(sys.argv[1], 'r') as f:
try:
h = next(f)
L, T = map(float, h.split(','))
h = next(f)
x = list(map(float, h.split(',')))
while True:
l = next(f)
u.append(list(map(float, l.split(','))))
except StopIteration:
pass
u = np.array(u)
m = 100
T = T/60/60
n_dt, N = u.shape
t_data = np.linspace(0, T, n_dt)
plt.figure()
for i in range(N):
v = u[:, i]
plt.plot(t_data, v)
plt.ylabel('Intensity (arb. units)')
plt.xlabel('Time (hr)')
plt.savefig(sys.argv[2])
# plt.show()
main()
```
#### File: HEI10/source_data_scripts/data_make_table.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import os
import os.path
from imageio import imread
from scipy.signal import find_peaks, find_peaks_cwt
from scipy.signal import peak_prominences
import scipy.linalg as la
from functools import reduce
import pickle
alpha_max = 0.4
def pc(peaks, alpha=alpha_max, criterion='max'):
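# Keep only peaks whose intensity is at least alpha times the strongest peak on the trace; returns filtered positions, intensities and the trace length.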
pos, v, l = peaks
idx = (v>=alpha*np.max(v))
return np.array(pos[idx]), v[idx], l
## Analysis of each dataset
def analyse_data(A, data):
all_peak_data = data['all_peak_data']
o_hei10_traces = data['o_hei10_traces']
orig_trace_lengths = data['orig_trace_lengths']
stages = []
cell_id = []
all_quality = []
all_good = []
chromosome_id = []
sig_peaks = []
new_peak_hei10 = []
orig_peak_hei10 = []
trace_median_hei10 = []
trace_sum_hei10 = []
### For each cell in turn, process the
for i in range(0, len(A), 5):
stage = A.iloc[(i//5)*5].Stage
if(type(stage)==str):
stage = stage.lower()
stages += [stage]*5
cell_id += [i//5]*5
all_quality += [(A.iloc[i:i+5]['quality']==1).all()]*5
all_good += ['y' if (A.iloc[i:i+5]['good trace?']=='y').all() else 'n']*5
chromosome_id += list(range(5))
median_all = np.median(np.concatenate([o_hei10_traces[k] for k in range(i,i+5)]))
std_all = np.std(np.concatenate([o_hei10_traces[k] for k in range(i,i+5)]))
for j in range(i, i+5):
p, h, l = all_peak_data[j]
hei10=np.array(o_hei10_traces[j])
if len(p):
pp = pc((p, h, l), alpha_max)
orig_peak_hei10.append(hei10[pp[0]])
sig_peaks.append(pp[0])
else:
sig_peaks.append([])
orig_peak_hei10.append([])
trace_median_hei10.append(np.median(hei10))
trace_sum_hei10.append(np.sum(hei10))
print(len(A.index), len(stages))
A['Stage']=stages
A['original_trace_pixel_length']=list(orig_trace_lengths.values())
A['trace_pixel_length']=list(v[2] for v in all_peak_data.values())
A['all_quality']=all_quality
A['all_good']=all_good
A['num_foci']=[len(p) for p in sig_peaks]
A['trace_median_hei10']=trace_median_hei10
A['trace_sum_hei10']=trace_sum_hei10
# Sort the 5 chromosomes in each cell by length
# Fo each cell, sort the chromosomes in order
def analyse_SC_lengths(df):
ch_sort_idx = []
for j, i in enumerate(df.index[::5]):
p = df.iloc[5*j:5*j+5]
s = sorted(p['SC length'])
sidx = np.argsort(p['SC length'].to_numpy())
rank = np.zeros((5,), dtype=np.int32)
for k in range(5):
rank[sidx[k]] = k
ch_sort_idx += list(rank)
return ch_sort_idx
print(len(A.index), len(analyse_SC_lengths(A)))
A['ch_sort_idx'] = analyse_SC_lengths(A)
cell_total_SC_length = []
for i in range(0, len(A), 5):
lengths = A['SC length'][i:i+5]
tot_length = np.sum(lengths)
cell_total_SC_length += [tot_length]*5
A['cell_total_SC_length'] = cell_total_SC_length
norm_lengths = []
for i in range(0, len(A), 5):
lengths = A['SC length'][i:i+5]
lengths = lengths/np.sum(lengths)
norm_lengths+= list(lengths)
A['norm_lengths']=norm_lengths
# Now get sig peak positions and intensities
foci_data = []
for i in range(len(A)):
foci_pos = sig_peaks[i]
foci_hei10 = orig_peak_hei10[i]
data_line = {}
for j, (p, h) in enumerate(zip(foci_pos, foci_hei10)):
data_line[f'pos_{j}'] = p
data_line[f'HEI10_{j}'] = h
foci_data.append(data_line)
print(foci_data)
fd = pd.DataFrame(foci_data)
A = pd.concat([A, fd], axis=1)
return A
def process_data(csv_fn, pkl_fn, output_file, output_trace_file, specific_traces, trace_output_paths):
# Read CSV file with data specification
A = pd.read_csv(csv_fn)
# Remove some unused columns and rename one field
A = A.drop(A.columns[[6,7,8,9]], axis=1)
A = A.drop(columns=[x for x in ('foci count', 'foci per chromosome', 'comments') if x in A])
A = A.rename(columns={'Plant ':'Plant'})
data = pickle.load(open(pkl_fn, 'rb'))
print(data.keys())
print(data['orig_trace_lengths'])
A = analyse_data(A, data)
for i, v in data['all_peak_data'].items():
print(v)
print(A.iloc[0])
A.to_csv(output_file)
# Write original hei10 traces to file
with open(output_trace_file, 'w') as f:
for i, v in data['o_hei10_traces'].items():
f.write(', '.join(map(str, v)) + '\n')
for i, fn in zip(specific_traces, trace_output_paths):
with open(fn, 'w') as f:
t = data['o_hei10_traces'][i]
for v in t:
f.write(f'{v}' + '\n')
data_output_path = '../output/data_output/'
process_data('../input_data/200406.csv', data_output_path+'test.pkl', '../source_data/fig2_cytology.csv', '../source_data/fig2_cytology_raw_traces.csv', [675, 664, 492], [f'../source_data/fig1a_HEI10_trace_{s}.csv' for s in ['upper', 'mid', 'lower']])
process_data('../input_data/OX.csv', data_output_path+'test_ox.pkl', '../source_data/fig3_cytology.csv', '../source_data/fig3_cytology_raw_traces.csv', [260], ['../source_data/fig3a_HEI10_trace.csv'])
process_data('../input_data/UX.csv', data_output_path+'test_ux.pkl', '../source_data/fig4_cytology.csv', '../source_data/fig4_cytology_raw_traces.csv', [115], ['../source_data/fig4a_HEI10_trace.csv'])
```
|
{
"source": "jfozard/napari-manual-stack-project",
"score": 2
}
|
#### File: napari-manual-stack-project/napari_manual_stack_project/_dock_widget.py
```python
from napari_plugin_engine import napari_hook_implementation
from qtpy.QtWidgets import QWidget, QHBoxLayout, QPushButton
import numpy as np
import napari
from magicgui import magic_factory, magicgui
from napari.layers import Image, Points, Shapes
def apply(image, points, axis):
"""Project `image` along `axis` using the manually picked `points`."""
raise NotImplementedError
@magic_factory
def example_magic_widget(img_layer: "napari.layers.Image"):
print(f"you have selected {img_layer}")
def widget_wrapper():
from napari.qt.threading import thread_worker
@thread_worker
def run_project(image, points, axis, method):
if method == 'CNN':
ar2 = apply(image, points, axis)
else:
ar2 = np.max(image, axis=axis)
return ar2
@magicgui(call_button='run projection',
layout='vertical',
axis = dict(widget_type='SpinBox', label='axis', value=0),
method = dict(widget_type='ComboBox', label ='method', choices=('rbf', 'maxproj'), value='rbf')
)
def widget(#label_logo,
viewer: napari.viewer.Viewer,
image_layer: Image,
point_layer: Points,
axis,
method):
def _new_layers(result):
viewer.add_image(result[0], name=image_layer.name + '_orig', visible=False)
viewer.add_image(result[1], name=image_layer.name + '_mask', visible=False)
viewer.add_image(result[2], name=image_layer.name + '_projected', visible=False)
def _new_image(result):
_new_layers(result)
viewer.layers[-1].visible = True
image_layer.visible = True
widget.call_button.enabled = True
image = image_layer.data
cp_worker = run_project(image, point_layer.data, axis, method)
cp_worker.returned.connect(_new_image)
cp_worker.start()
return widget
@napari_hook_implementation
def napari_experimental_provide_dock_widget():
# you can return either a single widget, or a sequence of widgets
return widget_wrapper, {'name':'manual-stack-project'}
```
|
{
"source": "jfozard/scenic",
"score": 2
}
|
#### File: scenic/dataset_lib/leaf_dataset.py
```python
import functools
from typing import Optional
from absl import logging
import jax.numpy as jnp
from scenic.dataset_lib import dataset_utils
from scenic.dataset_lib import datasets
import tensorflow as tf
from . import jf_leaves
IMAGE_SIZE = [512, 512]
def preprocess_example(example, dtype=tf.float32):
"""Preprocesses the given image.
Args:
example: dict; Example coming from TFDS.
dtype: Tensorflow data type; Data type of the image.
Returns:
An example dict as required by the model.
"""
example_out = {}
# For simplicity, just resize all images to the same shape:
example_out['inputs'] = tf.image.resize(
dataset_utils.normalize(example['image'], dtype), IMAGE_SIZE, 'bilinear')
example_out['inputs'] = tf.cast(example_out['inputs'], dtype)
example_out['label'] = tf.image.resize(
example['segmentation_mask'], IMAGE_SIZE, 'nearest')
example_out['label'] = tf.squeeze(example_out['label'], axis=2)
example_out['label'] = tf.cast(example_out['label'], dtype)
return example_out
@datasets.add_dataset('jf_leaves')
def get_dataset(*,
batch_size,
eval_batch_size,
num_shards,
dtype_str='float32',
shuffle_seed=0,
rng=None,
dataset_configs=None,
dataset_service_address: Optional[str] = None):
"""Returns generators for the Oxford Pet train, validation, and test set.
Args:
batch_size: int; Determines the train batch size.
eval_batch_size: int; Determines the evaluation batch size.
num_shards: int; Number of shards --> batch shape: [num_shards, bs, ...].
dtype_str: Data type of the image (e.g. 'float32').
shuffle_seed: int; Seed for shuffling the training data.
rng: JAX rng key, which can be used for augmentation, shuffling, etc.
dataset_configs: dict; Dataset specific configurations.
dataset_service_address: If set, will distribute the training dataset using
the given tf.data service at the given address.
Returns:
A dataset_utils.Dataset() which includes a train_iter, a valid_iter,
a test_iter, and a dict of meta_data.
"""
del rng
del dataset_configs
dtype = getattr(tf, dtype_str)
preprocess_ex = functools.partial(preprocess_example, dtype=dtype)
logging.info('Loading train split of the leaf dataset.')
train_ds, _ = dataset_utils.load_split_from_tfds(
'jf_leaves',
batch_size,
split='train',
preprocess_example=preprocess_ex,
shuffle_seed=shuffle_seed)
if dataset_service_address:
if shuffle_seed is not None:
raise ValueError('Using dataset service with a random seed causes each '
'worker to produce exactly the same data. Add '
'config.shuffle_seed = None to your config if you '
'want to run with dataset service.')
logging.info('Using the tf.data service at %s', dataset_service_address)
train_ds = dataset_utils.distribute(train_ds, dataset_service_address)
logging.info('Loading test split of the leaf dataset.')
eval_ds, _ = dataset_utils.load_split_from_tfds(
'jf_leaves', eval_batch_size, split='test',
preprocess_example=preprocess_ex)
maybe_pad_batches_train = functools.partial(
dataset_utils.maybe_pad_batch, train=True, batch_size=batch_size,
pixel_level=True)
maybe_pad_batches_eval = functools.partial(
dataset_utils.maybe_pad_batch, train=False, batch_size=eval_batch_size,
pixel_level=True)
shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)
train_iter = iter(train_ds)
train_iter = map(dataset_utils.tf_to_numpy, train_iter)
train_iter = map(maybe_pad_batches_train, train_iter)
train_iter = map(shard_batches, train_iter)
eval_iter = iter(eval_ds)
eval_iter = map(dataset_utils.tf_to_numpy, eval_iter)
eval_iter = map(maybe_pad_batches_eval, eval_iter)
eval_iter = map(shard_batches, eval_iter)
input_shape = (-1, IMAGE_SIZE[0], IMAGE_SIZE[1], 3)
meta_data = {
'num_classes':
2,
'input_shape':
input_shape,
'num_train_examples':
dataset_utils.get_num_examples('jf_leaves', 'train'),
'num_eval_examples':
dataset_utils.get_num_examples('jf_leaves', 'test'),
'input_dtype':
getattr(jnp, dtype_str),
'target_is_onehot':
False,
}
return dataset_utils.Dataset(train_iter, eval_iter, None, meta_data)
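def _example_config():
  # Hedged sketch, not part of the original file: minimal config fields a
  # scenic experiment would typically set to pick up the dataset registered
  # above by name. Anything beyond dataset_name and the batch sizes is an
  # assumption for illustration only.
  import ml_collections
  config = ml_collections.ConfigDict()
  config.dataset_name = 'jf_leaves'
  config.batch_size = 8
  config.eval_batch_size = 8
  return config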
```
#### File: projects/vivit/trainer.py
```python
import copy
import functools
from typing import Any, Dict, Tuple
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
import jax
import jax.numpy as jnp
import jax.profiler
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.projects.vivit import evaluation_lib
from scenic.projects.vivit import train_utils as vivit_train_utils
from scenic.train_lib import lr_schedules
from scenic.train_lib import optimizers
from scenic.train_lib import pretrain_utils
from scenic.train_lib import train_utils
def train(
*,
rng: jnp.ndarray,
config: ml_collections.ConfigDict,
model_cls: Any,
dataset: dataset_utils.Dataset,
workdir: str,
writer: metric_writers.MetricWriter,
) -> Tuple[train_utils.TrainState, Dict[str, Any], Dict[str, Any]]:
"""Main training loop lives in this function.
Given the model class and dataset, it prepares the items needed to run the
training, including the TrainState.
Args:
rng: Jax rng key.
config: Configurations of the experiment.
model_cls: Model class; A model has a flax_module, a loss_fn, and a
metrics_fn associated with it.
dataset: The dataset that has train_iter, eval_iter, meta_data, and
optionally, test_iter.
workdir: Directory for checkpointing.
writer: CLU metrics writer instance.
Returns:
train_state that has the state of training (including current
global_step, model_state, rng, and the optimizer), train_summary
and eval_summary which are dict of metrics. These outputs are used for
regression testing.
"""
lead_host = jax.process_index() == 0
# Build the loss_fn, metrics, and flax_model.
model = model_cls(config, dataset.meta_data)
is_multilabel_model = (config.model_name == 'vivit_multilabel_classification')
get_confusion_matrix = (config.get('confusion_matrix_metrics', False)
and not is_multilabel_model)
# Initialize model.
rng, init_rng = jax.random.split(rng)
(params, model_state, num_trainable_params,
gflops) = train_utils.initialize_model(
model_def=model.flax_model,
input_spec=[(dataset.meta_data['input_shape'],
dataset.meta_data.get('input_dtype', jnp.float32))],
config=config,
rngs=init_rng)
# Create optimizer.
# We jit this, such that the arrays that are created are created on the same
# device as the input is, in this case the CPU. Else they'd be on device[0].
optimizer = jax.jit(
optimizers.get_optimizer(config).create, backend='cpu')(
params)
rng, train_rng = jax.random.split(rng)
train_state = train_utils.TrainState(
global_step=0,
optimizer=optimizer,
model_state=model_state,
rng=train_rng,
accum_train_time=0)
start_step = train_state.global_step
if config.checkpoint:
train_state, start_step = train_utils.restore_checkpoint(
workdir, train_state)
if (start_step == 0 # Which means "no" checkpoint is restored!
and config.get('init_from') is not None):
restored_model_cfg = config.init_from.get('model_config')
init_checkpoint_path = config.init_from.get('checkpoint_path')
checkpoint_format = config.init_from.get('checkpoint_format', 'scenic')
if checkpoint_format == 'scenic':
restored_train_state = pretrain_utils.restore_pretrained_checkpoint(
init_checkpoint_path, train_state, assert_exist=True)
elif checkpoint_format == 'bigvision':
restored_train_state = pretrain_utils.convert_bigvision_to_scenic_checkpoint(
init_checkpoint_path, train_state)
# Config dict in bigvision is not the same format as scenic.
# Therefore, make sure config match the config of the loaded model!
restored_model_cfg = copy.deepcopy(config)
# The following is needed when the restored and target models used a
# different classifier. As bigvision uses a different config dict, we have
# to specify this manually.
restored_model_cfg.model.classifier = config.init_from.get(
'classifier_type', 'token')
train_state = model.init_from_train_state(train_state, restored_train_state,
restored_model_cfg)
# Free unnecessary memory.
del restored_train_state
elif start_step == 0:
    logging.info('Training completely from scratch. '
'Not restoring from any checkpoint.')
  # Replicate the optimizer, state, and rng.
train_state = jax_utils.replicate(train_state)
del params # Do not keep a copy of the initial params.
# Calculate the total number of training steps.
total_steps, steps_per_epoch = train_utils.get_num_training_steps(
config, dataset.meta_data)
# Get learning rate scheduler.
learning_rate_fn = lr_schedules.get_learning_rate_fn(config)
train_step_pmapped = jax.pmap(
functools.partial(
vivit_train_utils.train_step,
flax_model=model.flax_model,
learning_rate_fn=learning_rate_fn,
loss_fn=model.loss_function,
metrics_fn=model.get_metrics_fn('train'),
config=config,
debug=config.debug_train),
axis_name='batch',
# We can donate both buffers of train_state and train_batch.
donate_argnums=(0, 1),
)
eval_step_pmapped = jax.pmap(
functools.partial(
vivit_train_utils.eval_step,
flax_model=model.flax_model,
metrics_fn=model.get_metrics_fn('validation'),
return_logits_and_labels=is_multilabel_model,
return_confusion_matrix=get_confusion_matrix,
debug=config.debug_eval),
axis_name='batch',
# We can donate the eval_batch's buffer.
donate_argnums=(1,),
)
log_eval_steps = config.get('log_eval_steps') or steps_per_epoch
log_test_steps = 0
if config.dataset_configs.get('do_multicrop_test'):
log_test_steps = int(steps_per_epoch *
config.dataset_configs.log_test_epochs)
test_step_pmapped = jax.pmap(
functools.partial(
vivit_train_utils.test_step,
flax_model=model.flax_model,
metrics_fn=model.get_metrics_fn('test'),
n_clips=config.get('multicrop_clips_per_device', 2),
debug=config.debug_eval),
axis_name='batch',
# We can donate the test_batch's buffer.
donate_argnums=(1,),
)
assert config.dataset_configs.test_batch_size == jax.local_device_count(), (
        'The per-host batch size must be equal to the number of local devices. '
        'This ensures that each TPU device is processing different views of '
'the same original video.')
total_test_steps = int(
np.ceil(dataset.meta_data['num_test_examples'] /
(config.get('dataset_configs.test_batch_size') *
config.get('dataset_configs.num_test_clips') *
jax.process_count())))
steps_per_test = config.get('steps_per_test') or total_test_steps
if not log_eval_steps:
raise ValueError("'log_eval_steps' should be specified in the config.")
checkpoint_steps = config.get('checkpoint_steps') or log_eval_steps
log_summary_steps = config.get('log_summary_steps') or log_eval_steps
# Ceil rounding such that we include the last incomplete batch.
total_eval_steps = int(
np.ceil(dataset.meta_data['num_eval_examples'] / config.batch_size))
steps_per_eval = config.get('steps_per_eval') or total_eval_steps
train_metrics, extra_training_logs = [], []
train_summary, eval_summary = None, None
chrono = train_utils.Chrono(
first_step=start_step,
total_steps=total_steps,
steps_per_epoch=steps_per_epoch,
global_bs=config.batch_size,
accum_train_time=int(jax_utils.unreplicate(train_state.accum_train_time)))
logging.info('Starting training loop at step %d.', start_step + 1)
report_progress = periodic_actions.ReportProgress(
num_train_steps=total_steps, writer=writer)
hooks = [report_progress]
if config.get('xprof', True) and lead_host:
hooks.append(periodic_actions.Profile(num_profile_steps=5, logdir=workdir))
if start_step == 0:
step0_log = {'num_trainable_params': num_trainable_params}
if gflops:
step0_log['gflops'] = gflops
writer.write_scalars(1, step0_log)
# Manually defragment memory before starting training, if we are using the
# tfrt runtime.
do_memory_defrag = False
if config.get('do_memory_defrag', False):
client = jax.lib.xla_bridge.get_backend()
try:
logging.info('Defragmenting memory')
client.defragment()
do_memory_defrag = True
except RuntimeError:
logging.warn('Memory defragmentation not possible, use the tfrt runtime')
for step in range(start_step + 1, total_steps + 1):
with jax.profiler.StepTraceContext('train', step_num=step):
train_batch = next(dataset.train_iter)
train_state, t_metrics, lr = train_step_pmapped(train_state, train_batch)
# This will accumulate metrics in TPU memory up to the point that we log
# them. This is no problem for small metrics but may be a problem for
# large (e.g. segmentation) metrics. An alternative is to set
# `log_summary_steps` to a small number, or to use
# `train_utils.unreplicate_and_get` here instead of right before writing
# summaries, but that means in each step, we have data transfer between
# tpu and host, which might slow down the training.
train_metrics.append(t_metrics)
# Additional training logs: learning rate:
extra_training_logs.append({'learning_rate': lr})
for h in hooks:
# Catch exception in case XProf fails.
try:
h(step)
except ValueError as error:
logging.exception('Hook failed: %r', error)
chrono.pause() # Below are once-in-a-while ops -> pause.
###################### LOG TRAIN SUMMARY ########################
if (step % log_summary_steps == 1) or (step == total_steps):
if lead_host:
chrono.tick(step, writer=writer)
train_summary = train_utils.log_train_summary(
step=step,
train_metrics=jax.tree_map(train_utils.unreplicate_and_get,
train_metrics),
extra_training_logs=jax.tree_map(train_utils.unreplicate_and_get,
extra_training_logs),
writer=writer,
key_separator='/')
# Reset metric accumulation for next evaluation cycle.
train_metrics, extra_training_logs = [], []
if do_memory_defrag:
logging.info('Defragmenting memory')
client.defragment()
################### EVALUATION ################################
if (step % log_eval_steps == 1) or (step == total_steps):
with report_progress.timed('eval'):
if do_memory_defrag:
logging.info('Defragmenting memory')
client.defragment()
eval_metrics = []
additional_summary = None
if is_multilabel_model:
eval_logits = []
eval_labels = []
n_classes = dataset.meta_data['num_classes']
if get_confusion_matrix:
confusion_matrices = []
n_classes = dataset.meta_data['num_classes']
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(train_state)
for _ in range(steps_per_eval):
eval_batch = next(dataset.valid_iter)
e_metrics = eval_step_pmapped(train_state, eval_batch)
if is_multilabel_model:
e_metrics, logits_batch, labels_batch = e_metrics
# TODO(dehghani, lucic): Fetching from the device in each step might
# be an unnecessary penalty. Consider updating to async fetching
# as in CL/378384754.
eval_logits.append(vivit_train_utils.to_cpu(logits_batch))
eval_labels.append(vivit_train_utils.to_cpu(labels_batch))
if get_confusion_matrix:
e_metrics, conf_matrix = e_metrics
confusion_matrices.append(vivit_train_utils.to_cpu(conf_matrix))
# Fetch e_metrics to host and store.
eval_metrics.append(train_utils.unreplicate_and_get(e_metrics))
# Compute global metrics if applicable from all the batches.
if is_multilabel_model:
additional_summary = evaluation_lib.compute_mean_average_precision(
np.concatenate(eval_logits, axis=0),
np.concatenate(eval_labels, axis=0),
return_per_class_ap=n_classes < 10)
if get_confusion_matrix:
additional_summary = evaluation_lib.compute_confusion_matrix_metrics(
confusion_matrices, return_per_class_metrics=n_classes < 10)
if lead_host:
conf_matrix_image = vivit_train_utils.render_confusion_matrices(
confusion_matrices, normalization_method='rows')
conf_matrix_unnorm = vivit_train_utils.render_confusion_matrices(
confusion_matrices, normalization_method='none')
writer.write_images(
step, {'valid/conf_matrix': conf_matrix_image,
'valid/conf_matrix_unnormalized': conf_matrix_unnorm})
# Log eval summary.
eval_summary = train_utils.log_eval_summary(
step=step,
eval_metrics=eval_metrics,
extra_eval_summary=additional_summary,
writer=writer,
key_separator='/')
writer.flush()
del eval_metrics
if do_memory_defrag:
logging.info('Defragmenting memory')
client.defragment()
##################### CHECKPOINTING ###########################
if ((step % checkpoint_steps == 0 and step > 0) or
(step == total_steps)) and config.checkpoint:
with report_progress.timed('checkpoint'):
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(train_state)
if lead_host:
train_state.replace( # pytype: disable=attribute-error
accum_train_time=chrono.accum_train_time)
train_utils.save_checkpoint(workdir, train_state)
############# MULTICROP TESTING ############################
if (config.dataset_configs.get('do_multicrop_test') and
((step % log_test_steps == 1 and step > 1) or step == total_steps)):
with report_progress.timed('test'):
if do_memory_defrag:
logging.info('Defragmenting memory')
client.defragment()
test_metrics = []
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(train_state)
# At the end of training, evaluate on the whole test set.
if step == total_steps:
steps_per_test = total_test_steps
logging.info('Starting multicrop test')
for _ in range(steps_per_test):
test_batch = next(dataset.test_iter)
t_metrics = test_step_pmapped(train_state, test_batch)
# Fetch t_metrics to host and store.
test_metrics.append(train_utils.unreplicate_and_get(t_metrics))
# Log eval summary.
train_utils.log_eval_summary(
step=step,
eval_metrics=test_metrics,
writer=writer,
prefix='test',
key_separator='/')
logging.info('Completed multicrop test')
writer.flush()
# Free up some space.
del test_metrics
if do_memory_defrag:
logging.info('Defragmenting memory')
client.defragment()
chrono.resume() # un-pause now
# Wait until computations are done before exiting.
jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()
# Return the train and eval summary after last step for regression testing.
return train_state, train_summary, eval_summary
```
|
{
"source": "jfpalomeque/indeed_scrapper",
"score": 3
}
|
#### File: jfpalomeque/indeed_scrapper/scrapper.py
```python
import requests
try:
from bs4 import BeautifulSoup
except ImportError:
from BeautifulSoup import BeautifulSoup
import pandas as pd
import time
import streamlit as st
from geopy import Nominatim
import numpy as np
def scrapper(title, location = "United Kingdom"):
    #Add job title to search. Any space will be replaced with a + symbol
title = title
title = title.replace(" ", "+")
    #Add location to search. Any space will be replaced with a + symbol. Not case-sensitive
    location = location
location = location.replace(" ", "+")
base_url = "https://www.indeed.co.uk/jobs?q="+title+"&l="+location
    #First url to check for the number of ads to extract
    first_page = requests.get(first_url)
#conducting a request of the stated URL above:
first_page = requests.get(base_url)
#specifying a desired format of “page” using the html parser - this allows python to read the various components of the page, rather than treating it as one long string.
soup = BeautifulSoup(first_page.text, "html.parser")
#Extract all the posts of the page
posts =soup.find_all(name="div", attrs={"class":"row"})
def extract_n_ads(soup):
        #Extract the total number of ads in the search
n_ads_soup = soup.find("div", {"id":"searchCountPages"})
try:
n_ads = int(str(n_ads_soup)[str(n_ads_soup).find("of")+2:str(n_ads_soup).find("jobs")])
        except Exception:
            st.write("Error: could not parse the number of ads!")
            n_ads = 0
return n_ads
#Extract all the posts of the page
posts =soup.find_all(name="div", attrs={"class":"row"})
def element_extraction(post_list):
posts = post_list
#Extract all the data from the post lists
ads = []
for i in range(len(list(posts))):
ad = []
title = posts[i].find_all(name="a", attrs={"data-tn-element":"jobTitle"})[0].string
ad.append(title)
company = posts[i].find_all(name="span", attrs={"class":"company"})[0].text.strip()
ad.append(company)
try:
rating = float(posts[i].find_all(name="span", attrs={"class":"ratingsDisplay"})[0].text.strip())
except:
rating = "NaN"
ad.append(rating)
try:
location = posts[i].find_all(name="span", attrs={"class":"location accessible-contrast-color-location"})[0].text.strip()
except:
location = "NaN"
ad.append(location)
if "Remote" in str(posts[i]):
remote = "Remote"
elif "remote" in str(posts[i]):
remote = "Temporarily remote"
else:
remote = "No"
ad.append(remote)
ad_url =posts[i].find_all(name="a", attrs={"data-tn-element":"jobTitle"})[0].get('href')
url = "https://www.indeed.co.uk" + str(ad_url)
ad.append(url)
ads.append(ad)
return ads
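    # Hedged note, not part of the original script: each entry in `ads` is
    # [title, company, rating, location, remote, url], which is what the
    # DataFrame columns assigned further below are named after.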
search_index = 0
all_adverts = []
n_ads = extract_n_ads(soup)
while search_index < n_ads:
print(search_index)
#Creating an url with the search index
url = base_url+"&start="+str(search_index)
#conducting a request of the stated URL above:
page = requests.get(url)
#specifying a desired format of “page” using the html parser - this allows python to read the various components of the page, rather than treating it as one long string.
soup = BeautifulSoup(page.text, "html.parser")
#Extract all the posts of the page
posts =soup.find_all(name="div", attrs={"class":"row"})
all_adverts = all_adverts + (element_extraction(posts))
search_index = search_index + 15
print(len(all_adverts))
time.sleep(1)
return all_adverts
st.title("Welcome to the indeed.co.uk crapper")
if st.button("See Readme"):
st.write("""This is a scrapper for Indeed.co.uk, the job ads website. This project has three parts, an advance webscrapper, a little exploratory analysis of those ads and a
visualization tool using streamlite. Although sleepers were added in order to avoid IP ban by the website, some times the connection was blocked.
I recomend use an VPN anc change server, or a similar solution, if it's going to be tried more than once in a short time period.
The city name to coordinates translation is really slow too, making that the map take ages in appear, depending of the number of ads. Code in https://github.com/jfpalomeque/indeed_scrapper""")
st.write("## Write here the job title to search in Indeed.co.uk")
title_input = st.text_input("Job title", "Data Science")
if st.button("Run!"):
test = scrapper(title_input)
df = pd.DataFrame(test)
    df = df.drop_duplicates()
df.columns = ["job_Title", "company", "rating", "location", "remote","ad_url"]
n_ads_found = len(df)-1
df["rating"] = pd.to_numeric(df["rating"], errors="coerce")
avg_rate = df["rating"].mean()
location_freq = df.location.value_counts()
st.write("The total number of ads scrapped for " + title_input + " jobs is " + str(n_ads_found))
st.write("The average rating over 5 of ads scrapped for " + title_input + " jobs is " + str(avg_rate))
st.write("The locations of ads scrapped for " + title_input + " jobs are ")
st.write(location_freq)
st.write("Ads DataFrame")
st.write(df)
st.write("Map / Please wait, coords process can take a while!!")
geolocator = Nominatim(user_agent="indeed_scrapper")
location_coords = []
for line in df.location:
loc_row = []
location = geolocator.geocode(line)
loc_row.append(line)
loc_row.append(location.latitude)
loc_row.append(location.longitude)
location_coords.append(loc_row)
print (location.latitude, location.longitude)
time.sleep(1)
location_coords = pd.DataFrame(location_coords)
location_coords.columns = ["location", "latitude", "longitude"]
st.map(location_coords)
```
|
{
"source": "jfpanisset/Cryptomatte",
"score": 3
}
|
#### File: Cryptomatte/nuke/pymmh3.py
```python
import sys as _sys
if (_sys.version_info > (3, 0)):
def xrange( a, b, c ):
return list(range( a, b, c))
def xencode(x):
if isinstance(x, bytes) or isinstance(x, bytearray):
return x
else:
return x.encode()
else:
def xencode(x):
return x
del _sys
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in range( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
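def _example_hash_usage():
    # Hedged usage sketch, not part of the original pymmh3 module: the 32-bit
    # hash is deterministic for a given key/seed and accepts str, bytes or
    # bytearray keys, which all hash to the same value for the same content.
    a = hash('some/object_name', seed=0)
    b = hash(b'some/object_name', seed=0)
    assert a == b
    return a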
def hash128( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. '''
def hash128_x64( key, seed ):
''' Implements 128bit murmur3 hash for x64. '''
def fmix( k ):
k ^= k >> 33
k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
k = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
return k
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
#body
for block_start in range( 0, nblocks * 8, 8 ):
# ??? big endian?
k1 = key[ 2 * block_start + 7 ] << 56 | \
key[ 2 * block_start + 6 ] << 48 | \
key[ 2 * block_start + 5 ] << 40 | \
key[ 2 * block_start + 4 ] << 32 | \
key[ 2 * block_start + 3 ] << 24 | \
key[ 2 * block_start + 2 ] << 16 | \
key[ 2 * block_start + 1 ] << 8 | \
key[ 2 * block_start + 0 ]
k2 = key[ 2 * block_start + 15 ] << 56 | \
key[ 2 * block_start + 14 ] << 48 | \
key[ 2 * block_start + 13 ] << 40 | \
key[ 2 * block_start + 12 ] << 32 | \
key[ 2 * block_start + 11 ] << 24 | \
key[ 2 * block_start + 10 ] << 16 | \
key[ 2 * block_start + 9 ] << 8 | \
key[ 2 * block_start + 8 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[ tail_index + 14 ] << 48
if tail_size >= 14:
k2 ^= key[ tail_index + 13 ] << 40
if tail_size >= 13:
k2 ^= key[ tail_index + 12 ] << 32
if tail_size >= 12:
k2 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k2 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k2 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k2 ^= key[ tail_index + 8 ]
if tail_size > 8:
k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[ tail_index + 7 ] << 56
if tail_size >= 7:
k1 ^= key[ tail_index + 6 ] << 48
if tail_size >= 6:
k1 ^= key[ tail_index + 5 ] << 40
if tail_size >= 5:
k1 ^= key[ tail_index + 4 ] << 32
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
return ( h2 << 64 | h1 )
def hash128_x86( key, seed ):
''' Implements 128bit murmur3 hash for x86. '''
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
h3 = seed
h4 = seed
c1 = 0x239b961b
c2 = 0xab0e9789
c3 = 0x38b34ae5
c4 = 0xa1e38b93
#body
for block_start in range( 0, nblocks * 16, 16 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k2 = key[ block_start + 7 ] << 24 | \
key[ block_start + 6 ] << 16 | \
key[ block_start + 5 ] << 8 | \
key[ block_start + 4 ]
k3 = key[ block_start + 11 ] << 24 | \
key[ block_start + 10 ] << 16 | \
key[ block_start + 9 ] << 8 | \
key[ block_start + 8 ]
k4 = key[ block_start + 15 ] << 24 | \
key[ block_start + 14 ] << 16 | \
key[ block_start + 13 ] << 8 | \
key[ block_start + 12 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( c3 * k2 ) & 0xFFFFFFFF
h2 ^= k2
h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
h2 = ( h2 + h3 ) & 0xFFFFFFFF
h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF
k3 = ( c3 * k3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( c4 * k3 ) & 0xFFFFFFFF
h3 ^= k3
h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
h3 = ( h3 + h4 ) & 0xFFFFFFFF
h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF
k4 = ( c4 * k4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( c1 * k4 ) & 0xFFFFFFFF
h4 ^= k4
h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
k3 = 0
k4 = 0
tail_size = length & 15
if tail_size >= 15:
k4 ^= key[ tail_index + 14 ] << 16
if tail_size >= 14:
k4 ^= key[ tail_index + 13 ] << 8
if tail_size >= 13:
k4 ^= key[ tail_index + 12 ]
if tail_size > 12:
k4 = ( k4 * c4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( k4 * c1 ) & 0xFFFFFFFF
h4 ^= k4
if tail_size >= 12:
k3 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k3 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k3 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k3 ^= key[ tail_index + 8 ]
if tail_size > 8:
k3 = ( k3 * c3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( k3 * c4 ) & 0xFFFFFFFF
h3 ^= k3
if tail_size >= 8:
k2 ^= key[ tail_index + 7 ] << 24
if tail_size >= 7:
k2 ^= key[ tail_index + 6 ] << 16
if tail_size >= 6:
k2 ^= key[ tail_index + 5 ] << 8
if tail_size >= 5:
k2 ^= key[ tail_index + 4 ]
if tail_size > 4:
k2 = ( k2 * c2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( k2 * c3 ) & 0xFFFFFFFF
h2 ^= k2
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h3 ^= length
h4 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h3 = fmix( h3 )
h4 = fmix( h4 )
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )
key = bytearray( xencode(key) )
if x64arch:
return hash128_x64( key, seed )
else:
return hash128_x86( key, seed )
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in range(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
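def _example_hash_variants():
    # Hedged sketch, not part of the original module: the three public helpers
    # and the kind of value each returns for the same key and seed.
    h32 = hash('example')      # signed 32-bit int
    h64 = hash64('example')    # tuple of two signed 64-bit ints
    h128 = hash128('example')  # single unsigned 128-bit int
    return h32, h64, h128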
if __name__ == "__main__":
    import argparse
    import sys
parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
parser.add_argument( '--seed', type = int, default = 0 )
parser.add_argument( 'strings', default = [], nargs='+')
opts = parser.parse_args()
for str_to_hash in opts.strings:
sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash ) ) )
```
|
{
"source": "jfpanisset/flameTimewarpML",
"score": 2
}
|
#### File: flameTimewarpML/bundle/inference_flame_tw.py
```python
from ntpath import basename
import os
import sys
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
import warnings
import _thread
from queue import Queue, Empty
from pprint import pprint, pformat
import time
import psutil
import signal
import multiprocessing as mp
import inference_common
warnings.filterwarnings("ignore")
IOThreadsFlag = True
IOProcesses = []
cv2.setNumThreads(1)
# Exception handler
def exception_handler(exctype, value, tb):
import traceback
locks = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locks')
cmd = 'rm -f ' + locks + '/*'
# os.system(cmd)
pprint('%s in %s' % (value, exctype))
pprint(traceback.format_exception(exctype, value, tb))
sys.__excepthook__(exctype, value, tb)
input("Press Enter to continue...")
sys.excepthook = exception_handler
# ctrl+c handler
def signal_handler(sig, frame):
global IOThreadsFlag
IOThreadsFlag = False
time.sleep(0.1)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def clear_write_buffer(args, write_buffer, output_duration):
global IOThreadsFlag
global IOProcesses
number_of_write_threads = 4
print('rendering %s frames to %s' % (output_duration, args.output))
pbar = tqdm(total=output_duration, unit='frame')
while IOThreadsFlag:
alive_processes = []
for process in IOProcesses:
if process.is_alive():
alive_processes.append(process)
else:
process.join(timeout=0)
IOProcesses = list(alive_processes)
item = write_buffer.get()
frame_number, image_data = item
if frame_number == -1:
pbar.close() # type: ignore
IOThreadsFlag = False
break
path = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(frame_number))
if len(IOProcesses) < number_of_write_threads:
try:
p = mp.Process(target=cv2.imwrite, args=(path, image_data[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF], ))
p.start()
IOProcesses.append(p)
except:
try:
cv2.imwrite(path, image_data[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
except Exception as e:
                    print ('Error writing %s: %s' % (path, e))
else:
try:
cv2.imwrite(path, image_data[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
except Exception as e:
                print ('Error writing %s: %s' % (path, e))
pbar.update(1)
def build_read_buffer(user_args, read_buffer, videogen):
global IOThreadsFlag
for frame in videogen:
frame_data = cv2.imread(os.path.join(user_args.input, frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
read_buffer.put(frame_data)
read_buffer.put(None)
def make_inference_rational(model, I0, I1, ratio, rthreshold=0.02, maxcycles=5, UHD=False, always_interp=False):
I0_ratio = 0.0
I1_ratio = 1.0
rational_m = torch.mean(I0) * ratio + torch.mean(I1) * (1 - ratio)
if not always_interp:
if ratio <= I0_ratio + rthreshold / 2:
return I0
if ratio >= I1_ratio - rthreshold / 2:
return I1
for inference_cycle in range(0, maxcycles):
middle = model.inference(I0, I1, UHD)
middle_ratio = (I0_ratio + I1_ratio) / 2
if not always_interp:
if ratio - (rthreshold / 2) <= middle_ratio <= ratio + (rthreshold / 2):
return middle # + (rational_m - torch.mean(middle)).expand_as(middle)
if ratio > middle_ratio:
I0 = middle
I0_ratio = middle_ratio
else:
I1 = middle
I1_ratio = middle_ratio
return middle # + (rational_m - torch.mean(middle)).expand_as(middle)
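# Hedged illustration, not part of the original file: for a requested ratio of
# 0.3 with the default rthreshold=0.02 and maxcycles=5, the bisection above
# visits the midpoints 0.5, 0.25, 0.375, 0.3125 and 0.28125, stopping either
# when a midpoint falls within rthreshold/2 of the target or after maxcycles,
# and returning the interpolated frame from the last cycle.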
def make_inference_rational_cpu(model, I0, I1, ratio, frame_num, w, h, write_buffer, rthreshold=0.02, maxcycles=8, UHD=False, always_interp=False):
device = torch.device("cpu")
torch.set_grad_enabled(False)
I0_ratio = 0.0
I1_ratio = 1.0
rational_m = torch.mean(I0) * ratio + torch.mean(I1) * (1 - ratio)
if not always_interp:
if ratio <= I0_ratio + rthreshold / 2:
I0 = (((I0[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, I0[:h, :w]))
return
if ratio >= I1_ratio - rthreshold / 2:
I1 = (((I1[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, I1[:h, :w]))
return
for inference_cycle in range(0, maxcycles):
middle = model.inference(I0, I1, UHD)
middle_ratio = (I0_ratio + I1_ratio) / 2
if not always_interp:
if ratio - (rthreshold / 2) <= middle_ratio <= ratio + (rthreshold / 2):
# middle = middle + (rational_m - torch.mean(middle)).expand_as(middle)
middle = (((middle[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, middle[:h, :w]))
return
if ratio > middle_ratio:
middle = middle.detach()
I0 = middle.to(device, non_blocking=True)
I0_ratio = middle_ratio
else:
middle = middle.detach()
I1 = middle.to(device, non_blocking=True)
I1_ratio = middle_ratio
# middle + (rational_m - torch.mean(middle)).expand_as(middle)
middle = (((middle[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, middle[:h, :w]))
return
def dictify(r, root=True):
from copy import copy
if root:
return {r.tag: dictify(r, False)}
d = copy(r.attrib)
if r.text:
d["_text"] = r.text
for x in r.findall("./*"):
if x.tag not in d:
d[x.tag] = []
d[x.tag].append(dictify(x, False))
return d
def bake_flame_tw_setup(tw_setup_path, start, end):
# parses tw setup from flame and returns dictionary
# with baked frame - value pairs
def extrapolate_linear(xa, ya, xb, yb, xc):
m = (ya - yb) / (xa - xb)
yc = (xc - xb) * m + yb
return yc
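    # Hedged worked example, not part of the original file: the line through
    # (0, 10) and (1, 12) has slope 2, so extrapolate_linear(0, 10, 1, 12, 5)
    # returns (5 - 1) * 2 + 12 = 20.0.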
import xml.etree.ElementTree as ET
frame_value_map = {}
with open(tw_setup_path, 'r') as tw_setup_file:
tw_setup_string = tw_setup_file.read()
tw_setup_file.close()
tw_setup_xml = ET.fromstring(tw_setup_string)
tw_setup = dictify(tw_setup_xml)
# start = int(tw_setup['Setup']['Base'][0]['Range'][0]['Start'])
# end = int(tw_setup['Setup']['Base'][0]['Range'][0]['End'])
# TW_Timing_size = int(tw_setup['Setup']['State'][0]['TW_Timing'][0]['Channel'][0]['Size'][0]['_text'])
TW_SpeedTiming_size = int(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['Size'][0]['_text'])
TW_RetimerMode = int(tw_setup['Setup']['State'][0]['TW_RetimerMode'][0]['_text'])
parsed_and_baked_path = os.path.join(os.path.dirname(args.setup), 'parsed_and_baked.txt')
if sys.platform == 'darwin':
parser_and_baker = os.path.join(os.path.dirname(__file__), 'flame_channel_parser', 'bin', 'bake_flame_channel_mac')
else:
parser_and_baker = os.path.join(os.path.dirname(__file__), 'flame_channel_parser', 'bin', 'bake_flame_channel')
if TW_SpeedTiming_size == 1 and TW_RetimerMode == 0:
# just constant speed change with no keyframes set
x = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['Frame'][0]['_text'])
y = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['Value'][0]['_text'])
ldx = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['LHandle_dX'][0]['_text'])
ldy = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['LHandle_dY'][0]['_text'])
rdx = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['RHandle_dX'][0]['_text'])
rdy = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['RHandle_dY'][0]['_text'])
for frame_number in range(start, end+1):
frame_value_map[frame_number] = extrapolate_linear(x + ldx, y + ldy, x + rdx, y + rdy, frame_number)
return frame_value_map
    # add point tangents from vectors to match older version of setup
# used by Julik's parser
from xml.dom import minidom
xml = minidom.parse(tw_setup_path)
keys = xml.getElementsByTagName('Key')
for key in keys:
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
rdx = key.getElementsByTagName('RHandle_dX')
if rdx:
rdx = (rdx[0].firstChild.nodeValue)
rdy = key.getElementsByTagName('RHandle_dY')
if rdy:
rdy = (rdy[0].firstChild.nodeValue)
ldx = key.getElementsByTagName('LHandle_dX')
if ldx:
ldx = (ldx[0].firstChild.nodeValue)
ldy = key.getElementsByTagName('LHandle_dY')
if ldy:
ldy = (ldy[0].firstChild.nodeValue)
lx = xml.createElement('LHandleX')
lx.appendChild(xml.createTextNode('{:.6f}'.format(float(frame) + float(ldx))))
key.appendChild(lx)
ly = xml.createElement('LHandleY')
ly.appendChild(xml.createTextNode('{:.6f}'.format(float(value) + float(ldy))))
key.appendChild(ly)
rx = xml.createElement('RHandleX')
rx.appendChild(xml.createTextNode('{:.6f}'.format(float(frame) + float(rdx))))
key.appendChild(rx)
ry = xml.createElement('RHandleY')
ry.appendChild(xml.createTextNode('{:.6f}'.format(float(value) + float(rdy))))
key.appendChild(ry)
xml_string = xml.toxml()
dirname, name = os.path.dirname(tw_setup_path), os.path.basename(tw_setup_path)
xml_path = os.path.join(dirname, 'fix_' + name)
with open(xml_path, 'a') as xml_file:
xml_file.write(xml_string)
xml_file.close()
intp_start = start
intp_end = end
if TW_RetimerMode == 0:
tw_speed = {}
tw_speed_frames = []
TW_Speed = xml.getElementsByTagName('TW_Speed')
keys = TW_Speed[0].getElementsByTagName('Key')
for key in keys:
index = key.getAttribute('Index')
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
tw_speed[int(index)] = {'frame': int(frame), 'value': float(value)}
tw_speed_frames.append(int(frame))
intp_start = min(start, min(tw_speed_frames))
intp_end = max(end, max(tw_speed_frames))
else:
tw_timing = {}
tw_timing_frames = []
TW_Timing = xml.getElementsByTagName('TW_Timing')
keys = TW_Timing[0].getElementsByTagName('Key')
for key in keys:
index = key.getAttribute('Index')
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
tw_timing[int(index)] = {'frame': int(frame), 'value': float(value)}
tw_timing_frames.append(int(frame))
intp_start = min(start, min(tw_timing_frames))
intp_end = max(end, max(tw_timing_frames))
tw_channel_name = 'Speed' if TW_RetimerMode == 0 else 'Timing'
cmd = parser_and_baker + ' -c ' + tw_channel_name
cmd += ' -s ' + str(intp_start) + ' -e ' + str(intp_end)
cmd += ' --to-file ' + parsed_and_baked_path + ' ' + xml_path
os.system(cmd)
if not os.path.isfile(parsed_and_baked_path):
print ('can not find parsed channel file %s' % parsed_and_baked_path)
input("Press Enter to continue...")
sys.exit(1)
tw_channel = {}
with open(parsed_and_baked_path, 'r') as parsed_and_baked:
import re
# taken from Julik's parser
CORRELATION_RECORD = re.compile(
r"""
^([-]?\d+) # -42 or 42
\t # tab
(
[-]?(\d+(\.\d*)?) # "-1" or "1" or "1.0" or "1."
| # or:
\.\d+ # ".2"
)
([eE][+-]?[0-9]+)? # "1.2e3", "1.2e-3" or "1.2e+3"
$
""", re.VERBOSE)
lines = parsed_and_baked.readlines()
for i, line in enumerate(lines):
line = line.rstrip()
m = CORRELATION_RECORD.match(line)
if m is not None:
frame_number = int(m.group(1))
value = float(m.group(2))
tw_channel[frame_number] = value
if TW_RetimerMode == 1:
# job's done for 'Timing' channel
return tw_channel
else:
        # speed-based timewarp needs a bit more love
# to solve frame values against speed channel
# with the help of anchor frames in SpeedTiming channel
tw_speed_timing = {}
TW_SpeedTiming = xml.getElementsByTagName('TW_SpeedTiming')
keys = TW_SpeedTiming[0].getElementsByTagName('Key')
for key in keys:
index = key.getAttribute('Index')
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
tw_speed_timing[int(index)] = {'frame': int(frame), 'value': float(value)}
if tw_speed_timing[0]['frame'] > start:
# we need to extrapolate backwards from the first
# keyframe in SpeedTiming channel
anchor_frame_value = tw_speed_timing[0]['value']
for frame_number in range(tw_speed_timing[0]['frame'] - 1, start - 1, -1):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step_back = tw_channel[min(list(tw_channel.keys()))] / 100
else:
step_back = (tw_channel[frame_number + 1] + tw_channel[frame_number]) / 200
frame_value_map[frame_number] = anchor_frame_value - step_back
anchor_frame_value = frame_value_map[frame_number]
# build up frame values between keyframes of SpeedTiming channel
for key_frame_index in range(0, len(tw_speed_timing.keys()) - 1):
            # The value from my guess algorithm is close to the one in Flame but not
            # exact, and the error accumulates. So the quick and dirty way is to do a
            # forward and a backward pass and mix them rationally
range_start = tw_speed_timing[key_frame_index]['frame']
range_end = tw_speed_timing[key_frame_index + 1]['frame']
if range_end == range_start + 1:
# keyframes on next frames, no need to interpolate
frame_value_map[range_start] = tw_speed_timing[key_frame_index]['value']
frame_value_map[range_end] = tw_speed_timing[key_frame_index + 1]['value']
continue
forward_pass = {}
anchor_frame_value = tw_speed_timing[key_frame_index]['value']
forward_pass[range_start] = anchor_frame_value
for frame_number in range(range_start + 1, range_end):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step = tw_channel[max(list(tw_channel.keys()))] / 100
else:
step = (tw_channel[frame_number] + tw_channel[frame_number + 1]) / 200
forward_pass[frame_number] = anchor_frame_value + step
anchor_frame_value = forward_pass[frame_number]
forward_pass[range_end] = tw_speed_timing[key_frame_index + 1]['value']
backward_pass = {}
anchor_frame_value = tw_speed_timing[key_frame_index + 1]['value']
backward_pass[range_end] = anchor_frame_value
for frame_number in range(range_end - 1, range_start -1, -1):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step_back = tw_channel[min(list(tw_channel.keys()))] / 100
else:
step_back = (tw_channel[frame_number + 1] + tw_channel[frame_number]) / 200
backward_pass[frame_number] = anchor_frame_value - step_back
anchor_frame_value = backward_pass[frame_number]
backward_pass[range_start] = tw_speed_timing[key_frame_index]['value']
# create easy in and out soft mixing curve
import numpy as np
from scipy import interpolate
ctr =np.array( [(0 , 0), (0.1, 0), (0.9, 1), (1, 1)])
x=ctr[:,0]
y=ctr[:,1]
interp = interpolate.CubicSpline(x, y)
work_range = list(forward_pass.keys())
ratio = 0
rstep = 1 / len(work_range)
for frame_number in sorted(work_range):
frame_value_map[frame_number] = forward_pass[frame_number] * (1 - interp(ratio)) + backward_pass[frame_number] * interp(ratio)
ratio += rstep
last_key_index = list(sorted(tw_speed_timing.keys()))[-1]
if tw_speed_timing[last_key_index]['frame'] < end:
# we need to extrapolate further on from the
# last keyframe in SpeedTiming channel
anchor_frame_value = tw_speed_timing[last_key_index]['value']
frame_value_map[tw_speed_timing[last_key_index]['frame']] = anchor_frame_value
for frame_number in range(tw_speed_timing[last_key_index]['frame'] + 1, end + 1):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step = tw_channel[max(list(tw_channel.keys()))] / 100
else:
step = (tw_channel[frame_number] + tw_channel[frame_number + 1]) / 200
frame_value_map[frame_number] = anchor_frame_value + step
anchor_frame_value = frame_value_map[frame_number]
return frame_value_map
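# Hedged illustration, not part of the original file: the mapping returned by
# bake_flame_tw_setup() is consumed below as {output_frame: source_frame_value},
# e.g. roughly {1: 1.0, 2: 1.5, 3: 2.0} for a constant 50% speed setup; the
# integer part selects the source frame pair and the fractional part becomes
# the interpolation ratio.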
if __name__ == '__main__':
start = time.time()
msg = 'Timewarp using FX setup from Flame\n'
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('--input', dest='input', type=str, default=None, help='folder with input sequence')
parser.add_argument('--output', dest='output', type=str, default=None, help='folder to output sequence to')
parser.add_argument('--setup', dest='setup', type=str, default=None, help='flame tw setup to use')
parser.add_argument('--record_in', dest='record_in', type=int, default=1, help='record in point relative to tw setup')
parser.add_argument('--record_out', dest='record_out', type=int, default=0, help='record out point relative to tw setup')
parser.add_argument('--model', dest='model', type=str, default='./trained_models/default/v2.0.model')
parser.add_argument('--UHD', dest='UHD', action='store_true', help='flow size 1/4')
parser.add_argument('--cpu', dest='cpu', action='store_true', help='do not use GPU at all, process only on CPU')
args = parser.parse_args()
if (args.output is None or args.input is None or args.setup is None):
parser.print_help()
sys.exit()
print('Initializing TimewarpML from Flame setup...')
img_formats = ['.exr',]
src_files_list = []
for f in os.listdir(args.input):
name, ext = os.path.splitext(f)
if ext in img_formats:
src_files_list.append(f)
input_duration = len(src_files_list)
if not input_duration:
print('not enough input frames: %s given' % input_duration)
input("Press Enter to continue...")
sys.exit()
if not args.record_out:
args.record_out = input_duration
frame_value_map = bake_flame_tw_setup(args.setup, args.record_in, args.record_out)
# input("Press Enter to continue...")
# sys.exit(0)
start_frame = 1
src_files_list.sort()
src_files = {x:os.path.join(args.input, file_path) for x, file_path in enumerate(src_files_list, start=start_frame)}
output_folder = os.path.abspath(args.output)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_duration = (args.record_out - args.record_in) + 1
if torch.cuda.is_available() and not args.cpu:
# Process on GPU
if 'v1.8.model' in args.model:
from model.RIFE_HD import Model # type: ignore
else:
from model.RIFE_HDv2 import Model # type: ignore
model = Model()
model.load_model(args.model, -1)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
write_buffer = Queue(maxsize=mp.cpu_count() - 3)
_thread.start_new_thread(clear_write_buffer, (args, write_buffer, output_duration))
src_start_frame = cv2.imread(src_files.get(start_frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
h, w, _ = src_start_frame.shape
ph = ((h - 1) // 64 + 1) * 64
pw = ((w - 1) // 64 + 1) * 64
padding = (0, pw - w, 0, ph - h)
device = torch.device("cuda")
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
output_frame_number = 1
for frame_number in range(args.record_in, args.record_out +1):
I0_frame_number = int(frame_value_map[frame_number])
if I0_frame_number < 1:
I0_image = cv2.imread(src_files.get(1), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
if I0_frame_number >= input_duration:
I0_image = cv2.imread(src_files.get(input_duration), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
I1_frame_number = I0_frame_number + 1
ratio = frame_value_map[frame_number] - int(frame_value_map[frame_number])
# pprint ('frame_number: %s, value: %s' % (frame_number, frame_value_map[frame_number]))
# pprint ('I0_frame_number: %s, I1_frame_number: %s, ratio: %s' % (I0_frame_number, I1_frame_number, ratio))
I0_image = cv2.imread(src_files.get(I0_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I1_image = cv2.imread(src_files.get(I1_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I0 = torch.from_numpy(np.transpose(I0_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I1 = torch.from_numpy(np.transpose(I1_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
mid = make_inference_rational(model, I0, I1, ratio, UHD = args.UHD)
mid = (((mid[0]).cpu().numpy().transpose(1, 2, 0)))
write_buffer.put((output_frame_number, mid[:h, :w]))
output_frame_number += 1
# send write loop exit code
write_buffer.put((-1, -1))
        # it should set IOThreadsFlag to False when it returns
while(IOThreadsFlag):
time.sleep(0.01)
else:
        # Process on CPU
if 'v1.8.model' in args.model:
from model_cpu.RIFE_HD import Model # type: ignore
else:
from model_cpu.RIFE_HDv2 import Model # type: ignore
model = Model()
model.load_model(args.model, -1)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
src_start_frame = cv2.imread(src_files.get(start_frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
h, w, _ = src_start_frame.shape
ph = ((h - 1) // 64 + 1) * 64
pw = ((w - 1) // 64 + 1) * 64
padding = (0, pw - w, 0, ph - h)
device = torch.device('cpu')
torch.set_grad_enabled(False)
sim_workers, thread_ram = inference_common.safe_threads_number(h, w)
'''
max_cpu_workers = mp.cpu_count() - 2
available_ram = psutil.virtual_memory()[1]/( 1024 ** 3 )
megapixels = ( h * w ) / ( 10 ** 6 )
thread_ram = megapixels * 2.4
sim_workers = round( available_ram / thread_ram )
if sim_workers < 1:
sim_workers = 1
elif sim_workers > max_cpu_workers:
sim_workers = max_cpu_workers
print ('---\nFree RAM: %s Gb available' % '{0:.1f}'.format(available_ram))
print ('Image size: %s x %s' % ( w, h,))
print ('Peak memory usage estimation: %s Gb per CPU thread ' % '{0:.1f}'.format(thread_ram))
print ('Using %s CPU worker thread%s (of %s available)\n---' % (sim_workers, '' if sim_workers == 1 else 's', mp.cpu_count()))
if thread_ram > available_ram:
print ('Warning: estimated peak memory usage is greater then RAM avaliable')
'''
write_buffer = mp.Queue(maxsize=mp.cpu_count() - 3)
_thread.start_new_thread(clear_write_buffer, (args, write_buffer, input_duration))
active_workers = []
output_frame_number = 1
last_thread_time = time.time()
for frame_number in range(args.record_in, args.record_out +1):
I0_frame_number = int(frame_value_map[frame_number])
if I0_frame_number < 1:
I0_image = cv2.imread(src_files.get(1), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
if I0_frame_number >= input_duration:
I0_image = cv2.imread(src_files.get(input_duration), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
I1_frame_number = I0_frame_number + 1
ratio = frame_value_map[frame_number] - int(frame_value_map[frame_number])
# pprint ('frame_number: %s, value: %s' % (frame_number, frame_value_map[frame_number]))
# pprint ('I0_frame_number: %s, I1_frame_number: %s, ratio: %s' % (I0_frame_number, I1_frame_number, ratio))
I0_image = cv2.imread(src_files.get(I0_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I1_image = cv2.imread(src_files.get(I1_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I0 = torch.from_numpy(np.transpose(I0_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I1 = torch.from_numpy(np.transpose(I1_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
p = mp.Process(target=make_inference_rational_cpu, args=(model, I0, I1, ratio, output_frame_number, w, h, write_buffer), kwargs = {'UHD': args.UHD})
p.start()
active_workers.append(p)
if (time.time() - last_thread_time) < (thread_ram / 8):
if sim_workers > 1:
time.sleep(thread_ram/8)
while len(active_workers) >= sim_workers:
finished_workers = []
alive_workers = []
for worker in active_workers:
if not worker.is_alive():
finished_workers.append(worker)
else:
alive_workers.append(worker)
active_workers = list(alive_workers)
time.sleep(0.01)
last_thread_time = time.time()
output_frame_number += 1
while len(active_workers) >= sim_workers:
finished_workers = []
alive_workers = []
for worker in active_workers:
if not worker.is_alive():
finished_workers.append(worker)
else:
alive_workers.append(worker)
active_workers = list(alive_workers)
time.sleep(0.01)
last_thread_time = time.time()
# wait for all active worker threads left to finish
for p in active_workers:
p.join()
# send write loop exit code
write_buffer.put((-1, -1))
        # it should set IOThreadsFlag to False when it returns
while(IOThreadsFlag):
time.sleep(0.01)
for p in IOProcesses:
p.join(timeout=8)
for p in IOProcesses:
p.terminate()
p.join(timeout=0)
import hashlib
lockfile = os.path.join('locks', hashlib.sha1(output_folder.encode()).hexdigest().upper() + '.lock')
if os.path.isfile(lockfile):
os.remove(lockfile)
# input("Press Enter to continue...")
sys.exit(0)
```
|
{
"source": "j-f-paquet/frzout-duke2020",
"score": 2
}
|
#### File: frzout/test/test_hrg.py
```python
import random
import numpy as np
from scipy import integrate
from scipy import special
from nose.tools import assert_almost_equal, assert_warns_regex
from .. import HRG, species_dict
hbarc = 0.1973269788
def test_hrg():
ID, info = random.choice(list(species_dict.items()))
m = info['mass']
g = info['degen']
sign = -1 if info['boson'] else 1
prefactor = g / (2*np.pi**2*hbarc**3)
if info['has_anti']:
prefactor *= 2
T = random.uniform(.13, .15)
hrg = HRG(T, species=[ID], res_width=False)
assert_almost_equal(
hrg.T, T, delta=1e-15,
msg='incorrect temperature'
)
n = np.arange(1, 50)
density = prefactor * m*m*T * (
(-sign)**(n-1)/n * special.kn(2, n*m/T)
).sum()
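    # Hedged note, not part of the original test: this is the standard series
    # for the ideal-gas density, prefactor * m^2 * T * sum_k (-sign)^(k-1)/k *
    # K_2(k*m/T), obtained by expanding the occupation 1/(exp(E/T) + sign) in
    # powers of exp(-E/T) and integrating each term analytically.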
assert_almost_equal(
hrg.density(), density, delta=1e-12,
msg='incorrect density'
)
def integrand(p):
E = np.sqrt(m*m + p*p)
return p*p * E / (np.exp(E/T) + sign)
energy_density = prefactor * integrate.quad(integrand, 0, 10)[0]
assert_almost_equal(
hrg.energy_density(), energy_density, delta=1e-12,
msg='incorrect energy density'
)
def integrand(p):
E = np.sqrt(m*m + p*p)
return p**4 / (3*E) / (np.exp(E/T) + sign)
pressure = prefactor * integrate.quad(integrand, 0, 10)[0]
assert_almost_equal(
hrg.pressure(), pressure, delta=1e-12,
msg='incorrect pressure'
)
with assert_warns_regex(Warning, 'high particlization temperature'):
HRG(.193)
def test_bulk_corrections():
hrg = HRG(.15, res_width=False)
p0 = hrg.pressure()
Pi_min, Pi_max = hrg.Pi_lim()
assert Pi_min == -p0, \
'The minimum bulk pressure should equal negative the ideal pressure.'
assert 0 < Pi_max <= p0, \
'The maximum bulk pressure should be <= the ideal pressure.'
assert hrg.bulk_scale_factors(0) == (1, 1), \
'The scale factors should equal one at zero bulk pressure.'
nscale, pscale = hrg.bulk_scale_factors(random.uniform(.1, .9)*Pi_max)
assert nscale < 1 and pscale > 1, \
'Incorrect scale factors for positive bulk pressure.'
nscale, pscale = hrg.bulk_scale_factors(random.uniform(.1, .9)*Pi_min)
assert nscale > 1 and pscale < 1, \
'Incorrect scale factors for negative bulk pressure.'
nscale, pscale = hrg.bulk_scale_factors(-p0)
assert nscale > 1 and pscale == 0, \
'Incorrect scale factors for zero total pressure.'
assert hrg.bulk_scale_factors(-1.1*p0) == (nscale, pscale), \
'Scale factors should not change outside the Pi range.'
assert \
hrg.bulk_scale_factors(Pi_max) == hrg.bulk_scale_factors(1.5*Pi_max), \
'Scale factors should not change outside the Pi range.'
```
#### File: frzout/test/test_sampler.py
```python
import numpy as np
from .. import Surface, HRG, sample
def test_dtype():
surface = Surface([1, 0, 0], [1, 0, 0], [0, 0])
hrg = HRG(.15)
parts = sample(surface, hrg)
dt = parts.dtype
assert dt.names == ('ID', 'x', 'p'), 'Incorrect dtype fields.'
assert dt['ID'].shape == (), 'Incorrect ID field shape.'
assert dt['x'].shape == (4,), 'Incorrect x field shape.'
    assert dt['p'].shape == (4,), 'Incorrect p field shape.'
def test_ymax():
hrg = HRG(.15)
ymax = np.random.uniform(.1, 1.)
surface = Surface(
np.array([[1., 0., 0.]]),
np.array([[1e3/hrg.density(), 0., 0.]]),
np.array([[0., 0.]]),
ymax=ymax
)
parts = sample(surface, hrg)
p = parts['p']
E, px, py, pz = p.T
y = .5*np.log((E + pz)/(E - pz))
assert np.all(np.fabs(y) < ymax), 'Rapidity outside ymax.'
def test_decay_f500():
hrg = HRG(.15, species=[9000221], decay_f500=True)
surface = Surface(
np.array([[1., 0., 0.]]),
np.array([[1e2/hrg.density(), 0., 0.]]),
np.array([[0., 0.]]),
)
parts = sample(surface, hrg)
i = parts['ID']
assert np.all((i == 111) | (i == 211) | (i == -211)), \
'f500 not decayed to pions.'
```
|
{
"source": "jfparadis/cosmic-swingset",
"score": 2
}
|
#### File: src/ag_pserver/main.py
```python
from twisted.internet.task import react, deferLater
from twisted.web import static, resource, server
from twisted.web.template import Element, XMLFile, renderer, flattenString
from twisted.internet import endpoints, defer, protocol
from twisted.python import usage
import wormhole
import treq
import os.path
import os
import json
import random
from twisted.python import log
import sys
log.startLogging(sys.stdout)
# TODO: Don't hardcode these.
INITIAL_TOKEN = '<PASSWORD>'
AG_BOOTSTRAP_PASSWORD = b'<PASSWORD>'
MAILBOX_URL = u"ws://relay.magic-wormhole.io:4000/v1"
#MAILBOX_URL = u"ws://10.0.2.24:4000/v1"
APPID = u"agoric.com/ag-testnet1/provisioning-tool"
htmldir = os.path.join(os.path.dirname(__file__), "html")
class SetConfigOptions(usage.Options):
pass
class AddPubkeysOptions(usage.Options):
optParameters = [
["controller", "c", "http://localhost:8002/vat", "controller's listening port for us to send control messages"],
]
class StartOptions(usage.Options):
optParameters = [
["mountpoint", "m", "/", "controller's top level web page"],
["listen", "l", "tcp:8001", "client-visible HTTP listening port"],
["controller", "c", "http://localhost:8002/vat", "controller's listening port for us to send control messages"],
]
class Options(usage.Options):
subCommands = [
['set-cosmos-config', None, SetConfigOptions, "Pipe output of 'ag-setup-cosmos show-config' to this command"],
['add-pubkeys', None, AddPubkeysOptions, 'Add public keys from saved database'],
['start', None, StartOptions, 'Start the HTTP server'],
]
optParameters = [
["home", None, os.path.join(os.environ["HOME"], '.ag-pserver'), "provisioning-server's state directory"],
]
class SendInputAndWaitProtocol(protocol.ProcessProtocol):
def __init__(self, d, input):
self.deferred = d
self.input = input
self.output = b''
def connectionMade(self):
self.transport.write(self.input)
self.transport.closeStdin()
def outReceived(self, data):
self.output += data
print(data.decode('latin-1'))
def errReceived(self, data):
print(data.decode('latin-1'), file=sys.stderr)
def processEnded(self, reason):
self.deferred.callback((reason.value.exitCode, self.output))
def cosmosConfigFile(home):
return os.path.join(home, 'cosmos-chain.json')
def pubkeyDatabase(home):
return os.path.join(home, 'pubkeys.jsona')
class ConfigElement(Element):
loader = XMLFile(os.path.join(htmldir, "index.html"))
@staticmethod
def gatherArgs(opts):
meta = {}
f = open(cosmosConfigFile(opts['home']))
config = f.read()
gr = '/usr/src/app/lib/git-revision.txt'
if os.path.exists(gr):
f = open(gr)
meta['package_git'] = f.read().strip()
else:
f = os.popen('git rev-parse --short HEAD')
sha = f.read().strip()
f = os.popen('git diff --quiet || echo -dirty')
meta['package_git'] = sha + f.read().strip()
pj = '/usr/src/app/package.json'
pjson = {}
if os.path.exists(pj):
f = open(pj)
pjson = json.load(f)
else:
pjpath = None
# Walk upwards from the current directory.
pj = os.path.abspath('package.json')
while pj != pjpath:
pjpath = pj
if os.path.exists(pjpath):
f = open(pjpath)
pjson = json.load(f)
break
pj = os.path.join(os.path.dirname(pjpath), '../package.json')
pj = os.path.abspath(pj)
meta['package_version'] = pjson.get('version', 'unknown')
meta['package_name'] = pjson.get('name', 'cosmic-swingset')
repo = pjson.get('repository', 'https://github.com/Agoric/cosmic-swingset')
cleanRev = meta['package_git'].replace('-dirty', '')
link = repo + '/commit/' + cleanRev
meta['package_repo'] = link
return [config, meta]
def __init__(self, config, meta):
self._config = config
self._meta = meta
@renderer
def config(self, request, tag):
tag.fillSlots(cosmos_config=self._config)
return tag
@renderer
def meta(self, request, tag):
tag.fillSlots(**self._meta)
return tag
class ResponseElement(ConfigElement):
loader = XMLFile(os.path.join(htmldir, "response-template.html"))
def __init__(self, code, nickname, *args):
super().__init__(*args)
self._code = code
self._nickname = nickname
@renderer
def code(self, request, tag):
return self._code
@renderer
def nickname(self, request, tag):
return self._nickname
class Provisioner(resource.Resource):
def __init__(self, reactor, o):
self.reactor = reactor
self.opts = o
@defer.inlineCallbacks
def build_page(self):
f = open(cosmosConfigFile(self.opts['home']))
config = f.read()
args = ConfigElement.gatherArgs(self.opts)
html = yield flattenString(None, ConfigElement(*args))
return html
def render_GET(self, req):
d = self.build_page()
def built(response):
req.write(response)
req.finish()
d.addCallback(built)
d.addErrback(log.err)
return server.NOT_DONE_YET
@defer.inlineCallbacks
def enablePubkey(reactor, opts, config, nickname, pubkey):
mobj = {
"type": "pleaseProvision",
"nickname": nickname,
"pubkey": pubkey,
}
# print("mobj:", mobj)
def ret(server_message):
return [mobj, server_message, config]
# FIXME: Make more resilient to DOS attacks, or attempts
# to drain all our agmedallions.
if INITIAL_TOKEN is not None:
retries = 10
code = None
while code != 0 and retries > 0:
if code is not None:
# Wait 3 seconds between sends.
yield deferLater(reactor, 3, lambda: None)
retries -= 1
rpcAddr = random.choice(config['rpcAddrs'])
print('transferring ' + INITIAL_TOKEN + ' to ' + pubkey + ' via ' + rpcAddr)
d = defer.Deferred()
processProtocol = SendInputAndWaitProtocol(d, AG_BOOTSTRAP_PASSWORD + b'\n')
program = 'ag-cosmos-helper'
reactor.spawnProcess(processProtocol, '/usr/local/bin/' + program, args=[
program, 'tx', 'send', config['bootstrapAddress'], pubkey,
INITIAL_TOKEN,
'--yes', '--chain-id', config['chainName'], '-ojson',
'--node',
'tcp://' + rpcAddr,
'--home', os.path.join(opts['home'], 'ag-cosmos-helper-statedir'),
'--broadcast-mode', 'block' # Don't return until committed.
])
code, output = yield d
if code == 0:
oj = json.loads(output.decode('utf-8'))
code = oj.get('code', code)
print('transfer returned ' + str(code))
if code != 0:
return ret({"ok": False, "error": 'transfer returned ' + str(code)})
controller_url = opts["controller"]
print('contacting ' + controller_url)
m = json.dumps(mobj)
# this HTTP request goes to the controller machine, where it should
# be routed to vat-provisioning.js and the pleaseProvision() method.
try:
resp = yield treq.post(controller_url, m.encode('utf-8'), reactor=reactor,
headers={
b'Content-Type': [b'application/json'],
b'Origin': [b'http://127.0.0.1'],
})
if resp.code < 200 or resp.code >= 300:
raise Exception('invalid response code ' + str(resp.code))
rawResp = yield treq.json_content(resp)
except Exception as e:
print('controller error', e)
return ret({"ok": False, "error": str(e)})
if not rawResp.get("ok"):
print("provisioning server error", rawResp)
return ret({"ok": False, "error": rawResp.get('rej')})
r = rawResp['res']
ingressIndex = r["ingressIndex"]
# this message is sent back to setup-solo/src/ag_setup_solo/main.py
server_message = {
"ok": True,
"gci": config['gci'],
"rpcAddrs": config['rpcAddrs'],
"chainName": config['chainName'],
"ingressIndex": ingressIndex,
}
print("send server_message", server_message)
return ret(server_message)
class RequestCode(resource.Resource):
def __init__(self, reactor, o):
self.reactor = reactor
self.opts = o
@defer.inlineCallbacks
def got_message(self, client_message, nickname):
cm = json.loads(client_message.decode("utf-8"))
f = open(cosmosConfigFile(self.opts['home']))
config = json.loads(f.read())
msgs = yield enablePubkey(self.reactor, self.opts, config, nickname, cm['pubkey'])
return msgs
def send_provisioning_response(self, msgs, w):
[mobj, server_message, config] = msgs
sm = json.dumps(server_message).encode("utf-8")
print("send provisioning response", server_message)
w.send_message(sm)
d = w.close()
def complete(_):
print("provisioning complete")
pkobj = {
'chainName': config['chainName'],
'pubkey': mobj['pubkey'],
'nickname': mobj['nickname'][:32],
}
print("save public key to database", pkobj)
pkobj_str = json.dumps(pkobj)
with open(pubkeyDatabase(self.opts['home']), 'a') as db:
db.write(pkobj_str + ',\n')
d.addCallbacks(complete,
lambda f: print("provisioning error", f))
@defer.inlineCallbacks
def process_wormhole(self, nickname):
w = wormhole.create(APPID, MAILBOX_URL, self.reactor)
w.allocate_code()
code = yield w.get_code()
d = w.get_message()
d.addCallback(self.got_message, nickname.decode('utf-8'))
d.addCallback(self.send_provisioning_response, w)
return code
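    # Rough flow of the wormhole exchange above: allocate_code() reserves a
    # short human-readable code on the mailbox server, the client pairs with
    # that code and sends its pubkey as the first message, and the chained
    # callbacks (got_message -> enablePubkey -> send_provisioning_response)
    # reply with the chain config and ingress index before closing the wormhole.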
@defer.inlineCallbacks
def build_provisioning_response(self, nickname):
code = yield self.process_wormhole(nickname)
args = ConfigElement.gatherArgs(self.opts)
html = yield flattenString(None, ResponseElement(code, nickname, *args))
return html
def render_POST(self, req):
nickname = req.args[b"nickname"][0]
print(nickname)
d = self.build_provisioning_response(nickname)
def built(response):
req.write(response)
req.finish()
d.addCallback(built)
d.addErrback(log.err)
return server.NOT_DONE_YET
def render_GET(self, req):
nickname = req.args[b"nickname"][0]
d = self.process_wormhole(nickname)
def built(code):
req.setHeader('Content-Type', 'text/plain; charset=UTF-8')
req.write((code + '\n').encode('utf-8'))
req.finish()
d.addCallback(built)
d.addErrback(log.err)
return server.NOT_DONE_YET
class ConfigJSON(resource.Resource):
def __init__(self, o):
self.opts = o
def render_GET(self, req):
f = open(cosmosConfigFile(self.opts['home']))
config = f.read()
req.setHeader('Content-Type', 'application/json')
return config.encode('utf-8')
def run_server(reactor, o):
print("dir is", __file__)
root = static.File(htmldir)
provisioner = Provisioner(reactor, o)
root.putChild(b"", provisioner)
root.putChild(b"index.html", provisioner)
root.putChild(b"request-code", RequestCode(reactor, o))
# Prefix the mountpoints.
revpaths = o['mountpoint'].split('/')
revpaths.reverse()
for dir in revpaths:
# print('mount root under ' + dir)
if dir != '':
r = resource.Resource()
r.putChild(dir.encode('utf-8'), root)
root = r
# Display the JSON config.
root.putChild(b"network-config", ConfigJSON(o))
site = server.Site(root)
s = endpoints.serverFromString(reactor, o["listen"])
s.listen(site)
print("server running")
return defer.Deferred()
def doEnablePubkeys(reactor, opts, config, pkobjs):
finished = defer.Deferred()
def showError(e):
print(e)
doLatest(None)
def doLatest(d):
if d is not None:
print(d)
if len(pkobjs) == 0:
finished.callback(d)
return
pkobj = pkobjs.pop()
try:
print('enabling', pkobj['chainName'], pkobj['nickname'], pkobj['pubkey'])
d = enablePubkey(reactor, opts, config, pkobj['nickname'], pkobj['pubkey'])
d.addErrback(showError)
d.addCallback(doLatest)
except Exception as e:
showError(e)
doLatest(None)
return finished
def main():
o = Options()
o.parseOptions()
if o.subCommand == 'set-cosmos-config':
try:
os.mkdir(o['home'])
except FileExistsError:
pass
fname = cosmosConfigFile(o['home'])
print('Reading %s from stdin; hit Ctrl-D to finish' % fname)
cfgJson = sys.stdin.read()
with open(fname, 'w') as f:
f.write(cfgJson)
elif o.subCommand == 'add-pubkeys':
# Now that we have our files, add all the accounts.
f = open(cosmosConfigFile(o['home']), 'r')
config = json.loads(f.read())
try:
f = open(pubkeyDatabase(o['home']))
            pkobjs_str = f.read().strip(', \r\n')
pkobjs = json.loads('[' + pkobjs_str + ']')
except FileNotFoundError:
return
react(doEnablePubkeys, ({**o, **o.subOptions}, config, pkobjs))
elif o.subCommand == 'start':
react(run_server, ({**o, **o.subOptions},))
else:
print("Need either 'set-cosmos-config' or 'start'")
sys.exit(1)
```
|
{
"source": "jfparentledartech/DEFT",
"score": 2
}
|
#### File: lib/dataset/dataset_factory.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .datasets.mot import MOT, MOT_prediction
from .datasets.nuscenes import nuScenes, nuScenes_prediction
from .datasets.pixset import PixSet, PixSet_prediction
from .datasets.kitti_tracking import KITTITracking, KITTITracking_prediction
from .datasets.custom_dataset import CustomDataset
dataset_factory = {
"custom": CustomDataset,
"mot": MOT,
"nuscenes": nuScenes,
"pixset": PixSet,
"kitti_tracking": KITTITracking,
}
dataset_factory_prediction = {
"mot": MOT_prediction,
"nuscenes": nuScenes_prediction,
"pixset": PixSet_prediction,
"kitti_tracking": KITTITracking_prediction,
}
def get_dataset(dataset, prediction_model=False):
if prediction_model:
return dataset_factory_prediction[dataset]
else:
return dataset_factory[dataset]
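# For example, get_dataset("pixset") resolves to PixSet, while
# get_dataset("pixset", prediction_model=True) resolves to PixSet_prediction.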
```
#### File: lib/utils/image.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib.utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from lib.utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from tools.convert_pixset import box3d_from_loc_dim_rot
import numpy as np
import cv2
import random
import torch
from matplotlib.patches import Polygon
from pioneer.common import linalg
from matplotlib import pyplot as plt
from pioneer.das.api.samples import Image
from pioneer.das.api.platform import Platform
def flip(img):
return img[:, :, ::-1].copy()
# @numba.jit(nopython=True, nogil=True)
def transform_preds_with_trans(coords, trans):
# target_coords = np.concatenate(
# [coords, np.ones((coords.shape[0], 1), np.float32)], axis=1)
target_coords = np.ones((coords.shape[0], 3), np.float32)
target_coords[:, :2] = coords
target_coords = np.dot(trans, target_coords.transpose()).transpose()
return target_coords[:, :2]
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
def get_affine_transform(
center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0
):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
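# The affine matrix is fit from three point pairs: the box center, a point
# offset by half the (optionally rotated) source extent, and a third point at
# 90 degrees to that offset (get_3rd_point); together they pin down the
# translation, scale and rotation between the source crop and the output size.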
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(
img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR
)
return dst_img
# @numba.jit(nopython=True, nogil=True)
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = height + width
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
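# The three quadratics above correspond (as in CornerNet) to the ways a
# predicted box can drift relative to the ground truth while still keeping
# IoU >= min_overlap; the tightest of the three admissible radii is returned.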
# @numba.jit(nopython=True, nogil=True)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.0) / 2.0 for ss in shape]
y, x = np.ogrid[-m : m + 1, -n : n + 1]
# y, x = np.arange(-m, m + 1).reshape(-1, 1), np.arange(-n, n + 1).reshape(1, -1)
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
# @numba.jit(nopython=True, nogil=True)
def draw_umich_gaussian(heatmap, center, radius, k=1):
# import pdb; pdb.set_trace()
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
# import pdb; pdb.set_trace()
masked_heatmap = heatmap[y - top : y + bottom, x - left : x + right]
masked_gaussian = gaussian[
radius - top : radius + bottom, radius - left : radius + right
]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
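# A hedged usage sketch (names are illustrative): for a ground-truth box of
# size (h, w) with integer center ct_int on a per-class heatmap hm[cls]:
#   radius = max(0, int(gaussian_radius((h, w), min_overlap=0.7)))
#   draw_umich_gaussian(hm[cls], ct_int, radius)
# Overlapping peaks are combined with an element-wise maximum, so a later
# draw never erases an earlier, stronger one.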
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
dim = value.shape[0]
reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value
if is_offset and dim == 2:
delta = np.arange(diameter * 2 + 1) - radius
reg[0] = reg[0] - delta.reshape(1, -1)
reg[1] = reg[1] - delta.reshape(-1, 1)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top : y + bottom, x - left : x + right]
masked_regmap = regmap[:, y - top : y + bottom, x - left : x + right]
masked_gaussian = gaussian[
radius - top : radius + bottom, radius - left : radius + right
]
masked_reg = reg[:, radius - top : radius + bottom, radius - left : radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
idx = (masked_gaussian >= masked_heatmap).reshape(
1, masked_gaussian.shape[0], masked_gaussian.shape[1]
)
masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg
regmap[:, y - top : y + bottom, x - left : x + right] = masked_regmap
return regmap
def draw_msra_gaussian(heatmap, center, sigma):
tmp_size = sigma * 3
mu_x = int(center[0] + 0.5)
mu_y = int(center[1] + 0.5)
w, h = heatmap.shape[0], heatmap.shape[1]
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
return heatmap
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
img_x = max(0, ul[0]), min(br[0], h)
img_y = max(0, ul[1]), min(br[1], w)
heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]] = np.maximum(
heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]],
g[g_y[0] : g_y[1], g_x[0] : g_x[1]],
)
return heatmap
def grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
alpha = data_rng.normal(scale=alphastd, size=(3,))
image += np.dot(eigvec, eigval * alpha)
def blend_(alpha, image1, image2):
image1 *= alpha
image2 *= 1 - alpha
image1 += image2
def saturation_(data_rng, image, gs, gs_mean, var):
alpha = 1.0 + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
alpha = 1.0 + data_rng.uniform(low=-var, high=var)
image *= alpha
def contrast_(data_rng, image, gs, gs_mean, var):
alpha = 1.0 + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
functions = [brightness_, contrast_, saturation_]
random.shuffle(functions)
gs = grayscale(image)
gs_mean = gs.mean()
for f in functions:
f(data_rng, image, gs, gs_mean, 0.4)
lighting_(data_rng, image, 0.1, eig_val, eig_vec)
def show_matching_hanlded_rectangle(img_pre, img_next, boxes_pre, boxes_next, labels):
img_p = img_pre.copy()
img_n = img_next.copy()
for box in boxes_pre[:, 0:4]:
img_p = cv2.rectangle(
img_p,
tuple(box[:2].astype(int)),
tuple((box[2:4]).astype(int)),
(255, 0, 0),
2,
)
for box in boxes_next[:, 0:4]:
img_n = cv2.rectangle(
img_n,
tuple(box[:2].astype(int)),
tuple((box[2:4]).astype(int)),
(255, 0, 0),
2,
)
h, w, c = img_p.shape
h, w, c = img_n.shape
img = np.concatenate([img_p, img_n], axis=0)
rows, cols = np.nonzero(labels)
for r, c in zip(rows, cols):
box_p = boxes_pre[r, 0:4]
box_n = boxes_next[c, 0:4]
center_p = (box_p[:2] + box_p[2:4]) / 2.0
center_n = (box_n[:2] + box_n[2:4]) / 2.0 + np.array([0, h])
img = cv2.line(
img,
tuple(center_p.astype(int)),
tuple(center_n.astype(int)),
(
(int)(np.random.randn() * 255),
(int)(np.random.randn() * 255),
(int)(np.random.randn() * 255),
),
2,
)
return img
def ResizeShuffleBoxes(max_object, boxes_pre, boxes_next, labels):
resize_f = lambda boxes: (
boxes.shape[0],
np.vstack((boxes, np.full((max_object - len(boxes), boxes.shape[1]), np.inf))),
)
size_pre, boxes_pre = resize_f(boxes_pre)
size_next, boxes_next = resize_f(boxes_next)
indexes_pre = np.arange(max_object)
indexes_next = np.arange(max_object)
np.random.shuffle(indexes_pre)
np.random.shuffle(indexes_next)
boxes_pre = boxes_pre[indexes_pre, :]
boxes_next = boxes_next[indexes_next, :]
labels = labels[indexes_pre, :]
labels = labels[:, indexes_next]
mask_pre = indexes_pre < size_pre
mask_next = indexes_next < size_next
# add false object label
false_object_pre = (labels.sum(1) == 0).astype(float)
false_object_pre[np.logical_not(mask_pre)] = 0.0
false_object_next = (labels.sum(0) == 0).astype(float)
false_object_next[np.logical_not(mask_next)] = 0.0
false_object_pre = np.expand_dims(false_object_pre, axis=1)
labels = np.concatenate((labels, false_object_pre), axis=1) # 60x61
false_object_next = np.append(false_object_next, [0])
false_object_next = np.expand_dims(false_object_next, axis=0)
    labels = np.concatenate((labels, false_object_next), axis=0)  # 61x61
mask_pre = np.append(mask_pre, [True]) # 61
mask_next = np.append(mask_next, [True]) # 61
return [boxes_pre, mask_pre], [boxes_next, mask_next], labels
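# In short: both frames are padded with `inf` rows up to max_object, rows are
# shuffled (and the label matrix re-indexed to match), and an extra row and
# column flag "false objects" with no counterpart in the other frame, giving
# labels of shape (max_object + 1, max_object + 1).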
def FormatBoxes(boxes_pre, boxes_next, labels):
# convert the center to [-1, 1]
f = lambda boxes: np.expand_dims(
np.expand_dims((boxes[:, :2] + boxes[:, 2:]) - 1, axis=1), axis=1
)
# remove inf
boxes_pre[0] = f(boxes_pre[0])
boxes_pre[0][boxes_pre[0] == np.inf] = 1.5
boxes_next[0] = f(boxes_next[0])
boxes_next[0][boxes_next[0] == np.inf] = 1.5
return boxes_pre, boxes_next, labels
def ToTensor(boxes_pre, boxes_next, labels):
boxes_pre[0] = torch.from_numpy(boxes_pre[0].astype(float)).float()
boxes_pre[1] = torch.from_numpy(boxes_pre[1].astype(np.uint8)).unsqueeze(0)
boxes_next[0] = torch.from_numpy(boxes_next[0].astype(float)).float()
boxes_next[1] = torch.from_numpy(boxes_next[1].astype(np.uint8)).unsqueeze(0)
labels = torch.from_numpy(labels).unsqueeze(0)
return boxes_pre[0], boxes_pre[1], boxes_next[0], boxes_next[1], labels
def ToPercentCoordinates(boxes_pre, boxes_next, img):
height, width, channels = img.shape
boxes_pre[:, 0] /= width
boxes_pre[:, 2] /= width
boxes_pre[:, 1] /= height
boxes_pre[:, 3] /= height
boxes_next[:, 0] /= width
boxes_next[:, 2] /= width
boxes_next[:, 1] /= height
boxes_next[:, 3] /= height
return boxes_pre, boxes_next
def convert_detection(detection, h, w):
"""
transform the current detection center to [-1, 1]
:param detection: detection
:return: translated detection
"""
# get the center, and format it in (-1, 1)
detection[:, 2] -= detection[:, 0]
detection[:, 3] -= detection[:, 1]
detection[:, 0] /= w
detection[:, 2] /= w
detection[:, 1] /= h
detection[:, 3] /= h
center = (2 * detection[:, 0:2] + detection[:, 2:4]) - 1.0
center = torch.from_numpy(center.astype(float)).float()
center.unsqueeze_(0)
center.unsqueeze_(2)
center.unsqueeze_(3)
if torch.cuda.is_available():
return center.cuda()
return center
def get_color(idx):
idx = idx * 3
color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
return color
def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0.0, ids2=None):
im = np.ascontiguousarray(np.copy(image))
im_h, im_w = im.shape[:2]
top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
text_scale = max(1, image.shape[1] / 1600.0)
text_thickness = 1 if text_scale > 1.1 else 1
line_thickness = max(1, int(image.shape[1] / 500.0))
radius = max(5, int(im_w / 140.0))
cv2.putText(
im,
"frame: %d fps: %.2f num: %d" % (frame_id, fps, len(tlwhs)),
(0, int(15 * text_scale)),
cv2.FONT_HERSHEY_PLAIN,
text_scale,
(0, 0, 255),
thickness=2,
)
for i, tlwh in enumerate(tlwhs):
x1, y1, w, h = tlwh
intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
obj_id = int(obj_ids[i])
id_text = "{}".format(int(obj_id))
if ids2 is not None:
id_text = id_text + ", {}".format(int(ids2[i]))
_line_thickness = 1 if obj_id <= 0 else line_thickness
color = get_color(abs(obj_id))
cv2.rectangle(
im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness
)
cv2.putText(
im,
id_text,
(intbox[0], intbox[1] + 30),
cv2.FONT_HERSHEY_PLAIN,
text_scale,
(0, 0, 255),
thickness=text_thickness,
)
return im
def plot_tracking_ddd(
image,
tlwhs,
ddd_boxes,
obj_ids,
scores=None,
frame_id=0,
fps=0.0,
ids2=None,
calib=None,
trans_matrix=None,
camera_matrix=None,
distortion_coeffs=None,
classes=None
):
im = np.ascontiguousarray(np.copy(image))
im_h, im_w = im.shape[:2]
top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
text_scale = max(1, image.shape[1] / 1600.0)
text_thickness = 1 if text_scale > 1.1 else 1
line_thickness = max(1, int(image.shape[1] / 500.0))
radius = max(5, int(im_w / 140.0))
cv2.putText(
im,
"frame: %d fps: %.2f num: %d" % (frame_id, fps, len(ddd_boxes)),
(0, int(15 * text_scale)),
cv2.FONT_HERSHEY_PLAIN,
text_scale,
(0, 0, 255),
thickness=2,
)
for i, box3d in enumerate(ddd_boxes):
tlwh = tlwhs[i]
x1, y1, w, h = tlwh
intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
obj_id = int(obj_ids[i])
id_text = "{}".format(int(obj_id))
if ids2 is not None:
id_text = id_text + ", {}".format(int(ids2[i]))
id_text = f'{classes[i]}:{id_text}'
_line_thickness = 1 if obj_id <= 0 else line_thickness
color = get_color(abs(obj_id))
dim = box3d[:3]
loc = box3d[3:-1]
rot = box3d[-1]
# box_3d = compute_box_3d(dim, loc, rot)
# box_2d = project_to_image(box_3d, calib)
dist_coeffs = np.asarray(distortion_coeffs)
        box3d_projected = box3d_from_loc_dim_rot(
            np.asarray(trans_matrix), loc, dim.tolist(), rot, camera_matrix, dist_coeffs
        )
top_pad = np.zeros((389, 1440, 3), dtype=float)
bottom_pad = np.zeros((378, 1440, 3), dtype=float)
im = np.concatenate((top_pad, (im / 255), bottom_pad))
im = draw_box_3d(im, box3d_projected, c=color, same_color=True)
im = np.uint8(im[389:-378]*255)
# plt.imshow(im)
# plt.scatter(ct[i][0], ct[i][1] - 389)
# plt.scatter(x1 + w / 2, y1 + h / 2)
# plt.show()
cv2.putText(
im,
id_text,
(intbox[0], intbox[1] + 30),
cv2.FONT_HERSHEY_PLAIN,
text_scale,
(0, 255, 0),
thickness=text_thickness,
)
return im
```
#### File: lib/utils/matching.py
```python
import lap
import numpy as np
import scipy
from cython_bbox import bbox_overlaps as bbox_ious
from scipy.spatial.distance import cdist
from lib.utils.tracking_utils import kalman_filter
import os.path, copy
from scipy.spatial import ConvexHull
# This function is taken from : https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/matching.py
def merge_matches(m1, m2, shape):
O, P, Q = shape
m1 = np.asarray(m1)
m2 = np.asarray(m2)
M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
mask = M1 * M2
match = mask.nonzero()
match = list(zip(match[0], match[1]))
unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
return match, unmatched_O, unmatched_Q
def _indices_to_matches(cost_matrix, indices, thresh):
matched_cost = cost_matrix[tuple(zip(*indices))]
matched_mask = matched_cost <= thresh
matches = indices[matched_mask]
unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0]))
unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1]))
return matches, unmatched_a, unmatched_b
# This function is taken from : https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/matching.py
def linear_assignment(cost_matrix, thresh):
if cost_matrix.size == 0:
return (
np.empty((0, 2), dtype=int),
tuple(range(cost_matrix.shape[0])),
tuple(range(cost_matrix.shape[1])),
)
matches, unmatched_a, unmatched_b = [], [], []
cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
for ix, mx in enumerate(x):
if mx >= 0:
matches.append([ix, mx])
unmatched_a = np.where(x < 0)[0]
unmatched_b = np.where(y < 0)[0]
matches = np.asarray(matches)
return matches, unmatched_a, unmatched_b
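# Small illustration: with cost_matrix = np.array([[0.1, 0.9], [0.8, 0.2]])
# and thresh = 0.5, lapjv pairs 0->0 and 1->1, so matches == [[0, 0], [1, 1]]
# and both unmatched arrays are empty; assignments whose cost would exceed
# thresh are left unmatched rather than forced.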
# This function is taken from : https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/matching.py
def ious(atlbrs, btlbrs):
"""
Compute cost based on IoU
:type atlbrs: list[tlbr] | np.ndarray
    :type btlbrs: list[tlbr] | np.ndarray
:rtype ious np.ndarray
"""
    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=float)
    if ious.size == 0:
        return ious
    ious = bbox_ious(
        np.ascontiguousarray(atlbrs, dtype=float),
        np.ascontiguousarray(btlbrs, dtype=float),
)
return ious
# Part of this function is taken from : https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/matching.py
def iou_distance(atracks, btracks, frame_id=0, use_prediction=True):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (
len(btracks) > 0 and isinstance(btracks[0], np.ndarray)
):
atlbrs = atracks
btlbrs = btracks
else:
if use_prediction:
atlbrs = [track.prediction_at_frame_tlbr(frame_id) for track in atracks]
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
_ious = ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
def iou_ddd_distance(atracks, btracks, frame_id=0, use_prediction=True):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (
len(btracks) > 0 and isinstance(btracks[0], np.ndarray)
):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [convert_3dbox_to_8corner(track.ddd_bbox) for track in atracks]
btlbrs = [convert_3dbox_to_8corner(track.ddd_bbox) for track in btracks]
iou_matrix = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
if iou_matrix.size == 0:
return iou_matrix
for d, det in enumerate(btlbrs):
for t, trk in enumerate(atlbrs):
iou_matrix[t, d] = iou3d(det, trk)[0] # det: 8 x 3, trk: 8 x 3
iou_matrix = 1 - iou_matrix
return iou_matrix
def poly_area(x, y):
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def box3d_vol(corners):
""" corners: (8,3) no assumption on axis direction """
a = np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2))
b = np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2))
c = np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2))
return a * b * c
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1, p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return outputList
def convert_3dbox_to_8corner(bbox3d_input):
# compute rotational matrix around yaw axis
bbox3d = copy.copy(bbox3d_input)
# transform to kitti format first
bbox3d_nuscenes = copy.copy(bbox3d)
# kitti: [x, y, z, a, l, w, h]
bbox3d[0] = bbox3d_nuscenes[3]
bbox3d[1] = bbox3d_nuscenes[4]
bbox3d[2] = bbox3d_nuscenes[5]
bbox3d[3] = bbox3d_nuscenes[-1]
bbox3d[4] = bbox3d_nuscenes[2]
bbox3d[5] = bbox3d_nuscenes[1]
bbox3d[6] = bbox3d_nuscenes[0]
R = roty(bbox3d[3])
# 3d bounding box dimensions
l = bbox3d[4]
w = bbox3d[5]
h = bbox3d[6]
# 3d bounding box corners
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
# rotate and translate 3d bounding box
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] = corners_3d[0, :] + bbox3d[0]
corners_3d[1, :] = corners_3d[1, :] + bbox3d[1]
corners_3d[2, :] = corners_3d[2, :] + bbox3d[2]
return np.transpose(corners_3d)
def roty(t):
""" Rotation about the y-axis. """
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def iou3d(corners1, corners2):
""" Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
"""
# corner points are in counter clockwise order
rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)]
rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)]
area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1])
area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area / (area1 + area2 - inter_area)
ymax = min(corners1[0, 1], corners2[0, 1])
ymin = max(corners1[4, 1], corners2[4, 1])
inter_vol = inter_area * max(0.0, ymax - ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
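# Typical use in this file (see iou_ddd_distance above): each track/detection
# box, stored here in [h, w, l, x, y, z, rot] order (see the remapping in
# convert_3dbox_to_8corner), is expanded to an (8, 3) corner array first, e.g.
#   iou, iou_bev = iou3d(convert_3dbox_to_8corner(det_box),
#                        convert_3dbox_to_8corner(trk_box))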
def embedding_distance(tracks, detections, metric="cosine"):
"""
:param tracks: list[STrack]
:param detections: list[BaseTrack]
:param metric:
:return: cost_matrix np.ndarray
"""
    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=float)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections], dtype=float)
    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=float)
cost_matrix = np.maximum(
0.0, cdist(track_features, det_features, metric)
    )  # Normalized features
return cost_matrix
def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=True):
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
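    # chi2inv95[k] is the 0.95 quantile of a chi-square distribution with k
    # degrees of freedom; squared Mahalanobis distances beyond it are treated
    # as implausible associations and their costs are set to infinity below.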
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position
)
cost_matrix[row, gating_distance > (gating_threshold + 10)] = np.inf
return cost_matrix
# Part of this function is taken from : https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/matching.py
def fuse_motion(
kf,
cost_matrix,
tracks,
detections,
frame_id,
use_lstm=True,
only_position=True,
lambda_=0.9,
):
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
if not use_lstm:
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position, metric="maha"
)
cost_matrix[row, gating_distance > 5.0 * gating_threshold] = np.inf
cost_matrix[row] = (
lambda_ * cost_matrix[row] + 0.05 * (1 - lambda_) * gating_distance
)
else:
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
if len(track.observations) >= 300:
gating_distance = kf.gating_distance(
track.prediction_at_frame(frame_id),
track.covariance,
measurements,
only_position,
metric="maha",
)
cost_matrix[row, gating_distance > 5.0 * gating_threshold] = np.inf
cost_matrix[row] = (
lambda_ * cost_matrix[row] + 0.05 * (1 - lambda_) * gating_distance
)
else:
gating_distance = kf.gating_distance(
track.prediction_at_frame(frame_id),
track.covariance,
measurements,
only_position,
metric="gaussian",
)
cost_matrix[row, gating_distance > 50] = np.inf
cost_matrix[row] = (
lambda_ * cost_matrix[row]
+ 0.0005 * (1 - lambda_) * gating_distance
)
return cost_matrix
def fuse_motion_ddd(
kf,
cost_matrix,
tracks,
detections,
frame_id,
use_lstm=True,
only_position=False,
lambda_=0.9,
use_prediction=False,
classe_name=None,
):
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 7
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray([det.ddd_bbox for det in detections])
for row, track in enumerate(tracks):
if use_prediction:
gating_distance = kf.gating_distance(
track.ddd_prediction_at_frame(frame_id),
track.covariance,
measurements,
only_position,
metric="gaussian",
)
else:
gating_distance = kf.gating_distance(
track.ddd_bbox,
track.covariance,
measurements,
only_position,
metric="gaussian",
)
thr = 0.2 * track.depth
if classe_name == "pedestrian":
if thr < 5:
thr = 5
else:
if thr < 10:
thr = 10
cost_matrix[row, gating_distance > thr] = np.inf
cost_matrix[row] = lambda_ * cost_matrix[row] + 0.001 * gating_distance
return cost_matrix
```
#### File: DEFT/src/test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
from progress.bar import Bar
import torch
import pickle
import motmetrics as mm
from lib.opts import opts
from lib.logger import Logger
from lib.utils.utils import AverageMeter
from lib.dataset.dataset_factory import dataset_factory
from lib.utils.pixset_metrics import compute_metrics
pixset_categories = [
'car',
'truck',
'bus',
'pedestrian',
'motorcyclist',
'cyclist',
'van'
]
opt = opts().parse()
filename = '../options/test_opt_pixset.txt'
with open(filename, 'wb') as f:
pickle.dump(opt, f)
# # print('dataset -> ', opt.dataset)
# print('lstm -> ', opt.lstm)
# print(f'saved {filename}')
# with open(filename, 'rb') as f:
# opt = pickle.load(f)
# print('use pixell ->', opt.use_pixell)
from lib.detector import Detector
from lib.utils.image import plot_tracking, plot_tracking_ddd
import json
min_box_area = 20
_vehicles = ["car", "truck", "bus", "van"]
_cycles = ["motorcyclist", "cyclist"]
_pedestrians = ["pedestrian"]
attribute_to_id = {
"": 0,
"cycle.with_rider": 1,
"cycle.without_rider": 2,
"pedestrian.moving": 3,
"pedestrian.standing": 4,
"pedestrian.sitting_lying_down": 5,
"vehicle.moving": 6,
"vehicle.parked": 7,
"vehicle.stopped": 8,
}
id_to_attribute = {v: k for k, v in attribute_to_id.items()}
nuscenes_att = np.zeros(8, np.float32)
class PrefetchDataset(torch.utils.data.Dataset):
def __init__(self, opt, dataset, pre_process_func):
self.images = dataset.images
self.load_image_func = dataset.coco.loadImgs
self.get_ann_ids = dataset.coco.getAnnIds
self.load_annotations = dataset.coco.loadAnns
self.img_dir = dataset.img_dir
self.pre_process_func = pre_process_func
self.get_default_calib = dataset.get_default_calib
self.opt = opt
def __getitem__(self, index):
self.images.sort() # TODO remove
img_id = self.images[index]
img_info = self.load_image_func(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info["file_name"])
image = cv2.imread(img_path)
annotation_ids = self.get_ann_ids(imgIds=[img_id])
annotations = self.load_annotations(ids=annotation_ids)
images, meta = {}, {}
for scale in opt.test_scales:
input_meta = {}
calib = (
img_info["calib"]
if "calib" in img_info
else self.get_default_calib(image.shape[1], image.shape[0])
)
input_meta["calib"] = calib
images[scale], meta[scale] = self.pre_process_func(image, scale, input_meta)
ret = {
"images": images,
"image": image,
"meta": meta,
"frame_id": img_info["frame_id"],
"annotations": annotations
}
if "frame_id" in img_info and img_info["frame_id"] == 1:
ret["is_first_frame"] = 1
ret["video_id"] = img_info["video_id"]
return img_id, ret, img_info
def __len__(self):
return len(self.images)
def prefetch_test(opt):
start_time = time.time()
show_image = True
if not opt.not_set_cuda_env:
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
Dataset = dataset_factory[opt.test_dataset]
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
# split = "val" if not opt.trainval else "test"
split = "test"
# split = "val"
dataset = Dataset(opt, split)
detector = Detector(opt)
if opt.load_results != "":
load_results = json.load(open(opt.load_results, "r"))
for img_id in load_results:
for k in range(len(load_results[img_id])):
if load_results[img_id][k]["class"] - 1 in opt.ignore_loaded_cats:
load_results[img_id][k]["score"] = -1
else:
load_results = {}
data_loader = torch.utils.data.DataLoader(
PrefetchDataset(opt, dataset, detector.pre_process),
batch_size=1,
shuffle=False,
num_workers=0,
pin_memory=True,
)
results = {}
num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
bar = Bar("{}".format(opt.exp_id), max=num_iters)
time_stats = ["tot", "load", "pre", "net", "dec", "post", "merge", "track"]
avg_time_stats = {t: AverageMeter() for t in time_stats}
if opt.use_loaded_results:
for img_id in data_loader.dataset.images:
results[img_id] = load_results["{}".format(img_id)]
num_iters = 0
final_results = []
out_path = ""
if opt.dataset in ["nuscenes", "pixset"]:
ret = {
"meta": {
"use_camera": True,
"use_lidar": False,
"use_radar": False,
"use_map": False,
"use_external": False,
},
"results": {},
}
accumulators = [mm.MOTAccumulator(auto_id=True) for _ in pixset_categories]
for ind, (img_id, pre_processed_images, img_info) in enumerate(data_loader):
bar.next()
if ind >= num_iters:
break
if opt.dataset == "nuscenes":
sample_token = img_info["sample_token"][0]
sensor_id = img_info["sensor_id"].numpy().tolist()[0]
if opt.dataset == "pixset":
sample_token = img_info["sample_token"][0]
sensor_id = img_info["sensor_id"].numpy().tolist()[0]
if opt.tracking and ("is_first_frame" in pre_processed_images):
if "{}".format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
pre_processed_images["meta"]["pre_dets"] = load_results[
"{}".format(int(img_id.numpy().astype(np.int32)[0]))
]
else:
print(
"No pre_dets for",
int(img_id.numpy().astype(np.int32)[0]),
". Use empty initialization.",
)
pre_processed_images["meta"]["pre_dets"] = []
if final_results and opt.dataset not in ["nuscenes", "pixset"]:
write_results(out_path, final_results, opt.dataset)
final_results = []
img0 = pre_processed_images["image"][0].numpy()
h, w, _ = img0.shape
detector.img_height = h
detector.img_width = w
if opt.dataset in ["nuscenes", "pixset"]:
save_video_name = os.path.join(
opt.dataset + "_videos/",
"MOT"
+ str(int(pre_processed_images["video_id"]))
+ "_"
+ str(int(img_info["sensor_id"]))
+ str(int(img_info["video_id"]))
+ ".avi",
)
elif opt.dataset == "kitti_tracking":
save_video_name = os.path.join(
opt.dataset + "_videos/",
"KITTI_" + str(int(pre_processed_images["video_id"])) + ".avi",
)
else:
save_video_name = os.path.join(
opt.dataset + "_videos/",
"MOT" + str(int(pre_processed_images["video_id"])) + ".avi",
)
results_dir = opt.dataset + "_results"
if not os.path.exists(opt.dataset + "_videos/"):
os.mkdir(opt.dataset + "_videos/")
if not os.path.exists(results_dir):
os.mkdir(results_dir)
for video in dataset.coco.dataset["videos"]:
video_id = video["id"]
file_name = video["file_name"]
if pre_processed_images[
"video_id"
] == video_id and opt.dataset not in ["nuscenes", "pixset"]:
out_path = os.path.join(results_dir, "{}.txt".format(file_name))
break
detector.reset_tracking(opt)
vw = cv2.VideoWriter(
save_video_name, cv2.VideoWriter_fourcc("M", "J", "P", "G"), 10, (w, h)
)
print("Start tracking video", int(pre_processed_images["video_id"]))
if opt.public_det:
if "{}".format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
pre_processed_images["meta"]["cur_dets"] = load_results[
"{}".format(int(img_id.numpy().astype(np.int32)[0]))
]
else:
print("No cur_dets for", int(img_id.numpy().astype(np.int32)[0]))
pre_processed_images["meta"]["cur_dets"] = []
online_targets = detector.run(pre_processed_images, image_info=img_info)
online_tlwhs = []
online_ids = []
online_ddd_boxes = []
sample_results = []
classes = []
image = pre_processed_images["image"][0].numpy()
for acc_i in range(len(accumulators)):
gt_list, hyp_list, distances = compute_metrics(pre_processed_images['annotations'],
online_targets, eval_type='distance',
im=image, category=pixset_categories[acc_i])
accumulators[acc_i].update(gt_list, hyp_list, distances)
idx = 0
print(ind)
print(accumulators[idx].mot_events.loc[ind])
mh = mm.metrics.create()
summary = mh.compute(accumulators[idx], metrics=['num_frames', 'mota', 'precision', 'recall'], name=f'acc {pixset_categories[idx]}')
print(summary)
print('-----------------------------------------')
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
if tlwh[2] * tlwh[3] > min_box_area:
online_tlwhs.append(tlwh)
online_ids.append(tid)
classes.append(t.classe)
if opt.dataset in ["nuscenes", "pixset"]:
online_ddd_boxes.append(t.org_ddd_box)
class_name = t.classe
if class_name in _cycles:
att = id_to_attribute[np.argmax(nuscenes_att[0:2]) + 1]
elif class_name in _pedestrians:
att = id_to_attribute[np.argmax(nuscenes_att[2:5]) + 3]
elif class_name in _vehicles:
att = id_to_attribute[np.argmax(nuscenes_att[5:8]) + 6]
ddd_box = t.ddd_bbox.copy()
ddd_box_submission = t.ddd_submission.tolist()
translation, size, rotation = (
ddd_box_submission[:3],
ddd_box_submission[3:6],
ddd_box_submission[6:],
)
result = {
"sample_token": sample_token,
"translation": translation,
"size": size,
"rotation": rotation,
"velocity": [0, 0],
"detection_name": t.classe,
# "attribute_name": att,
"attribute_name": None,
"detection_score": t.score,
"tracking_name": t.classe,
"tracking_score": t.score,
"tracking_id": tid,
"sensor_id": sensor_id,
"det_id": -1,
}
sample_results.append(result.copy())
if opt.dataset in ["nuscenes", "pixset"]:
if sample_token in ret["results"]:
ret["results"][sample_token] = (
ret["results"][sample_token] + sample_results
)
else:
ret["results"][sample_token] = sample_results
final_results.append(
(pre_processed_images["frame_id"].cpu().item(), online_tlwhs, online_ids)
)
if show_image:
img0 = pre_processed_images["image"][0].numpy()
if opt.dataset in ["nuscenes", "pixset"]:
online_im = plot_tracking_ddd(
img0,
online_tlwhs,
online_ddd_boxes,
online_ids,
frame_id=pre_processed_images["frame_id"],
calib=img_info["calib"],
trans_matrix=img_info["trans_matrix"],
camera_matrix=img_info["camera_matrix"],
distortion_coeffs=img_info["distortion_coefficients"],
classes=classes,
)
else:
online_im = plot_tracking(
img0,
online_tlwhs,
online_ids,
frame_id=pre_processed_images["frame_id"],
)
vw.write(online_im)
if opt.dataset not in ["nuscenes", "pixset"] and final_results:
write_results(out_path, final_results, opt.dataset)
final_results = []
if opt.dataset in ["nuscenes", "pixset"]:
for sample_token in ret["results"].keys():
confs = sorted(
[
(-d["detection_score"], ind)
for ind, d in enumerate(ret["results"][sample_token])
]
)
ret["results"][sample_token] = [
ret["results"][sample_token][ind]
for _, ind in confs[: min(500, len(confs))]
]
mh = mm.metrics.create()
metrics = ['num_frames', 'mota', 'motp', 'precision', 'recall']
summary = mh.compute_many(
accumulators, names=pixset_categories, metrics=metrics, generate_overall=True
)
print(summary)
save_summary(summary, 'overall')
print('total test time', time.time() - start_time)
def save_summary(summary, acc_name):
with open(f"../pixset_results/test/{acc_name}.txt", "w") as text_file:
text_file.write(summary.to_string())
def _to_list(results):
for img_id in results:
for t in range(len(results[img_id])):
for k in results[img_id][t]:
if isinstance(results[img_id][t][k], (np.ndarray, np.float32)):
results[img_id][t][k] = results[img_id][t][k].tolist()
return results
def write_results(filename, results, data_type):
if data_type == "mot":
save_format = "{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n"
elif data_type == "kitti_tracking":
save_format = "{frame} {id} Car 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n"
else:
raise ValueError(data_type)
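    # MOT layout per line: frame, id, bb_left, bb_top, bb_width, bb_height,
    # conf, x, y, z (the trailing world coordinates are unused and written as
    # -1); the KITTI template instead fills its unused 3D fields with -10/-1000.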
with open(filename, "w") as f:
for frame_id, tlwhs, track_ids in results:
if data_type == "kitti_tracking":
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(
frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h
)
f.write(line)
if __name__ == "__main__":
# opt = opts().parse()
prefetch_test(opt)
```
#### File: DEFT/src/train_prediction.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import _init_paths
import os
import torch
import torch.utils.data
from lib.opts import opts
from lib.model.model import create_model, load_model, save_model
from lib.logger import Logger
from lib.dataset.dataset_factory import get_dataset
from progress.bar import Bar
import pickle
def get_optimizer(opt, model):
if opt.optim == "adam":
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
elif opt.optim == "sgd":
print("Using SGD")
optimizer = torch.optim.SGD(
model.parameters(), opt.lr, momentum=0.9, weight_decay=0.0001
)
else:
assert 0, opt.optim
return optimizer
class DecoderRNN(torch.nn.Module):
def __init__(self, num_hidden, opt):
super(DecoderRNN, self).__init__()
self.num_hidden = num_hidden
if opt.dataset in ["nuscenes", "pixset"]:
self.lstm = torch.nn.LSTM(18, self.num_hidden)
self.out1 = torch.nn.Linear(self.num_hidden, 64)
self.out2 = torch.nn.Linear(64, 4 * 4)
else:
self.lstm = torch.nn.LSTM(11, self.num_hidden)
self.out1 = torch.nn.Linear(self.num_hidden, 64)
# self.out2 = torch.nn.Linear(64, 4 * 5)
self.out2 = torch.nn.Linear(64, 4 * 4)
def forward(self, input_traj):
# Fully connected
input_traj = input_traj.permute(1, 0, 2)
output, (hn, cn) = self.lstm(input_traj)
x = self.out1(output[-1])
x = self.out2(x)
return x
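# Shape sketch: input_traj arrives as (batch, seq_len, feat) with feat = 18
# for nuscenes/pixset and 11 otherwise, is permuted to (seq_len, batch, feat)
# for the batch_first=False LSTM, and the final hidden state is mapped through
# two linear layers to a flat vector of 16 values (presumably 4 future steps
# of 4 box parameters), matching the targets flattened in main() below.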
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
Dataset = get_dataset(opt.dataset, prediction_model=True)
if not opt.not_set_cuda_env:
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
opt.device = torch.device("cuda" if opt.gpus[0] >= 0 else "cpu")
device = opt.device
logger = Logger(opt)
# print("Creating model...")
model = DecoderRNN(128, opt)
optimizer = get_optimizer(opt, model)
start_epoch = 0
if opt.load_model_traj != "":
model, optimizer, start_epoch = load_model(
            model, opt.load_model_traj, opt, optimizer
)
loss_function = torch.nn.SmoothL1Loss()
for i, param in enumerate(model.parameters()):
param.requires_grad = True
# # Uncomment for subset
# trainset = Dataset(opt, "train")
#
# train_mask = list(range(0, int(len(trainset)/10), 1))
# train_subset = torch.utils.data.Subset(Dataset(opt, "train"), train_mask)
#
# print(len(train_subset))
#
# train_loader = torch.utils.data.DataLoader(
# train_subset,
# batch_size=1,
# shuffle=True,
# num_workers=16,
# pin_memory=True,
# drop_last=True,
# )
train_loader = torch.utils.data.DataLoader(
Dataset(opt, "train"),
batch_size=1,
shuffle=True,
num_workers=0,
pin_memory=True,
drop_last=True,
)
val_loader = torch.utils.data.DataLoader(
Dataset(opt, "val"),
batch_size=1,
shuffle=False,
num_workers=0,
pin_memory=True,
drop_last=True,
)
for state in optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=device, non_blocking=True)
model = model.to(device)
loss_function = loss_function.to(device)
print("Starting training...")
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
losses = []
val_losses = []
mark = epoch if opt.save_all else "last"
num_iters = len(train_loader) if opt.num_iters < 0 else opt.num_iters
bar = Bar("{}/{}".format(opt.task, opt.exp_id), max=num_iters)
model.train()
for iter_id, (inputs, targets) in enumerate(train_loader):
inputs = inputs.to(device=device).float()
targets = targets.to(device=device).view(1, -1).float()
outputs = model(inputs)
loss = loss_function(outputs, targets)
losses.append(loss.item())
if 100 * loss.item() < 20:
loss = 100 * loss
else:
loss = 10 * loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if opt.print_iter > 0: # If not using progress bar
if iter_id % opt.print_iter == 0:
print("{}/{}| {}".format(opt.task, opt.exp_id, Bar.suffix))
del outputs, loss
model.eval()
for iter_id, (inputs, targets) in enumerate(val_loader):
inputs = inputs.to(device=device).float()
targets = targets.to(device=device).view(1, -1).float()
outputs = model(inputs)
loss = loss_function(outputs, targets)
val_losses.append(loss.item())
if opt.print_iter > 0: # If not using progress bar
if iter_id % opt.print_iter == 0:
print("{}/{}| {}".format(opt.task, opt.exp_id, Bar.suffix))
del outputs, loss
save_model(
os.path.join(opt.save_dir, "model_last.pth"), epoch, model, optimizer
)
logger.write("\n")
save_model(
os.path.join(opt.save_dir, "model_{}.pth".format(epoch)),
epoch,
model,
optimizer,
)
if epoch in opt.lr_step:
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
Bar.suffix = "{phase}: [{0}/{1}]|Tot: {total:} |ETA: {eta:} |Loss: {loss:} ".format(
epoch,
opt.num_epochs,
phase="train",
total=bar.elapsed_td,
eta=bar.eta_td,
loss=np.mean(losses)
)
logger.write("epoch: {} |".format(epoch))
logger.scalar_summary("train_{}".format('loss'), np.mean(losses), epoch)
logger.scalar_summary("val_{}".format('loss'), np.mean(val_losses), epoch)
logger.write("{} {:8f} | ".format('loss', np.mean(losses)))
logger.write("{} {:8f} | ".format('val loss', np.mean(val_losses)))
bar.next()
logger.close()
if __name__ == "__main__":
opt = opts().parse()
filename = '../options/train_prediction_opt_pixset.txt'
with open(filename, 'wb') as f:
pickle.dump(opt, f)
# print(f'saved {filename}')
# with open(filename, 'rb') as f:
# opt = pickle.load(f)
# opt.print_iter = 1
main(opt)
```
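A minimal sketch of feeding the decoder above one batch of past trajectories. The 18-feature input width and the flat 4 * 4 output follow from the constructor; the stand-in opt object, batch size, and sequence length are assumptions made only for this example, and it assumes DecoderRNN is importable from the file above.
```python
import torch

class FakeOpt:
    # Hypothetical stand-in for the parsed options; only the field DecoderRNN reads.
    dataset = "pixset"

model = DecoderRNN(128, FakeOpt())
past = torch.randn(1, 8, 18)   # (batch, seq_len, features); forward permutes to (seq_len, batch, features)
pred = model(past)             # shape (1, 16): four future steps with four values each
print(pred.shape)
```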
|
{
"source": "jfparent/VoiceAssistantcouppe",
"score": 3
}
|
#### File: googlecalendar/googlecalendaradd/__init__.py
```python
from googlecalendar import AssistantHandlerGoogleCalendar
from utils import current_milli_time
import datetime as dt
import json
class AssistantHandlerGoogleCalendarAdd(AssistantHandlerGoogleCalendar):
def __init__(self):
AssistantHandlerGoogleCalendar.__init__(self,"googlecalendaradd")
def getHttpMethod(self):
return "POST"
def getEndpoint(self,parameters):
return "calendars/primary/events/quickAdd"
def getEndpointParameters(self,parameters):
return {"text":parameters["text"],"sendNotifications":True}
def getSpeech(self, parameters, data):
formatFromGoogle = '%Y-%m-%dT%H:%M:%SZ'
formatForOutput = '%Y-%m-%d %H:%M'
start = dt.datetime.strptime(data["start"]["dateTime"], formatFromGoogle).strftime(formatForOutput)
end = dt.datetime.strptime(data["end"]["dateTime"], formatFromGoogle).strftime(formatForOutput)
return "Added calendar appointment with the description \"" + data["summary"] + "\" starting on " + start
```
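The only non-trivial step in getSpeech above is reformatting Google's RFC 3339-style timestamps; a standalone round-trip with a made-up timestamp.
```python
import datetime as dt

format_from_google = '%Y-%m-%dT%H:%M:%SZ'
format_for_output = '%Y-%m-%d %H:%M'

raw = '2021-06-01T14:30:00Z'   # made-up value in the shape the Calendar API returns
print(dt.datetime.strptime(raw, format_from_google).strftime(format_for_output))
# -> 2021-06-01 14:30
```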
#### File: assistanthandlers/helloname/__init__.py
```python
from assistanthandlerbase import AssistantHandler
import urllib
class AssistantHandlerHelloName(AssistantHandler):
def __init__(self):
AssistantHandler.__init__(self,"helloname")
def getBaseUrl(self,parameters):
pass
def getEndpoint(self,parameters):
pass
def getEndpointParameters(self,parameters):
pass
def getSpeech(self, parameters, data):
return "Hello to you too " + parameters.get("name") + "!"
```
#### File: assistanthandlers/yahooweaher/__init__.py
```python
from assistanthandlerbase import AssistantHandler
import urllib
import json
class AssistantHandlerYahooWeather(AssistantHandler):
def __init__(self):
AssistantHandler.__init__(self,"yahooWeatherForecast")
def getBaseUrl(self,parameters):
return "https://query.yahooapis.com/v1/public/"
def getEndpoint(self,parameters):
return "yql"
def getEndpointParameters(self,parameters):
return {"q":self.makeYqlQuery(parameters),"format":"json"}
def makeYqlQuery(self, parameters):
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
def getSpeech(self, parameters, data):
query = data.get('query')
if query is None:
return "No query"
result = query.get('results')
if result is None:
return "No results"
channel = result.get('channel')
if channel is None:
return "No channel"
item = channel.get('item')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (units is None):
return "No location or item or units"
condition = item.get('condition')
if condition is None:
return "No condition"
# print(json.dumps(item, indent=4))
speech = "Today in " + location.get('city') + ": " + condition.get('text') + \
", the temperature is " + condition.get('temp') + " " + units.get('temperature')
print("Response:")
print(speech)
return speech
```
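getSpeech above is essentially a defensive walk through Yahoo's nested JSON response; the same traversal reproduced standalone on a made-up payload so the expected shape is visible.
```python
# Made-up payload in the shape the YQL weather endpoint used to return.
data = {
    "query": {"results": {"channel": {
        "location": {"city": "Montreal"},
        "units": {"temperature": "C"},
        "item": {"condition": {"text": "Cloudy", "temp": "12"}},
    }}}
}

channel = data.get("query", {}).get("results", {}).get("channel", {})
condition = channel.get("item", {}).get("condition", {})
speech = ("Today in " + channel["location"]["city"] + ": " + condition["text"] +
          ", the temperature is " + condition["temp"] + " " + channel["units"]["temperature"])
print(speech)  # Today in Montreal: Cloudy, the temperature is 12 C
```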
|
{
"source": "jfpettit/reinforcement-learning",
"score": 2
}
|
#### File: flare/kindling/buffers.py
```python
import numpy as np
import scipy.signal
from typing import Optional, Any, Union
from flare.kindling.mpi_tools import mpi_statistics_scalar
import torch
class PGBuffer:
"""
A buffer for storing trajectories experienced by an agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(
self,
obs_dim: Union[tuple, int],
act_dim: Union[tuple, int],
size: int,
gamma: Optional[float] = 0.99,
lam: Optional[float] = 0.95,
):
self.obs_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(self._combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32)
self.val_buf = np.zeros(size, dtype=np.float32)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(
self,
obs: np.array,
act: np.array,
rew: Union[int, float, np.array],
val: Union[int, float, np.array],
logp: Union[float, np.array],
):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.val_buf[self.ptr] = val
self.logp_buf[self.ptr] = logp
self.ptr += 1
def finish_path(self, last_val: Optional[Union[int, float, np.array]] = 0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = self._discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = self._discount_cumsum(rews, self.gamma)[:-1]
self.path_start_idx = self.ptr
def get(self):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
assert self.ptr == self.max_size # buffer has to be full before you can get
self.ptr, self.path_start_idx = 0, 0
# the next two lines implement the advantage normalization trick
adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
# adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
self.adv_buf = (self.adv_buf - adv_mean) / adv_std
return [self.obs_buf, self.act_buf, self.adv_buf, self.ret_buf, self.logp_buf]
def _combined_shape(
self, length: Union[int, np.array], shape: Optional[Union[int, tuple]] = None
):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def _discount_cumsum(self, x: np.array, discount: float):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
class ReplayBuffer(PGBuffer):
"""
A replay buffer for off-policy RL agents.
"""
def __init__(
self, obs_dim: Union[tuple, int], act_dim: Union[tuple, int], size: int
):
self.obs1_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
self.obs2_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(self._combined_shape(size, act_dim), dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(
self,
obs: np.array,
act: Union[float, int, np.array],
rew: Union[float, int],
next_obs: np.array,
done: bool,
):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(
obs=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
rew=self.rew_buf[idxs],
done=self.done_buf[idxs],
)
return tuple(torch.as_tensor(v, dtype=torch.float32) for _, v in batch.items())
def get(self):
return [self.obs1_buf, self.obs2_buf, self.act_buf, self.rew_buf, self.done_buf]
```
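A short sketch of the intended call pattern for PGBuffer: store one transition per step, call finish_path when an episode (or the epoch) ends, then get once the buffer is full. The dimensions and placeholder values below are arbitrary, and note that get() calls mpi_statistics_scalar, so it assumes the flare MPI helpers are usable in the current process.
```python
import numpy as np

buf = PGBuffer(obs_dim=4, act_dim=1, size=5, gamma=0.99, lam=0.95)

for t in range(5):
    obs = np.random.randn(4)
    act = np.random.randn(1)
    rew, val, logp = 1.0, 0.5, -0.7   # placeholder reward, value estimate, log-prob
    buf.store(obs, act, rew, val, logp)

buf.finish_path(last_val=0)   # episode ended in a terminal state, so bootstrap with 0
# data = buf.get()            # normalized advantages plus obs/act/ret/logp arrays
```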
#### File: flare/kindling/neuralnets.py
```python
import numpy as np
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy import signal
from flare.kindling.utils import NetworkUtils as netu
import gym
from scipy.signal import lfilter
from typing import Optional, Iterable, List, Dict, Callable, Union, Tuple
from flare.kindling.utils import conv2d_output_shape, conv2d_output_size
class MLP(nn.Module):
r"""
A class for building a simple MLP network.
Args:
layer_sizes (list or tuple): Layer sizes for the network.
Example::
sizes = (4, 64, 64, 2)
mlp = MLP(sizes)
activations (Function): Activation function for MLP net.
out_act (Function): Output activation function
out_squeeze (bool): Whether to squeeze the output of the network.
"""
def __init__(
self,
layer_sizes: Union[List, Tuple],
activations: Optional[Callable] = torch.tanh,
        out_act: Optional[Callable] = None,
out_squeeze: Optional[bool] = False,
):
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activations = activations
self.out_act = out_act
self.out_squeeze = out_squeeze
for i, l in enumerate(layer_sizes[1:]):
self.layers.append(nn.Linear(layer_sizes[i], l))
def forward(self, x: torch.Tensor) -> torch.Tensor:
for l in self.layers[:-1]:
x = self.activations(l(x))
if self.out_act is None:
x = self.layers[-1](x)
else:
x = self.out_act(self.layers[-1](x))
return torch.squeeze(x, -1) if self.out_squeeze else x
class CNN(nn.Module):
"""
Create a PyTorch CNN module.
:param kernel_size: Convolutional kernel size
:param stride: convolutional kernel stride
    :param output_size: size of network output
:param input_channels: number of channels in the input
:param output_activation: if any, activation to apply to the output layer
:param input_height: size of one side of input (currently assumes square input)
:param channels: List of channel sizes for each convolutional layer
:param linear_layer_sizes: list of (if any) sizes of linear layers to add after convolutional layers
:param activation: activation function
:param dropout_layers: if any, layers to apply dropout to
:param dropout_p: probability of dropout to use
:param out_squeeze: whether to squeeze the output
"""
def __init__(self,
input_channels: int,
input_height: int,
output_size: int,
kernel_size: int = 3,
stride: int = 1,
channels: list = [64, 64],
linear_layer_sizes: list = [512],
activation: Callable = torch.relu,
output_activation: Callable = None,
dropout_layers: list = None,
dropout_p: float = None,
out_squeeze: bool = False):
super(CNN, self).__init__()
conv_sizes = [input_channels] + channels
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activation
self.out_squeeze = out_squeeze
self.dropout_p = dropout_p
self.dropout_layers = dropout_layers
self.hw=input_height
for i, l in enumerate(conv_sizes[1:]):
self.hw = conv2d_output_size(kernel_size=kernel_size, stride=stride, sidesize=self.hw)
self.layers.append(nn.Conv2d(conv_sizes[i], l, kernel_size=kernel_size, stride=stride))
self.hw = (self.hw, self.hw)
conv_out_size = 1
for num in self.hw:
conv_out_size *= num
conv_out_size *= conv_sizes[-1]
linear_sizes = [conv_out_size] + linear_layer_sizes + [output_size]
self.layers.append(nn.Flatten())
for i, l in enumerate(linear_sizes[1:]):
self.layers.append(nn.Linear(linear_sizes[i], l))
def forward(self, x: torch.Tensor) -> torch.Tensor:
for l in self.layers[:-1]:
x = self.activation(l(x))
            # print(l)  # debug trace of each layer
if self.dropout_layers is not None and l in self.dropout_layers:
x = F.dropout(x, p=self.dropout_p)
if self.output_activation is None:
x = self.layers[-1](x)
else:
x = self.output_activation(self.layers[-1](x))
return x.squeeze() if self.out_squeeze else x
class Actor(nn.Module):
def action_distribution(self, states):
raise NotImplementedError
def logprob_from_distribution(self, policy, action):
raise NotImplementedError
def forward(self, x, a = None):
policy = self.action_distribution(x)
logp_a = None
if a is not None:
logp_a = self.logprob_from_distribution(policy, a)
return policy, logp_a
class CategoricalPolicy(Actor):
r"""
A class for a Categorical Policy network. Used in discrete action space environments. The policy is an :func:`~MLP`.
Args:
state_features (int): Dimensionality of the state space.
action_dim (int): Dimensionality of the action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
out_activation (Function): Output activation function for the network.
"""
def __init__(
self,
state_features: int,
action_dim: int,
hidden_sizes: Union[List, Tuple],
activation: Callable,
out_activation: Callable,
useconv: bool = False,
channels: int = 3,
height: int = 64,
):
super().__init__()
if not useconv:
self.net = MLP(
[state_features] + list(hidden_sizes) + [action_dim], activations=activation
)
elif useconv:
self.net = CNN(
channels, height, action_dim
)
def action_distribution(self, x):
logits = self.net(x)
return torch.distributions.Categorical(logits=logits)
def logprob_from_distribution(self, policy, actions):
return policy.log_prob(actions)
class GaussianPolicy(Actor):
r"""
A class for a Gaussian Policy network. Used in continuous action space environments. The policy is an :func:`~MLP`.
Args:
state_features (int): Dimensionality of the state space.
action_dim (int): Dimensionality of the action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
out_activation (Function): Output activation function for the network.
"""
def __init__(
self,
state_features: int,
action_dim: int,
hidden_sizes: Union[List, Tuple],
activation: Callable,
out_activation: Callable,
useconv: bool = False,
channels: int = 3,
height: int = 64,
):
super().__init__()
if not useconv:
self.net = MLP(
[state_features] + list(hidden_sizes) + [action_dim],
activations=activation,
out_act=out_activation,
)
elif useconv:
self.net = CNN(
channels, height, action_dim
)
self.logstd = nn.Parameter(-0.5 * torch.ones(action_dim, dtype=torch.float32))
def action_distribution(self, states):
mus = self.net(states)
std = torch.exp(self.logstd)
return torch.distributions.Normal(mus, std)
def logprob_from_distribution(self, policy, actions):
return policy.log_prob(actions).sum(axis=-1)
class FireActorCritic(nn.Module):
r"""
An Actor Critic class for Policy Gradient algorithms.
Has built-in capability to work with continuous (gym.spaces.Box) and discrete (gym.spaces.Discrete) action spaces. The policy and value function are both :func:`~MLP`. If working with a different action space, the user can pass in a custom policy class for that action space as an argument.
Args:
state_features (int): Dimensionality of the state space.
action_space (gym.spaces.Space): Action space of the environment.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
out_activation (Function): Output activation function for the network.
policy (nn.Module): Custom policy class for an environment where the action space is not gym.spaces.Box or gym.spaces.Discrete
"""
def __init__(
self,
state_features: int,
action_space: int,
hidden_sizes: Optional[Union[Tuple, List]] = (32, 32),
activation: Optional[Callable] = torch.tanh,
out_activation: Optional[Callable] = None,
policy: Optional[nn.Module] = None,
useconv: Optional[bool] = False,
channels: Optional[int] = 3,
height: Optional[int] = 64
):
super(FireActorCritic, self).__init__()
obs_dim = state_features
if isinstance(action_space, gym.spaces.Discrete):
act_dim = action_space.n
self.policy = CategoricalPolicy(
obs_dim,
act_dim,
hidden_sizes,
activation,
out_activation,
useconv=useconv,
channels=channels,
height=height
)
elif isinstance(action_space, gym.spaces.Box):
act_dim = action_space.shape[0]
self.policy = GaussianPolicy(
obs_dim,
act_dim,
hidden_sizes,
activation,
out_activation,
useconv=useconv,
channels=channels,
height=height
)
else:
self.policy = policy(
obs_dim,
action_space,
hidden_sizes,
activation,
out_activation,
useconv=useconv,
channels=channels,
height=height
)
self.value_f = MLP(
[state_features] + list(hidden_sizes) + [1],
activations=activation,
out_squeeze=True,
)
def step(self, x):
with torch.no_grad():
policy = self.policy.action_distribution(x)
action = policy.sample()
logp_action = self.policy.logprob_from_distribution(policy, action)
value = self.value_f(x)
return action.numpy(), logp_action.numpy(), value.numpy()
def act(self, x):
return self.step(x)[0]
class MLPQActor(nn.Module):
r"""
An actor for Q policy gradient algorithms.
The policy is an :func:`~MLP`. This differs from the :func:`~FireActorCritic` class because the output from the policy network is scaled to action space limits on the forward pass.
Args:
state_features (int): Dimensionality of the state space.
action_dim (int): Dimensionality of the action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
action_limit (float or int): Limits of the action space.
"""
def __init__(
self,
state_features: int,
action_dim: int,
hidden_sizes: Union[list, tuple],
activation: Callable,
action_limit: Union[float, int],
):
super(MLPQActor, self).__init__()
policy_layer_sizes = [state_features] + list(hidden_sizes) + [action_dim]
self.policy = MLP(policy_layer_sizes, activation, torch.tanh)
self.action_limit = action_limit
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Return output from the policy network scaled to the limits of the env action space."""
return self.action_limit * self.policy(x)
class MLPQFunction(nn.Module):
r"""
A Q function network for Q policy gradient methods.
The Q function is an :func:`~MLP`. It always takes in a (state, action) pair and returns a Q-value estimate for that pair.
Args:
state_features (int): Dimensionality of the state space.
action_dim (int): Dimensionality of the action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
"""
def __init__(
self,
state_features: int,
action_dim: int,
hidden_sizes: Union[tuple, list],
activation: Callable,
):
super().__init__()
self.qfunc = MLP(
[state_features + action_dim] + list(hidden_sizes) + [1], activation
)
def forward(self, x: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
"""
Return Q-value estimate for state, action pair (x, a).
Args:
x (torch.Tensor): Environment state.
a (torch.Tensor): Action taken by the policy.
"""
q = self.qfunc(torch.cat([x, a], dim=-1))
return torch.squeeze(q, -1) # Critical to ensure q has right shape.
class FireDDPGActorCritic(nn.Module):
r"""
An Actor Critic for the DDPG algorithm.
The policy is an :func:`~MLPQActor` and the q-value function is an :func:`~MLPQFunction`.
Args:
state_features (int): Dimensionality of the state space.
action_space (gym.spaces.Box): Environment action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
"""
def __init__(
self,
state_features: int,
action_space: gym.spaces.Box,
hidden_sizes: Optional[Union[tuple, list]] = (256, 256),
activation: Optional[Callable] = torch.relu,
):
super().__init__()
obs_dim = state_features
act_dim = action_space.shape[0]
act_limit = action_space.high[0]
# build policy and value functions
self.policy = MLPQActor(obs_dim, act_dim, hidden_sizes, activation, act_limit)
self.qfunc = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)
def act(self, x: torch.Tensor) -> torch.Tensor:
"""
Get an action from the policy.
Args:
x (torch.Tensor): Observations from the environment.
"""
with torch.no_grad():
return self.policy(x).numpy()
class FireTD3ActorCritic(nn.Module):
r"""
Actor Critic for the TD3 algorithm.
The policy is an :func:`~MLPQActor` and the q-function is an :func:`~MLPQFunction`.
Args:
state_features (int): Dimensionality of the state space.
action_space (gym.spaces.Box): Environment action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
"""
def __init__(
self,
state_features: int,
action_space: gym.spaces.Box,
hidden_sizes: Optional[Union[list, tuple]] = (256, 256),
activation: Optional[Callable] = torch.relu,
):
super(FireTD3ActorCritic, self).__init__()
obs_dim = state_features
act_dim = action_space.shape[0]
act_limit = action_space.high[0]
# build policy and value functions
self.policy = MLPQActor(obs_dim, act_dim, hidden_sizes, activation, act_limit)
self.qfunc1 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)
self.qfunc2 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)
def act(self, x: torch.Tensor) -> torch.Tensor:
"""
Get an action from the policy.
Args:
x (torch.Tensor): Observations from the environment.
"""
with torch.no_grad():
return self.policy(x).numpy()
LOG_STD_MAX = 2
LOG_STD_MIN = -20
class SquashedGaussianMLPActor(nn.Module):
"""
GaussianMLP Actor for SAC. From https://github.com/openai/spinningup/blob/master/spinup/algos/pytorch/sac/core.py
Policy network is an :func:`~MLP` with heads for mean and log standard deviation of the action distribution.
Args:
state_features (int): Dimensionality of the state space.
action_dim (int): Dimensionality of the action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the network.
action_limit (float or int): Limit of the action space.
"""
def __init__(
self,
state_features: int,
action_dim: int,
hidden_sizes: Union[list, tuple],
activation: Callable,
action_limit: Union[float, int],
):
super().__init__()
self.net = MLP([state_features] + list(hidden_sizes), activation, activation)
self.mu_layer = nn.Linear(hidden_sizes[-1], action_dim)
self.log_std_layer = nn.Linear(hidden_sizes[-1], action_dim)
self.act_limit = action_limit
def forward(
self, x: torch.Tensor, deterministic: bool = False, with_logprob: bool = True
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Get an action and action log prob from the policy.
Args:
x (torch.Tensor): state from the environment.
deterministic (bool): whether to act deterministically or not.
with_logprob (bool): whether to return with action log probability or not.
"""
net_out = self.net(x)
mu = self.mu_layer(net_out)
log_std = self.log_std_layer(net_out)
log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
std = torch.exp(log_std)
# Pre-squash distribution and sample
pi_distribution = torch.distributions.Normal(mu, std)
if deterministic:
# Only used for evaluating policy at test time.
pi_action = mu
else:
pi_action = pi_distribution.rsample()
if with_logprob:
# Compute logprob from Gaussian, and then apply correction for Tanh squashing.
# NOTE: The correction formula is a little bit magic. To get an understanding
# of where it comes from, check out the original SAC paper (arXiv 1801.01290)
# and look in appendix C. This is a more numerically-stable equivalent to Eq 21.
# Try deriving it yourself as a (very difficult) exercise. :)
logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)
logp_pi -= (2 * (np.log(2) - pi_action - F.softplus(-2 * pi_action))).sum(
axis=1
)
else:
logp_pi = None
pi_action = torch.tanh(pi_action)
pi_action = self.act_limit * pi_action
return pi_action, logp_pi
class FireSACActorCritic(nn.Module):
"""
An SAC Actor Critic class. From https://github.com/openai/spinningup/blob/master/spinup/algos/pytorch/sac/core.py
The policy is a :func:`~SquashedGaussianMLPActor` and the q-functions are both :func:`~MLPQFunctions`.
Args:
state_features (int): Dimensionality of state space.
action_space (gym.spaces.Box): Environment action space.
hidden_sizes (list or tuple): Hidden layer sizes.
activation (Function): Activation function for the networks.
"""
def __init__(
self,
state_features: int,
action_space: gym.spaces.Box,
hidden_sizes: Optional[Union[tuple, list]] = (256, 256),
activation: Optional[Callable] = torch.relu,
):
super().__init__()
obs_dim = state_features
act_dim = action_space.shape[0]
act_limit = action_space.high[0]
# build policy and value functions
self.policy = SquashedGaussianMLPActor(
obs_dim, act_dim, hidden_sizes, activation, act_limit
)
self.qfunc1 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)
self.qfunc2 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)
def act(self, x: torch.Tensor, deterministic: bool = False) -> np.ndarray:
r"""
Get action from policy.
Args:
x (torch.Tensor): State from the environment.
deterministic (bool): Whether to act deterministically.
"""
with torch.no_grad():
a, _ = self.policy(x, deterministic, False)
return a.numpy()
class FireQActorCritic(nn.Module):
r"""
Generic Q Actor Critic class.
Policy is an :func:`~MLP`. Q function is a :func:`~MLP` as well.
Args:
state_features (int): Dimensionality of state space.
action_space (gym.spaces.Box): Environment action space.
hidden_sizes (tuple or list): Hidden layer sizes.
activation (Function): Activation function for the networks.
out_activation (Function): Output activation for the networks.
"""
def __init__(
self,
state_features: int,
action_space: gym.spaces.Box,
hidden_sizes: Optional[Union[Tuple, List]] = (256, 128),
activation: Optional[Callable] = torch.relu,
out_activation: Optional[Callable] = nn.Identity,
):
super(FireQActorCritic, self).__init__()
action_dim = action_space.shape[0]
self.policy = MLP(
[state_features] + list(hidden_sizes) + [action_dim],
activations=activation,
out_act=out_activation,
)
self.qfunc = MLP(
[state_features] + list(hidden_sizes) + [action_dim],
activations=activation,
out_squeeze=True,
)
def forward(
self, x: torch.Tensor, a: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""
Get action, q value estimates for action taken, and q value estimates for previous actions.
Args:
x (torch.Tensor): State from the environment.
a (torch.Tensor): Action taken in the environment.
"""
act = self.policy(x)
        q = self.qfunc(torch.cat([x, a], dim=1))
        q_act = self.qfunc(torch.cat([x, act], dim=1))
return act, q, q_act
```
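A sketch of stepping a FireActorCritic in a discrete-action environment. It assumes gym with CartPole is installed and that the environment follows the older gym API this code base appears to target (reset() returning just the observation and step() returning four values).
```python
import gym
import torch

env = gym.make("CartPole-v1")   # Discrete action space -> CategoricalPolicy branch
ac = FireActorCritic(env.observation_space.shape[0], env.action_space)

obs = torch.as_tensor(env.reset(), dtype=torch.float32)
action, logp, value = ac.step(obs)   # numpy action, its log-prob, and the value estimate
obs2, reward, done, info = env.step(int(action))
print(action, logp, value, reward)
```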
#### File: flare/polgrad/ppo.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as f
import numpy as np
import gym
import pybullet_envs
import time
import flare.kindling as fk
from flare.kindling import utils
from typing import Optional, Any, Union, Callable, Tuple, List
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
import sys
from flare.polgrad import BasePolicyGradient
class PPO(BasePolicyGradient):
def __init__(
self,
env,
ac = fk.FireActorCritic,
hidden_sizes = (64, 64),
steps_per_epoch = 4000,
minibatch_size = None,
gamma = 0.99,
lam = 0.97,
pol_lr = 3e-4,
val_lr = 1e-3,
train_iters = 80,
clipratio = 0.2,
maxkl = 0.01,
seed = 0,
hparams = None
):
super().__init__(
env,
ac,
hidden_sizes=hidden_sizes,
steps_per_epoch=steps_per_epoch,
minibatch_size=minibatch_size,
gamma=gamma,
lam=lam,
pol_lr=pol_lr,
val_lr=val_lr,
train_iters=train_iters,
seed = seed,
hparams= hparams
)
self.clipratio = clipratio
self.maxkl = maxkl
def calc_pol_loss(self, logps, logps_old, advs):
ratio = torch.exp(logps - logps_old)
clipped_adv = torch.clamp(ratio, 1 - self.clipratio, 1 + self.clipratio) * advs
pol_loss = -(torch.min(ratio * advs, clipped_adv)).mean()
kl = (logps_old - logps).mean().item()
return pol_loss, kl
def backward(self, trainer, loss, optimizer, optimizer_idx):
pass
def training_step(self, batch, batch_idx, optimizer_idx):
states, actions, advs, rets, logps_old = batch
if optimizer_idx == 0:
stops = 0
stopslst = []
policy, logps = self.ac.policy(states, actions)
pol_loss_old, kl = self.calc_pol_loss(logps, logps_old, advs)
for i in range(self.train_iters):
self.policy_optimizer.zero_grad()
policy, logps = self.ac.policy(states, actions)
pol_loss, kl = self.calc_pol_loss(logps, logps_old, advs)
if kl > 1.5 * self.maxkl:
stops += 1
stopslst.append(i)
break
pol_loss.backward()
self.policy_optimizer.step()
log = {
"PolicyLoss": pol_loss_old.item(),
"DeltaPolLoss": (pol_loss - pol_loss_old).item(),
"KL": kl,
"Entropy": policy.entropy().mean().item(),
"TimesEarlyStopped": stops,
"AvgEarlyStopStep": np.mean(stopslst) if len(stopslst) > 0 else 0
}
loss = pol_loss_old
elif optimizer_idx == 1:
values_old = self.ac.value_f(states)
val_loss_old = self.calc_val_loss(values_old, rets)
for i in range(self.train_iters):
self.value_optimizer.zero_grad()
values = self.ac.value_f(states)
val_loss = self.calc_val_loss(values, rets)
val_loss.backward()
self.value_optimizer.step()
delta_val_loss = (val_loss - val_loss_old).item()
log = {"ValueLoss": val_loss_old.item(), "DeltaValLoss": delta_val_loss}
loss = val_loss
self.tracker_dict.update(log)
return {"loss": loss, "log": log, "progress_bar": log}
def learn(
env_name,
epochs: Optional[int] = 100,
minibatch_size: Optional[int] = None,
steps_per_epoch: Optional[int] = 4000,
hidden_sizes: Optional[Union[Tuple, List]] = (64, 32),
gamma: Optional[float] = 0.99,
lam: Optional[float] = 0.97,
hparams = None,
seed = 0
):
from flare.polgrad.base import runner
minibatch_size = 4000 if minibatch_size is None else minibatch_size
runner(
env_name,
PPO,
epochs=epochs,
minibatch_size=minibatch_size,
        hidden_sizes=hidden_sizes,
gamma=gamma,
lam=lam,
hparams=hparams,
seed = seed
)
```
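calc_pol_loss above is the standard PPO-clip objective plus an approximate KL used for early stopping; the same computation on made-up log-probabilities and advantages.
```python
import torch

clipratio = 0.2
logps_old = torch.tensor([-0.9, -1.2, -0.5])
logps     = torch.tensor([-0.8, -1.4, -0.6])
advs      = torch.tensor([ 1.0, -0.5,  0.3])

ratio = torch.exp(logps - logps_old)
clipped_adv = torch.clamp(ratio, 1 - clipratio, 1 + clipratio) * advs
pol_loss = -(torch.min(ratio * advs, clipped_adv)).mean()
approx_kl = (logps_old - logps).mean().item()   # compared against 1.5 * maxkl above
print(pol_loss.item(), approx_kl)
```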
#### File: flare/qpolgrad/ddpg.py
```python
import numpy as np
import torch
import gym
import torch.nn.functional as F
from termcolor import cprint
from flare.qpolgrad.base import BaseQPolicyGradient
import flare.kindling as fk
from flare.kindling import ReplayBuffer
from typing import Optional, Union, Callable, List, Tuple
class DDPG(BaseQPolicyGradient):
"""
Implementation of the Deep Deterministic Policy Gradient (DDPG) algorithm.
"""
def __init__(
self,
env_fn: Callable,
actorcritic: Callable = fk.FireDDPGActorCritic,
epochs: int = 100,
seed: Optional[int] = 0,
steps_per_epoch: Optional[int] = 4000,
replay_size: Optional[int] = int(1e6),
gamma: Optional[float] = 0.99,
polyak: Optional[float] = 0.95,
pol_lr: Optional[float] = 1e-3,
q_lr: Optional[float] = 1e-3,
hidden_sizes: Optional[Union[tuple, list]] = (256, 128),
bs: Optional[int] = 100,
warmup_steps: Optional[int] = 10000,
update_after: Optional[int] = 1000,
update_every: Optional[int] = 50,
act_noise: Optional[float] = 0.1,
        buffer: Optional[Callable] = ReplayBuffer,
hparams = None
):
super().__init__(
env_fn,
actorcritic,
epochs=epochs,
seed=seed,
steps_per_epoch=steps_per_epoch,
replay_size=replay_size,
gamma=gamma,
polyak=polyak,
pol_lr=pol_lr,
q_lr=q_lr,
hidden_sizes=hidden_sizes,
bs=bs,
warmup_steps=warmup_steps,
update_after=update_after,
update_every=update_every,
act_noise=act_noise,
hparams=hparams
)
def configure_optimizers(self):
self.policy_optimizer = torch.optim.Adam(self.ac.policy.parameters(), lr=self.pol_lr)
self.q_optimizer = torch.optim.Adam(self.ac.qfunc.parameters(), lr=self.q_lr)
return self.policy_optimizer, self.q_optimizer
def calc_pol_loss(self, states):
q_pi = self.ac.qfunc(states, self.ac.policy(states))
return -q_pi.mean()
def calc_qfunc_loss(self, data):
o, o2, a, r, d = data
q = self.ac.qfunc(o, a)
# Bellman backup for Q function
with torch.no_grad():
q_pi_targ = self.ac_targ.qfunc(o2, self.ac_targ.policy(o2))
backup = r + self.gamma * (1 - d) * q_pi_targ
# MSE loss against Bellman backup
loss_q = ((q - backup) ** 2).mean()
# Useful info for logging
loss_info = dict(MeanQValues=q.mean().detach().numpy())
return loss_q, loss_info
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
self.policy_optimizer.zero_grad()
policy_loss = self.calc_pol_loss(batch[0])
policy_loss.backward()
self.policy_optimizer.step()
log = {
"PolicyLoss": policy_loss
}
loss = policy_loss
if optimizer_idx == 1:
# First run one gradient descent step for Q.
self.q_optimizer.zero_grad()
q_loss, loss_info = self.calc_qfunc_loss(batch)
q_loss.backward()
self.q_optimizer.step()
# Freeze Q-network so you don't waste computational effort
# computing gradients for it during the policy learning step.
for p in self.ac.qfunc.parameters():
p.requires_grad = False
# Unfreeze Q-network so you can optimize it at next DDPG step.
for p in self.ac.qfunc.parameters():
p.requires_grad = True
# Finally, update target networks by polyak averaging.
with torch.no_grad():
for p, p_targ in zip(self.ac.parameters(), self.ac_targ.parameters()):
# NB: We use an in-place operations "mul_", "add_" to update target
# params, as opposed to "mul" and "add", which would make new tensors.
p_targ.data.mul_(self.polyak)
p_targ.data.add_((1 - self.polyak) * p.data)
log = dict(QLoss=q_loss, **loss_info)
loss = q_loss
self.tracker_dict.update(log)
return {"loss": loss, "log": log, "progress_bar": log}
def backward(self, trainer, loss, optimizer, optimizer_idx):
pass
def optimizer_step(
self,
epoch,
batch_idx,
optimizer,
optimizer_idx,
second_order_closure = None
):
optimizer.zero_grad()
def learn(
env_name,
epochs: Optional[int] = 100,
batch_size: Optional[int] = None,
steps_per_epoch: Optional[int] = 4000,
hidden_sizes: Optional[Union[Tuple, List]] = (256, 256),
gamma: Optional[float] = 0.99,
hparams = None,
seed = 0
):
from flare.qpolgrad.base import runner
batch_size = 100 if batch_size is None else batch_size
runner(
env_name,
DDPG,
fk.FireDDPGActorCritic,
epochs=epochs,
bs=batch_size,
hidden_sizes=hidden_sizes,
gamma=gamma,
hparams=hparams,
seed = seed
)
```
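The target-network update buried in training_step above is plain polyak averaging; a minimal standalone sketch with two small networks and the default polyak=0.95.
```python
import torch
import torch.nn as nn

polyak = 0.95
net = nn.Linear(3, 1)
net_targ = nn.Linear(3, 1)
net_targ.load_state_dict(net.state_dict())   # target starts as an exact copy

# ...after some gradient steps on net...
with torch.no_grad():
    for p, p_targ in zip(net.parameters(), net_targ.parameters()):
        p_targ.data.mul_(polyak)                 # keep 95% of the old target weights
        p_targ.data.add_((1 - polyak) * p.data)  # blend in 5% of the online weights
```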
|
{
"source": "jfpettit/rl_bolts",
"score": 2
}
|
#### File: rl_bolts/rl_bolts/algorithms.py
```python
__all__ = ['PPO']
# Cell
import numpy as np
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from rl_bolts import neuralnets as nns
from rl_bolts import losses as l
from .env_wrappers import BestPracticesWrapper, ToTorchWrapper, StateNormalizeWrapper
from .buffers import PGBuffer
from .datasets import PolicyGradientRLDataset
from .loops import polgrad_interaction_loop
import rl_bolts.utils as utils
import pytorch_lightning as pl
from argparse import Namespace
from typing import Optional, Union
# Cell
class PPO(pl.LightningModule):
"""
Implementation of the Proximal Policy Optimization (PPO) algorithm. See the paper: https://arxiv.org/abs/1707.06347
It is a PyTorch Lightning Module. See their docs: https://pytorch-lightning.readthedocs.io/en/latest/
Args:
- env (str): Environment to run in. Handles vector observation environments with either gym.spaces.Box or gym.spaces.Discrete
action space.
- hidden_sizes (tuple): Hidden layer sizes for actor-critic network.
- gamma (float): Discount factor.
- lam (float): Lambda factor for GAE-Lambda calculation.
- clipratio (float): Clip ratio for PPO-clip objective.
- train_iters (int): How many steps to take over the latest data batch.
- batch_size (int): How many interactions to collect per update.
- pol_lr (float): Learning rate for the policy optimizer.
- val_lr (float): Learning rate for the value optimizer.
- maxkl (float): Max allowed KL divergence between policy updates.
- seed (int): Random seed for pytorch and numpy
- evaluate (bool): Whether to run eval episodes at the end of each epoch. Saves episodes using gym.wrappers.Monitor.
    - monitor_dir (str): Directory for monitor to write to. Default is 'video_results'.
"""
def __init__(
self,
env: str,
hidden_sizes: Optional[tuple] = (32, 32),
gamma: Optional[float] = 0.99,
lam: Optional[float] = 0.97,
clipratio: Optional[float] = 0.2,
train_iters: Optional[int] = 80,
batch_size: Optional[int] = 4000,
pol_lr: Optional[float] = 3e-4,
val_lr: Optional[float] = 1e-3,
maxkl: Optional[float] = 0.01,
seed: Optional[int] = 0,
evaluate: Optional[bool] = True,
monitor_dir: Optional[str] = 'video_results'
):
super().__init__()
np.random.seed(seed)
torch.manual_seed(seed)
hparams = Namespace(
**{
'env':env,
'hidden_sizes':hidden_sizes,
'gamma':gamma,
'lam':lam,
'clipratio':clipratio,
'train_iters':train_iters,
'batch_size':batch_size,
'pol_lr':pol_lr,
'val_lr':val_lr,
'maxkl':maxkl
}
)
self.hparams = hparams
env = gym.make(env)
self.env = ToTorchWrapper(env)
self.actor_critic = nns.ActorCritic(
self.env.observation_space.shape[0],
self.env.action_space,
hidden_sizes=hidden_sizes,
)
self.gamma = gamma
self.clipratio = clipratio
self.train_iters = train_iters
self.batch_size = batch_size
self.pol_lr = pol_lr
self.val_lr = val_lr
self.maxkl = maxkl
self.evaluate = evaluate
if self.evaluate:
eval_env = gym.wrappers.Monitor(env, monitor_dir, force=True)
self.eval_env = ToTorchWrapper(eval_env)
self.tracker_dict = {}
self.buffer = PGBuffer(
self.env.observation_space.shape,
self.env.action_space.shape,
size = self.batch_size,
gamma = self.gamma
)
self.inner_loop()
def configure_optimizers(self):
self.policy_optimizer = torch.optim.Adam(self.actor_critic.policy.parameters(), lr=self.pol_lr)
self.value_optimizer = torch.optim.Adam(self.actor_critic.value_f.parameters(), lr=self.val_lr)
return self.policy_optimizer, self.value_optimizer
def forward(self, x, a = None):
out = self.actor_critic(x, a)
return out
def training_step(self, batch, batch_idx, optimizer_idx):
states, actions, advs, rets, logps_old = batch
if optimizer_idx == 0:
stops = 0
stopslst = []
policy, logps = self.actor_critic.policy(states, actions)
pol_loss_old, kl = l.ppo_clip_policy_loss(logps, logps_old, advs, clipratio=self.clipratio)
for i in range(self.train_iters):
self.policy_optimizer.zero_grad()
policy, logps = self.actor_critic.policy(states, actions)
pol_loss, kl = l.ppo_clip_policy_loss(logps, logps_old, advs, clipratio=self.clipratio)
if kl > 1.5 * self.maxkl:
stops += 1
stopslst.append(i)
break
pol_loss.backward()
self.policy_optimizer.step()
log = {
"PolicyLoss": pol_loss_old.item(),
"DeltaPolLoss": (pol_loss - pol_loss_old).item(),
"KL": kl,
"Entropy": policy.entropy().mean().item(),
"TimesEarlyStopped": stops,
"AvgEarlyStopStep": np.mean(stopslst) if len(stopslst) > 0 else 0
}
loss = pol_loss_old
elif optimizer_idx == 1:
values_old = self.actor_critic.value_f(states)
val_loss_old = l.actor_critic_value_loss(values_old, rets)
for i in range(self.train_iters):
self.value_optimizer.zero_grad()
values = self.actor_critic.value_f(states)
val_loss = l.actor_critic_value_loss(values, rets)
val_loss.backward()
self.value_optimizer.step()
delta_val_loss = (val_loss - val_loss_old).item()
log = {"ValueLoss": val_loss_old.item(), "DeltaValLoss": delta_val_loss}
loss = val_loss
self.tracker_dict.update(log)
log.update(self.tracker_dict)
return {"loss": loss, "log": log, "progress_bar": log}
def inner_loop(self) -> None:
buffer, infos, _ = polgrad_interaction_loop(self.env, self.actor_critic, self.buffer, self.batch_size)
self.data = buffer.get()
self.tracker_dict.update(infos)
def on_epoch_end(self):
utils.printdict(self.tracker_dict)
self.tracker_dict = {}
self.inner_loop()
self.eval_episodes(n_episodes=1)
def train_dataloader(self):
dataset = PolicyGradientRLDataset(self.data)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, sampler=None, num_workers=4)
return dataloader
def backward(self, *args, **kwargs):
pass
def eval_episodes(self, n_episodes = 3):
if self.evaluate:
with torch.no_grad():
rets_lst = []
lens_lst = []
for i in range(n_episodes):
episode_return = 0
episode_length = 0
obs = self.eval_env.reset()
done = False
while not done:
action, logp, value = self.actor_critic.step(obs)
obs, r, done, _ = self.eval_env.step(action)
episode_return += r
episode_length += 1
if done:
rets_lst.append(episode_return)
lens_lst.append(episode_length)
episode_return = 0
episode_length = 0
obs = self.eval_env.reset()
dct = {
"NumEvalEpisodes": n_episodes,
"MeanEvalEpReturn": np.mean(rets_lst),
"StdEvalEpReturn": np.std(rets_lst),
"MeanEvalEpLength": np.mean(lens_lst),
"StdEvalEpLength": np.std(lens_lst)
}
self.tracker_dict.update(dct)
```
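A sketch of how the Lightning module above would be launched. The exact Trainer arguments depend on the pytorch-lightning version this repo targets, and max_epochs is an arbitrary choice here; it also assumes PPO is importable from rl_bolts.algorithms.
```python
import pytorch_lightning as pl

model = PPO("CartPole-v1", hidden_sizes=(32, 32), batch_size=4000, evaluate=False)
trainer = pl.Trainer(max_epochs=50)
trainer.fit(model)
```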
|