| metadata (dict) | text (string, lengths 60 to 3.49M) |
| --- | --- |
{
"source": "johnsbuck/CountdownGameSolver",
"score": 4
} |
#### File: CountdownGameSolver/Map/InclusionMap.py
```python
class TreeNode(object):
def __init__(self, key):
self.key = key
self.forest = []
self.node_forest = []
self.forest_iter_index = 0
self.node_iter_index = 0
def add_tree(self, tree):
self.forest.append(tree)
def init_iterator(self, start_index=0):
self.node_iter_index = start_index
def next_tree(self):
if self.forest_iter_index < len(self.forest):
self.forest_iter_index += 1
return self.forest[self.forest_iter_index - 1]
elif self.node_iter_index < len(self.node_forest):
next_node = self.node_forest[self.node_iter_index]
next_node.init_iterator(self.node_iter_index)
tree = next_node.next_tree()
if tree is None:
self.node_iter_index += 1
return self.next_tree()
else:
return tree
else:
return None
def reset_iterator(self):
self.forest_iter_index = 0
self.node_iter_index = 0
for node in self.node_forest:
node.reset_iterator()
def __str__(self):
return str(self.key)
def __repr__(self):
return self.__str__()
class InclusionMap(object):
"""The mapping used to organize and initialize TreeNodes in relation
to each other.
Args:
size (int): The size of the number list used in the Numbers Round.
"""
def __init__(self, size=6):
# The main dict map used to store each TreeNode
self.map = {}
# The secondary storage of all trees
self.tree_list = []
# Add size of number listing
self._size = size
# Initializing the map values and TreeNodes
self._generate_map(list(range(self._size)))
self._generate_refs()
def add(self, new_tree):
        # Map keys are sorted tuples, so sort the set difference explicitly.
        key = tuple(sorted(set(range(self._size)) - set(new_tree.key)))
if key != ():
self.map[key].add_tree(new_tree)
self.tree_list.append(new_tree)
def get_trees(self, key):
tree_list = []
node = self.map[key]
node.reset_iterator()
next_tree = node.next_tree()
while next_tree is not None:
tree_list.append(next_tree)
next_tree = node.next_tree()
return tree_list
def all_trees(self):
return self.tree_list
def _generate_map(self, numbers, s=(), idx=0):
"""Generates the main map used to explore the set of NumberTrees.
Args:
numbers (list[int]): The set of numbers that can be used.
            s (tuple): The tuple of indices used as the key for the dictionary.
idx (int): The starting index for reading in numbers
"""
# If the index is the at the end of the list, exit function
if idx == len(numbers):
return
for i in range(idx, len(numbers)):
num = numbers[i]
# For each number, create a new tuple key and new TreeNode object
s1 = s + (num,)
self.map[s1] = TreeNode(s1)
# Create new keys and TreeNodes based on the current key
self._generate_map(numbers, s1, i+1)
def _generate_refs(self):
"""Adds references from one TreeNode to another TreeNode
This method constructs a structure between different TreeNodes with the following properties:
        1. A TreeNode with a key of length n will only reference other TreeNodes with a key of length n+1.
2. A TreeNode with a key of set k will only reference other TreeNodes that are a superset of set k.
Example:
TreeNode of key (0, 1, 3) connects to TreeNodes of keys (0, 1, 2, 3), (0, 1, 3, 4), (0, 1, 3, 5), etc.
TreeNode of key (0) connects to TreeNodes of keys (0, 1), (0, 2), etc.
TreeNode of key (0, 1, 2, 3, ..., n) of max length n will connect to no TreeNodes.
"""
# Obtain list of keys and sort based on length
keys = list(self.map.keys())
keys.sort(key=lambda x: len(x))
# Add references to each TreeNode in the map
for key in keys:
for ref_key in keys:
# Keys are sorted by length, we have passed all the keys we care about
if len(ref_key) > (len(key) + 1):
break
# Reference key isn't proper length, skip reference key
if len(ref_key) != (len(key) + 1):
continue
# Reference key isn't superset of current key, skip reference key
if not all(item in ref_key for item in key):
continue
# Properties validated; adding reference to specific TreeNode
self.map[key].node_forest.append(self.map[ref_key])
if __name__ == "__main__":
test = InclusionMap(6)
keys = list(test.map.keys())
keys.sort(key=lambda x: len(x))
print(len(keys), keys)
print()
for key in test.map:
print(key, test.map[key].node_forest)
``` |
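The `_generate_map`/`_generate_refs` docstrings above describe the key structure: every non-empty sorted tuple of indices gets a TreeNode, and each node references the nodes whose keys are supersets one element longer. A minimal standalone sketch of that layout (the helper below is illustrative and not part of the repository):

```python
from itertools import combinations

def superset_keys(key, size):
    """Keys of length len(key) + 1 that contain every index in `key`."""
    remaining = [i for i in range(size) if i not in key]
    return [tuple(sorted(key + (i,))) for i in remaining]

# All non-empty keys for a Numbers Round of size 4, as _generate_map would create them.
size = 4
keys = [combo for n in range(1, size + 1) for combo in combinations(range(size), n)]

print(len(keys))                                # 15 == 2**4 - 1 non-empty subsets
print(superset_keys((0, 1), size))              # [(0, 1, 2), (0, 1, 3)]
print(superset_keys(tuple(range(size)), size))  # [] -- the full-length key references nothing
```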
{
"source": "johnsbuck/Final-Fantasy-Elminination-Rounds-Data-Analysis",
"score": 4
} |
#### File: Final-Fantasy-Elminination-Rounds-Data-Analysis/ff_graph_views/line_graphs.py
```python
import matplotlib.pyplot as plt
import matplotlib.cm as mplcm
import matplotlib.colors as colors
def plot_percentages(data):
# Number of Final Fantasy Games (excluding Total Votes)
NUM_GAMES = len(data) - 1
# Number of Rounds (excluding Total Votes and final game not to be eliminated)
NUM_ROUNDS = len(data) - 2
cmap = plt.cm.tab20
cmaplist = [cmap(i * (1 / NUM_GAMES)) for i in range(NUM_GAMES)]
fig = plt.figure()
ax = fig.add_subplot(111)
for key in data:
if key != "Total Votes":
# Convert vote data to percentage of total votes
y_data = [int((data[key][i] / data["Total Votes"][i]) * 100) for i in range(len(data[key]))]
# Set X values to start at 1
x_data = list(range(1, len(y_data) + 1))
# Plot Lines and Points
ax.plot(x_data, y_data, label="Final Fantasy " + key, color=cmaplist[int(key) - 1])
ax.scatter(list(range(1, len(y_data) + 1)), y_data, color=cmaplist[int(key) - 1])
plt.title("Final Fantasy Games Elimination Rounds - Results")
# Set X ticks to be every round of elimination [1, NUM_GAMES - 1]
plt.xticks(list(range(1, NUM_ROUNDS + 1)))
plt.xlabel("Nth Round of Votes")
# Set Y ticks to be every 5 percent instead of every 10 percent.
plt.yticks(list(range(0, 105, 5)))
plt.ylabel("Percentage (%)")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
def plot_votes(data, log=False):
# Number of Data Rows
NUM_ROWS = len(data)
# Number of Rounds (minus total votes and final game not eliminated)
NUM_ROUNDS = len(data) - 2
cmap = plt.cm.tab20
cmaplist = [cmap(i * (1 / NUM_ROWS)) for i in range(NUM_ROWS)]
cmaplist = {k: x for k, x in zip(data.keys(), cmaplist)}
fig = plt.figure()
ax = fig.add_subplot(111)
for key in data:
label = key
if label != "Total Votes":
label = "Final Fantasy " + label
x_data = list(range(1, len(data[key]) + 1))
ax.plot(x_data, data[key], label=label)
ax.scatter(list(range(1, len(data[key]) + 1)), data[key])
plt.title("Final Fantasy Games Elimination Rounds - Results")
plt.xticks(list(range(1, NUM_ROUNDS + 1)))
plt.xlabel("Nth Round of Votes")
# Set to log scale if log parameter is true
if log:
plt.yscale("log")
plt.ylabel("# of Votes (log10)")
else:
plt.ylabel("# of Votes")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
``` |
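Both helpers expect `data` to map each game's number (as a string) to a list of per-round vote counts, plus a "Total Votes" entry of the same length; `plot_percentages` additionally assumes the game keys run from "1" to the number of games, since it indexes the colour list with `int(key) - 1`. A hedged sketch with made-up numbers, assuming the module above is importable as `line_graphs`:

```python
from line_graphs import plot_percentages, plot_votes  # assumed import path

# Hypothetical vote counts for four games over three elimination rounds.
data = {
    "1": [120, 150, 300],
    "2": [100, 130, 250],
    "3": [60, 90, 0],
    "4": [20, 0, 0],
    "Total Votes": [300, 370, 550],
}

plot_votes(data)             # raw counts per round
plot_votes(data, log=True)   # same data on a log10 y-axis
plot_percentages(data)       # each game's share of the total vote per round
```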
{
"source": "johnsbuck/game_track_app",
"score": 3
} |
#### File: game_track_app/tools/event_csv_import.py
```python
import csv
from datetime import datetime
class EventDataImporter:
@staticmethod
def generate_dataset(filename="../data/Schedule-Grid view.csv", encoding="utf-8-sig"):
        with open(filename, 'r', encoding=encoding) as csv_file:
            reader = csv.reader(csv_file)
            next(reader)  # skip the header row
            data = []
            for line in reader:
                notes = line[0]
                if line[4] != '':
                    notes += '\n' + line[4]
                if line[3] != '-':
                    notes += "\nPercent Completed: " + line[3]
                if line[5] != '':
                    notes += "\nMinutes Played: " + line[5]
                data.append({"game": line[1], "date": datetime.strptime(line[2], "%m/%d/%Y"), "notes": notes})
        return data
def main():
data = EventDataImporter.generate_dataset()
print(data)
if __name__ == "__main__":
main()
``` |
{
"source": "johnsbuck/MapGeneration",
"score": 3
} |
#### File: MapGeneration/NoiseGenerator/INoise.py
```python
from abc import ABCMeta, abstractmethod
class INoise(metaclass=ABCMeta):
def __init__(self):
raise NotImplementedError("This object is an interface that has no implementation.")
@property
@abstractmethod
def NOISE_LIST(self):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise1d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise2d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise3d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
```
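A minimal sketch of a concrete implementation: a subclass has to override `__init__` (the interface's constructor raises) along with `NOISE_LIST` and the three `noiseNd` methods. The `WhiteNoise` class and its sine-hash are purely illustrative and not part of the repository:

```python
import math

from NoiseGenerator.INoise import INoise  # assumed import path for the interface above


class WhiteNoise(INoise):
    """Toy noise source returning deterministic pseudo-random values in [0, 1)."""

    def __init__(self, seed=0):
        # Intentionally do not call super().__init__(), which raises NotImplementedError.
        self.seed = seed

    @property
    def NOISE_LIST(self):
        return ["white"]

    def _hash(self, *values):
        frac, _ = math.modf(math.sin(sum(values) + self.seed) * 43758.5453)
        return abs(frac)

    def noise1d(self, point, frequency):
        return self._hash(point * frequency)

    def noise2d(self, point, frequency):
        return self._hash(point[0] * frequency, point[1] * frequency)

    def noise3d(self, point, frequency):
        return self._hash(point[0] * frequency, point[1] * frequency, point[2] * frequency)


print(WhiteNoise(seed=1).noise2d((0.5, 0.25), frequency=4.0))
```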
#### File: MapGeneration/Random/RandomOrg.py
```python
import requests
import json
class RandomOrg(object):
""" Random.org RNG API
This class is used to connect to a Random.org Account for access to their API for generating random
integers and decimals for various uses.
"""
def __init__(self, api_key):
"""Constructor
Args:
            api_key (str): The API key used by Random.org. Must be obtained through a Random.org account.
"""
self._api_key = api_key
@staticmethod
def __call__(method, params, identifier=42):
""" Generalizes the request done to Random.org for a given method and parameters given.
Args:
method (str): A method defined by Random.org that is available for use. (i.e. generateIntegers)
params (dict): A dictionary containing things such as the API key. Go to Random.org for an example.
identifier (int): An integer used to confirm that the request received is the one sent.
Returns:
(dict) A JSON response from Random.org converted into a Python dictionary.
"""
request = {
"jsonrpc": "2.0",
"method": method,
"params": params,
"id": identifier
}
response = requests.post("https://api.random.org/json-rpc/2/invoke",
data=json.dumps(request),
headers={'content-type': 'application/json'},
timeout=120.0)
if 'error' in response.json():
raise ConnectionError("Error received from Random.org!" +
"\n\tCode: " + str(response.json()['error']['code']) +
"\n\tMessage: " + response.json()['error']['message'])
return response.json()
def set_api_key(self, api_key):
""" Sets the API key to a different value than what was initialized
Args:
            api_key (str): The API key used by Random.org. Must be obtained through a Random.org account.
"""
self._api_key = api_key
def randint(self, low, high, n=1, replacement=True):
""" Obtains n random integers within the user-defined range (inclusive).
Args:
low (int): The lowest integer value to receive.
high (int): The highest integer value to receive.
            n (int): The number of integer values to receive. (Default: 1)
replacement (bool): If true, the numbers may not be unique (like rolling two 6s from two dice rolls).
If false, will make all values unique. (Default: true)
Returns:
(list) A list of integers from the received response.
"""
params = {
"apiKey": self._api_key,
"n": n,
"min": low,
"max": high,
"replacement": replacement
}
response = self.__call__("generateIntegers", params)
return response['result']['random']['data']
def random(self, n=1, decimal_places=8, replacement=True):
"""Obtains n random decimals from a uniform distribution [0, 1].
Args:
            n (int): The number of decimal values to receive. (Default: 1)
            decimal_places (int): The number of decimal places to include in each random decimal. (Default: 8)
replacement (bool): If true, the numbers may not be unique (like rolling two 6s from two dice rolls).
If false, will make all values unique.
Returns:
            (list) A list of decimals from the received response.
"""
params = {
"apiKey": self._api_key,
"n": n,
"decimalPlaces": decimal_places,
"replacement": replacement
}
response = self.__call__("generateDecimalFractions", params)
return response['result']['random']['data']
def gauss(self, mu=0., sigma=1., n=1, significant_digits=8):
"""Obtains n decimals from a user-defined gaussian distribution.
Args:
            mu (float): The mean of the gaussian distribution. (Default: 0)
            sigma (float): The standard deviation of the gaussian distribution. (Default: 1)
            n (int): The number of decimal values to receive. (Default: 1)
            significant_digits (int): The number of significant digits to include in each random decimal. (Default: 8)
        Returns:
            (list) A list of decimals from the received response.
        """
params = {
"apiKey": self._api_key,
"n": n,
"mean": mu,
"standardDeviation": sigma,
"significantDigits": significant_digits
}
response = self.__call__("generateGaussian", params)
return response['result']['random']['data']
def choice(self, seq):
"""Returns a random object from the list given by the user.
Args:
seq (list): A list of objects to choose from.
Returns:
(Object) An object from the list given by the user.
"""
if len(seq) == 0:
raise IndexError("The length of the sequence is less than 1.")
        # randint's bounds are inclusive and it returns a list, so cap at len(seq) - 1 and unpack.
        pick = self.randint(0, len(seq) - 1)[0]
return seq[pick]
```
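Every method above issues a JSON-RPC call to Random.org, so real use needs a valid API key and network access. A hedged usage sketch (the key is a placeholder and the import path assumes the module layout shown in the heading):

```python
from Random.RandomOrg import RandomOrg  # assumed import path

rng = RandomOrg(api_key="00000000-0000-0000-0000-000000000000")  # placeholder key

dice = rng.randint(1, 6, n=2)                  # e.g. [3, 5]
fractions = rng.random(n=3, decimal_places=4)  # e.g. [0.1234, 0.5678, 0.9012]
samples = rng.gauss(mu=0.0, sigma=1.0, n=5)
winner = rng.choice(["rock", "paper", "scissors"])
print(dice, fractions, samples, winner)
```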
#### File: MapGeneration/Utilities/sorts.py
```python
def mergesort(points, idx, p=None, r=None, descending=False):
if p is None:
p = 0
if r is None:
r = len(points) - 1
if p < r:
m = int((p + r - 1)/2)
mergesort(points, idx, p, m, descending)
mergesort(points, idx, m+1, r, descending)
_merge(points, p, m, r, idx, descending)
def _merge(points, start, mid, end, idx, descending):
left = points[start:mid+1]
right = points[mid+1:end+1]
k = start
while len(left) != 0 and len(right) != 0:
if not descending:
            if left[0][idx] <= right[0][idx]:
points[k] = left.pop(0)
else:
points[k] = right.pop(0)
else:
            if left[0][idx] >= right[0][idx]:
points[k] = left.pop(0)
else:
points[k] = right.pop(0)
k += 1
while len(left) != 0:
points[k] = left.pop(0)
k += 1
while len(right) != 0:
points[k] = right.pop(0)
k += 1
def quicksort(points, idx, p=None, r=None, descending=False):
if p is None:
p = 0
if r is None:
r = len(points) - 1
if p >= r:
return
    # _partition expects (points, p, r, idx, descending); recurse on each side of the pivot.
    q = _partition(points, p, r, idx, descending)
    quicksort(points, idx, p, q - 1, descending)
    quicksort(points, idx, q + 1, r, descending)
def quickselect(points, rank, idx, p=None, r=None):
if rank < 0 or rank > len(points) - 1:
raise ValueError("Rank must be a viable index of points")
if p is None:
p = 0
if r is None:
r = len(points) - 1
q = _partition(points, p, r, idx)
    # Recurse only into the side of the pivot that can still contain the requested rank.
    if q > rank:
        quickselect(points, rank, idx, p, q - 1)
    elif q < rank:
        quickselect(points, rank, idx, q + 1, r)
def _partition(points, p, r, idx, descending=False):
q = p
    for i in range(p, r):
        if not descending:
            # Ascending: move anything smaller than the pivot (points[r]) in front of it.
            if points[i].coord[idx] < points[r].coord[idx]:
                _swap(points, q, i)
                q += 1
        else:
            # Descending: move anything larger than the pivot in front of it.
            if points[i].coord[idx] > points[r].coord[idx]:
                _swap(points, q, i)
                q += 1
_swap(points, q, r)
return q
def _swap(points, i, j):
temp = points[i]
points[i] = points[j]
points[j] = temp
``` |
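After the fixes above, `quicksort`/`quickselect` sort in place by one coordinate of objects exposing a `coord` sequence, while `mergesort` compares plain indexable rows. A small sketch exercising both (the `Point` type and the import path are assumptions for illustration):

```python
from collections import namedtuple

from Utilities.sorts import mergesort, quicksort, quickselect  # assumed import path

Point = namedtuple("Point", ["coord"])

points = [Point((3, 1)), Point((1, 5)), Point((2, 2)), Point((0, 9))]
quicksort(points, idx=0)                     # sort ascending by x-coordinate
print([p.coord[0] for p in points])          # [0, 1, 2, 3]

points = [Point((3, 1)), Point((1, 5)), Point((2, 2)), Point((0, 9))]
quickselect(points, rank=2, idx=0)           # reorder so points[2] holds the 3rd-smallest x
print(points[2].coord[0])                    # 2

rows = [[3, 1], [1, 5], [2, 2], [0, 9]]
mergesort(rows, idx=0)                       # plain rows, compared by rows[i][idx]
print(rows)                                  # [[0, 9], [1, 5], [2, 2], [3, 1]]
```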
{
"source": "johnsbuck/nanogiants-hackathon-2021",
"score": 4
} |
#### File: scripts/tools/csv_file_tools.py
```python
import csv
def get_csv_data(file, row_checker=lambda x: True):
"""
Args:
file (str): File path for opening the csv file.
row_checker (function): Function that returns True if row is valid, otherwise False.
Default function that returns True.
Returns:
List[List]: A list containing each row from the CSV file.
"""
data = []
with open(file, "r", encoding="utf-8") as f:
reader = csv.reader(f)
for row in reader:
if not row_checker(row):
continue
data.append(row)
return data
``` |
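A short usage sketch; the file name, the import path, and the row check are illustrative:

```python
from tools.csv_file_tools import get_csv_data  # assumed import path

# Keep only rows whose first column is non-empty, e.g. to drop blank lines.
rows = get_csv_data("entries.csv", row_checker=lambda row: bool(row) and row[0] != "")
print(len(rows))
```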
{
"source": "johnsca/caas",
"score": 2
} |
#### File: mysql/reactive/mysql.py
```python
import yaml
from charms.layer.basic import pod_spec_set
from charms.reactive import when, when_not
from charms.reactive.flags import set_flag, get_state
from charmhelpers.core.hookenv import (
log,
metadata,
status_set,
config,
network_get,
relation_id,
resource_get,
)
@when_not('mysql.configured')
def config_gitlab():
status_set('maintenance', 'Configuring mysql container')
spec = make_pod_spec()
log('set pod spec:\n{}'.format(spec))
pod_spec_set(spec)
set_flag('mysql.configured')
status_set('maintenance', 'Creating mysql container')
def make_pod_spec():
with open('reactive/spec_template.yaml') as spec_file:
pod_spec_template = spec_file.read()
md = metadata()
cfg = config()
user = cfg.get('user')
set_flag('user', user)
password = cfg.get('password')
set_flag('password', password)
database = cfg.get('database')
set_flag('database', database)
root_password = cfg.get('root_password')
set_flag('root_password', root_password)
# Grab the details from resource-get.
mysql_image_details_path = resource_get("mysql_image")
if not mysql_image_details_path:
raise Exception("unable to retrieve mysql image details")
with open(mysql_image_details_path, "rt") as f:
mysql_image_details = yaml.load(f)
docker_image_path = mysql_image_details['registrypath']
docker_image_username = mysql_image_details['username']
docker_image_password = mysql_image_details['password']
data = {
'name': md.get('name'),
'docker_image_path': docker_image_path,
'docker_image_username': docker_image_username,
'docker_image_password': docker_image_password,
'port': cfg.get('mysql_port'),
'user': user,
'password': password,
'database': database,
        'root_password': root_password,
}
data.update(cfg)
return pod_spec_template % data
@when('server.database.requested')
def provide_database(mysql):
log('db requested')
for request, application in mysql.database_requests().items():
log('request -> {0} for app -> {1}'.format(request, application))
database_name = get_state('database')
user = get_state('user')
password = get_state('password')
log('db params: {0}:{1}@{2}'.format(user, password, database_name))
info = network_get('server', relation_id())
log('network info {0}'.format(info))
mysql.provide_database(
request_id=request,
host=info['ingress-addresses'][0],
port=3306,
database_name=database_name,
user=user,
password=password,
)
``` |
{
"source": "johnsca/charm-helpers",
"score": 2
} |
#### File: contrib/storage/test_bcache.py
```python
import os
import shutil
import json
from mock import patch
from testtools import TestCase
from tempfile import mkdtemp
from charmhelpers.contrib.storage.linux import bcache
test_stats = {
'bypassed': '128G\n',
'cache_bypass_hits': '1132623\n',
'cache_bypass_misses': '0\n',
'cache_hit_ratio': '64\n',
'cache_hits': '12177090\n',
'cache_miss_collisions': '7091\n',
'cache_misses': '6717011\n',
'cache_readaheads': '0\n',
}
tmpdir = 'bcache-stats-test.'
cacheset = 'abcde'
cachedev = 'sdfoo'
class BcacheTestCase(TestCase):
def setUp(self):
super(BcacheTestCase, self).setUp()
self.sysfs = sysfs = mkdtemp(prefix=tmpdir)
self.addCleanup(shutil.rmtree, sysfs)
p = patch('charmhelpers.contrib.storage.linux.bcache.SYSFS', new=sysfs)
p.start()
self.addCleanup(p.stop)
self.cacheset = '{}/fs/bcache/{}'.format(sysfs, cacheset)
os.makedirs(self.cacheset)
self.devcache = '{}/block/{}/bcache'.format(sysfs, cachedev)
for n in ['register', 'register_quiet']:
with open('{}/fs/bcache/{}'.format(sysfs, n), 'w') as f:
f.write('foo')
for kind in self.cacheset, self.devcache:
for sub in bcache.stats_intervals:
intvaldir = '{}/{}'.format(kind, sub)
os.makedirs(intvaldir)
for fn, val in test_stats.items():
with open(os.path.join(intvaldir, fn), 'w') as f:
f.write(val)
def test_get_bcache_fs(self):
bcachedirs = bcache.get_bcache_fs()
assert len(bcachedirs) == 1
assert next(iter(bcachedirs)).cachepath.endswith('/fs/bcache/abcde')
@patch('charmhelpers.contrib.storage.linux.bcache.os.listdir')
def test_get_bcache_fs_nobcache(self, mock_listdir):
mock_listdir.side_effect = OSError(
'[Errno 2] No such file or directory:...')
bcachedirs = bcache.get_bcache_fs()
assert bcachedirs == []
def test_get_stats_global(self):
out = bcache.get_stats_action(
'global', 'hour')
out = json.loads(out)
assert len(out.keys()) == 1
k = next(iter(out.keys()))
assert k.endswith(cacheset)
assert out[k]['bypassed'] == '128G'
def test_get_stats_dev(self):
out = bcache.get_stats_action(
cachedev, 'hour')
out = json.loads(out)
assert len(out.keys()) == 1
k = next(iter(out.keys()))
assert k.endswith('sdfoo/bcache')
assert out[k]['cache_hit_ratio'] == '64'
``` |
{
"source": "johnsca/interface-benchmark",
"score": 2
} |
#### File: johnsca/interface-benchmark/requires.py
```python
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
class BenchmarkRequires(RelationBase):
scope = scopes.SERVICE
def registered(self):
"""
Returns a dict mapping each service name to a list of the benchmarks
that service has registered.
"""
result = {}
for conv in self.conversations():
service = conv.scope
benchmarks = conv.get_remote('benchmarks', '').split(',')
result[service] = benchmarks
return result
@hook('{requires:benchmark}-relation-joined')
def joined(self):
conv = self.conversation()
conv.set_state('{relation_name}.joined')
@hook('{requires:benchmark}-relation-changed')
def changed(self):
conv = self.conversation()
benchmarks = conv.get_remote('benchmarks', '').split(',')
conv.toggle_state('{relation_name}.registered', benchmarks)
@hook('{requires:benchmark}-relation-departed')
def departed(self):
conv = self.conversation()
conv.remove_state('{relation_name}.joined')
conv.remove_state('{relation_name}.registered')
``` |
{
"source": "johnsca/interface-mapred-slave",
"score": 2
} |
#### File: johnsca/interface-mapred-slave/requires.py
```python
import json
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
from jujubigdata import utils
class NodeManagerRequires(RelationBase):
scope = scopes.UNIT
@hook('{requires:mapred-slave}-relation-joined')
def joined(self):
conv = self.conversation()
conv.set_state('{relation_name}.joined')
conv.remove_state('{relation_name}.departing')
@hook('{requires:mapred-slave}-relation-departed')
def departed(self):
conv = self.conversation()
conv.remove_state('{relation_name}.joined')
conv.set_state('{relation_name}.departing')
def dismiss(self):
for conv in self.conversations():
conv.remove_state('{relation_name}.departing')
def nodes(self):
return [conv.scope.replace('/', '-') for conv in self.conversations()]
def hosts_map(self):
result = {}
for conv in self.conversations():
host = conv.scope.replace('/', '-')
addr = conv.get_remote('private-address', '')
ip = utils.resolve_private_address(addr)
result.update({ip: host})
return result
def send_spec(self, spec):
for conv in self.conversations():
conv.set_remote('spec', json.dumps(spec))
def send_resourcemanagers(self, resourcemanagers):
for conv in self.conversations():
conv.set_remote('resourcemanagers', json.dumps(resourcemanagers))
def send_ports(self, port, hs_http, hs_ipc):
for conv in self.conversations():
conv.set_remote(data={
'port': port,
'historyserver_http': hs_http,
'historyserver_ipc': hs_ipc,
})
def send_ssh_key(self, ssh_key):
for conv in self.conversations():
conv.set_remote('ssh-key', ssh_key)
def send_hosts_map(self, hosts_map):
for conv in self.conversations():
conv.set_remote('etc_hosts', json.dumps(hosts_map))
``` |
{
"source": "johnsca/interface-monitor",
"score": 2
} |
#### File: johnsca/interface-monitor/requires.py
```python
from charms.reactive import scopes
from charms.reactive import RelationBase
from charms.reactive import hook
class MonitoringRequires(RelationBase):
scope = scopes.UNIT
@hook('{requires:monitor}-relation-joined')
def joined(self):
self.set_state('{relation_name}.joined')
@hook('{requires:monitor}-relation-departed')
def departed(self):
self.remove_state('{relation_name}.joined')
def endpoints(self):
"""
Returns a list of host addresses.
"""
return [conv.get_remote('private-address')
for conv in self.conversations()]
``` |
{
"source": "johnsca/jujuresources",
"score": 2
} |
#### File: docs/exts/clidoc.py
```python
import re
import argparse
import sphinx.ext.autodoc
def is_cli(obj):
return hasattr(obj, '_subcommand_args') or hasattr(obj, '_subcommand_argsets')
class CLIDoc(sphinx.ext.autodoc.FunctionDocumenter):
"""
Automatically generate documentation for CLI entry-points.
"""
def _get_usage(self):
"""
Build usage string from argparser args.
"""
parser = argparse.ArgumentParser()
parser.prog = 'juju-resources {}'.format(self.object_name)
for set_name, set_args in getattr(self.object, '_subcommand_argsets', {}).items():
for ap_args, ap_kwargs in set_args:
parser.add_argument(*ap_args, **ap_kwargs)
for ap_args, ap_kwargs in getattr(self.object, '_subcommand_args', []):
parser.add_argument(*ap_args, **ap_kwargs)
usage = parser.format_usage()
usage = re.sub(r'\n *', ' ', usage)
return usage.strip()
def add_content(self, more_content, no_docstring=False):
if is_cli(self.object):
# add usage
self.add_line('**%s**' % self._get_usage(), '<clidoc>')
self.add_line('', '<clidoc>')
# add description
super(CLIDoc, self).add_content(more_content, no_docstring)
if is_cli(self.object):
# add parameter docs
sourcename = u'args of %s' % self.fullname
lines = []
for set_name, set_args in getattr(self.object, '_subcommand_argsets', {}).items():
for ap_args, ap_kwargs in set_args:
                    lines.append(':param {}: {}'.format(' '.join(ap_args), ap_kwargs.get('help')))
for ap_args, ap_kwargs in getattr(self.object, '_subcommand_args', []):
lines.append(':param {}: {}'.format(' '.join(ap_args), ap_kwargs['help']))
for i, line in enumerate(lines):
self.add_line(line, sourcename, i)
def filter_cli(app, what, name, obj, skip, options):
if type(obj).__name__ == 'function' and obj.__module__ == 'jujuresources.cli':
return not is_cli(obj)
return skip
def setup(app):
app.add_autodocumenter(CLIDoc)
app.connect('autodoc-skip-member', filter_cli)
```
#### File: jujuresources/jujuresources/__init__.py
```python
import os
import contextlib
import subprocess
try:
from urllib.request import urlopen # Python 3
from urllib.parse import urlparse
except ImportError:
from urllib import urlopen # Python 2
from urlparse import urlparse
import yaml
from jujuresources.backend import ResourceContainer
from jujuresources.backend import PyPIResource
from jujuresources.backend import ALL
__all__ = ['fetch', 'verify', 'install', 'resource_path', 'resource_spec',
'ALL', 'config_get', 'juju_log']
resources_cache = {}
def config_get(option_name):
"""
Helper to access a Juju config option when charmhelpers is not available.
:param str option_name: Name of the config option to get the value of
"""
try:
raw = subprocess.check_output(['config-get', option_name, '--format=yaml'])
return yaml.load(raw.decode('UTF-8'))
except ValueError:
return None
def juju_log(message, level='DEBUG'):
"""
Helper to send Juju log messages when charmhelpers is not available.
:param str message: Message to log
:param str level: Log level (DEBUG, INFO, ERROR, WARNING, CRITICAL; default: DEBUG)
"""
subprocess.check_call(['juju-log', '-l', level, message])
def _load(resources_yaml, output_dir=None):
if (resources_yaml, output_dir) not in resources_cache:
url = resources_yaml
parsed_url = urlparse(url)
if not parsed_url.scheme:
url = 'file://%s' % os.path.join(os.getcwd(), url)
with contextlib.closing(urlopen(url)) as fp:
resdefs = yaml.load(fp)
_output_dir = output_dir or resdefs.get('options', {}).get('output_dir', 'resources')
resources = ResourceContainer(_output_dir)
for name, resource in resdefs.get('resources', {}).items():
resources.add_required(name, resource)
for name, resource in resdefs.get('optional_resources', {}).items():
resources.add_optional(name, resource)
resources_cache[(resources_yaml, output_dir)] = resources
return resources_cache[(resources_yaml, output_dir)]
def _invalid(resources, which):
invalid = set()
for resource in resources.subset(which):
if not resource.verify():
invalid.add(resource.name)
return invalid
def _fetch(resources, which, mirror_url, force=False, reporthook=None):
invalid = _invalid(resources, which)
for resource in resources.subset(which):
if resource.name not in invalid and not force:
continue
if reporthook:
reporthook(resource.name)
resource.fetch(mirror_url)
def _install(resources, which, mirror_url, destination, skip_top_level):
success = True
pypi_resources = []
for resource in resources.subset(which):
if isinstance(resource, PyPIResource):
# group pypi resources to reduce subprocess calls
pypi_resources.append(resource)
else:
success = resource.install(destination, skip_top_level) and success
if pypi_resources:
success = PyPIResource.install_group(pypi_resources, mirror_url) and success
return success
def invalid(which=None, resources_yaml='resources.yaml'):
"""
Return a list of the names of the resources which do not
pass :func:`verify`.
:param list which: A name, or a list of one or more resource names, to
        fetch. If omitted, all non-optional resources are verified.
You can also pass ``jujuresources.ALL`` to fetch all optional *and*
required resources.
:param str resources_yaml: Location of the yaml file containing the
resource descriptions (default: ``./resources.yaml``).
Can be a local file name or a remote URL.
"""
resources = _load(resources_yaml, None)
return _invalid(resources, which)
def verify(which=None, resources_yaml='resources.yaml'):
"""
    Verify that some or all resources previously fetched with :func:`fetch`
    are valid, including checking their cryptographic hashes.
:param list which: A list of one or more resource names to
        check. If omitted, all non-optional resources are verified.
You can also pass ``jujuresources.ALL`` to fetch all optional and
required resources.
:param str resources_yaml: Location of the yaml file containing the
resource descriptions (default: ``resources.yaml``).
Can be a local file name or a remote URL.
:param str output_dir: Override ``output_dir`` option from `resources_yaml`
(this is intended for mirroring via the CLI and it is not recommended
to be used otherwise)
:return: True if all of the resources are available and valid, otherwise False.
"""
resources = _load(resources_yaml, None)
return not _invalid(resources, which)
def fetch(which=None, mirror_url=None, resources_yaml='resources.yaml',
force=False, reporthook=None):
"""
Attempt to fetch all resources for a charm.
:param list which: A name, or a list of one or more resource names, to
        fetch. If omitted, all non-optional resources are fetched.
You can also pass ``jujuresources.ALL`` to fetch all optional *and*
required resources.
:param str mirror_url: Fetch resources from the given mirror.
:param str resources_yaml: Location of the yaml file containing the
resource descriptions (default: ``./resources.yaml``).
Can be a local file name or a remote URL.
    :param bool force: Force re-downloading of valid resources.
:param func reporthook: Callback for reporting download progress.
Will be called once for each resource, just prior to fetching, and will
be passed the resource name.
:return: True or False indicating whether the resources were successfully
downloaded.
"""
resources = _load(resources_yaml, None)
if reporthook is None:
reporthook = lambda r: juju_log('Fetching %s' % r, level='INFO')
_fetch(resources, which, mirror_url, force, reporthook)
failed = _invalid(resources, which)
if failed:
juju_log('Failed to fetch resource%s: %s' % (
's' if len(failed) > 1 else '',
', '.join(failed)
), level='WARNING')
else:
juju_log('All resources successfully fetched', level='INFO')
return not failed
def resource_path(resource_name, resources_yaml='resources.yaml'):
"""
Get the local path for a named resource that has been fetched.
This may return ``None`` if the local path cannot be determined
(for example, if the resource has not been fetched yet and needs
to be resolved). Even if it returns a path, that path is not
guaranteed to exist or be valid; you should always confirm that
a resource is available using :func:`verify` or :func:`fetch`
before using it.
:param str resource_name: The name of a resource to resolve.
:param str resources_yaml: Location of the yaml file containing the
resource descriptions (default: ``./resources.yaml``).
Can be a local file name or a remote URL.
"""
resources = _load(resources_yaml, None)
return resources[resource_name].destination
def resource_spec(resource_name, resources_yaml='resources.yaml'):
"""
Get the spec for a named resource. This would be the URL for URL
resources, the Python package spec for PyPI resources, the full
path for local file resources, etc.
:param str resource_name: The name of a resource to resolve.
:param str resources_yaml: Location of the yaml file containing the
resource descriptions (default: ``./resources.yaml``).
Can be a local file name or a remote URL.
"""
resources = _load(resources_yaml, None)
return resources[resource_name].spec
def resource_defined(resource_name, resources_yaml='resources.yaml'):
"""
Check whether a given resource is defined.
:param str resource_name: The name of a resource to resolve.
:param str resources_yaml: Location of the yaml file containing the
resource descriptions (default: ``./resources.yaml``).
Can be a local file name or a remote URL.
"""
resources = _load(resources_yaml, None)
return resource_name in resources
def install(which=None, mirror_url=None, destination=None, skip_top_level=False,
resources_yaml='resources.yaml'):
"""
Install one or more resources.
The resource(s) will be fetched, if necessary, and different resource
types are handled appropriately (e.g., PyPI resources are installed
with ``pip``, archive file resources are extracted, non-archive file
resources are copied, etc).
For PyPI resources, this is roughly equivalent to the following::
pip install `juju-resources resource_spec $resource` -i $mirror_url
:param list which: A name, or a list of one or more resource names, to
        fetch. If omitted, all non-optional resources are installed.
:param str mirror_url: Fetch resources from the given mirror.
:param str destination: Destination to which to extract or copy file resources.
:param bool skip_top_level: When extracting archive file resources, skip
all members that are at the top level of the archive and instead extract
all nested members directly into ``destination``. E.g., an archive
containing ``foo/bar.txt`` and ``foo/qux/baz.txt`` will be extracted as
``destination/bar.txt`` and ``destination/qux/baz.txt``.
:param str resources_yaml: Location of the yaml file containing the
resource descriptions (default: ``resources.yaml``).
Can be a local file name or a remote URL.
:returns: True if all resources were successfully installed.
"""
resources = _load(resources_yaml, None)
return _install(resources, which, mirror_url, destination, skip_top_level)
``` |
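The public helpers above are typically combined: verify the cache, fetch anything missing, then install. A hedged sketch using only functions defined in this module (the resource names, mirror URL, and destination are placeholders):

```python
import jujuresources

wanted = ['pkg-foo', 'pkg-bar']  # placeholder entries from resources.yaml

if not jujuresources.verify(wanted):
    jujuresources.fetch(wanted, mirror_url='http://mirror.example.com/resources')

if jujuresources.install(wanted, destination='/opt/app', skip_top_level=True):
    print(jujuresources.resource_path('pkg-foo'))
```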
{
"source": "johnsca/layer-apache-flume-twitter",
"score": 2
} |
#### File: layer-apache-flume-twitter/reactive/flume_twitter.py
```python
from charms.reactive import when, when_not
from charms.reactive import set_state, remove_state
from charms.reactive.helpers import any_file_changed
from charmhelpers.core import hookenv
from charms.layer.apache_flume_base import Flume
API_CRED_OPTIONS = [
'twitter_access_token',
'twitter_access_token_secret',
'twitter_consumer_key',
'twitter_consumer_secret',
]
@when('flume-base.installed')
def verify_config():
if all(map(hookenv.config().get, API_CRED_OPTIONS)):
set_state('flume-twitter.config.valid')
else:
remove_state('flume-twitter.config.valid')
hookenv.status_set('blocked', 'Twitter Streaming API credentials must be set')
@when('flume-base.installed', 'flume-twitter.config.valid')
@when_not('flume-sink.joined')
def waiting_for_flume_connection():
hookenv.status_set('blocked', 'Waiting for connection to Flume Sink')
@when('flume-base.installed', 'flume-twitter.config.valid', 'flume-sink.joined')
@when_not('flume-sink.ready')
def waiting_for_flume_available(sink): # pylint: disable=unused-argument
hookenv.status_set('blocked', 'Waiting for Flume Sink')
@when('flume-base.installed', 'flume-twitter.config.valid', 'flume-sink.ready')
def configure_flume(sink):
hookenv.status_set('maintenance', 'Configuring Flume')
flume = Flume()
flume.configure_flume({'agents': sink.agents()})
if any_file_changed([flume.config_file]):
flume.restart()
hookenv.status_set('active', 'Ready')
set_state('flume-twitter.started')
@when('flume-twitter.started')
@when_not('flume-twitter.config.valid')
def stop_flume():
flume = Flume()
flume.stop()
remove_state('flume-twitter.started')
@when('flume-twitter.started')
@when_not('flume-sink.ready')
def agent_disconnected():
stop_flume()
``` |
{
"source": "johnsca/layered-gobblin-charm",
"score": 2
} |
#### File: layered-gobblin-charm/reactive/gobblin.py
```python
from charms.reactive import when, when_not
from charms.reactive import set_state, remove_state
from charmhelpers.core import hookenv
from charms.layer.gobblin import Gobblin
from charms.layer.hadoop_client import get_dist_config
@when_not('hadoop.ready')
def report_blocked():
hookenv.status_set('blocked', 'Waiting for relation to Hadoop Plugin')
@when('hadoop.ready')
@when_not('gobblin.installed')
def install_gobblin(hadoop):
gobblin = Gobblin(hadoop.version(), get_dist_config())
if gobblin.verify_resources():
hookenv.status_set('maintenance', 'Installing Gobblin')
gobblin.install()
set_state('gobblin.installed')
@when('gobblin.installed', 'hadoop.ready')
@when_not('gobblin.started')
def configure_gobblin(hadoop):
hookenv.status_set('maintenance', 'Setting up Gobblin')
gobblin = Gobblin(hadoop.version(), get_dist_config())
gobblin.setup_gobblin(hadoop.namenodes()[0], hadoop.hdfs_port())
set_state('gobblin.started')
hookenv.status_set('active', 'Ready')
@when('gobblin.started')
@when_not('hadoop.ready')
def stop_gobblin():
remove_state('gobblin.started')
``` |
{
"source": "johnsca/melddict",
"score": 2
} |
#### File: johnsca/melddict/tests.py
```python
import pytest
from copy import deepcopy
from melddict import MeldDict
def test_add():
d1 = MeldDict({'a': 'a',
'b': 'b',
'c': {'a': 'c.a', 'b': 'c.b'},
'd': ['d.0'],
'e': 'e',
'f': 'f'})
d2 = {'b': 'B',
'c': {'b': 'c.B', 'c': 'c.c'},
'd': ['d.1'],
'e': ['e.0'],
'f': {'f': 'f.f'},
'g': 'g'}
with pytest.raises(TypeError):
d1.add('foo')
res = d1.add(d2)
assert d1 == {'a': 'a',
'b': 'B',
'c': {'a': 'c.a', 'b': 'c.B', 'c': 'c.c'},
'd': ['d.0', 'd.1'],
'e': ['e.0'],
'f': {'f': 'f.f'},
'g': 'g'}
assert res is d1
d3 = MeldDict({'a': ['a.0']})
d3.meld_iters = False
d3.add({'a': ['a.1']})
assert d3 == {'a': ['a.1']}
def test_subtract():
d1 = MeldDict({'a': 'a',
'b': 'b',
'c': {'a': 'c.a', 'b': 'c.b'},
'd': ['d.0', 'd.1']})
d2 = {'b': 'B',
'c': {'b': 'c.B', 'c': 'c.c'},
'd': ['d.1', 'd.2'],
'f': 'f'}
with pytest.raises(TypeError):
d1.subtract('foo')
res = d1.subtract(d2)
assert d1 == {'a': 'a',
'c': {'a': 'c.a'},
'd': ['d.0']}
assert res is d1
d3 = MeldDict({'a': ['a.0', 'a.1'], 'b': 'b'})
d3.meld_iters = False
d3.subtract({'a': ['a.1']})
assert d3 == {'b': 'b'}
d4 = MeldDict({'a': ['a.0'],
'b': {'a': 'b.a'}})
d4.remove_emptied = True
d4.subtract(d4)
assert d4 == {}
def test_add_operators():
d1 = MeldDict({'a': 'a',
'b': 'b',
'c': {'a': 'c.a', 'b': 'c.b'},
'd': ['d.0'],
'e': 'e',
'f': 'f'})
d2 = {'b': 'B',
'c': {'b': 'c.B', 'c': 'c.c'},
'd': ['d.1'],
'e': ['e.0'],
'f': {'f': 'f.f'},
'g': 'g'}
dr = {'a': 'a',
'b': 'B',
'c': {'a': 'c.a', 'b': 'c.B', 'c': 'c.c'},
'd': ['d.0', 'd.1'],
'e': ['e.0'],
'f': {'f': 'f.f'},
'g': 'g'}
drr = {'a': 'a',
'b': 'b',
'c': {'a': 'c.a', 'b': 'c.b', 'c': 'c.c'},
'd': ['d.1', 'd.0'],
'e': 'e',
'f': 'f',
'g': 'g'}
assert d1 + d2 == dr
assert d2 + d1 == drr
d3 = MeldDict(deepcopy(d1))
d3 += d2
assert d3 == dr
d4 = deepcopy(d2)
d4 += d1
assert d4 == drr
def test_subtract_operators():
d1 = MeldDict({'a': 'a',
'b': 'b',
'c': {'a': 'c.a', 'b': 'c.b'},
'd': ['d.0', 'd.1']})
d2 = {'b': 'B',
'c': {'b': 'c.B', 'c': 'c.c'},
'd': ['d.1', 'd.2'],
'f': 'f'}
dr = {'a': 'a',
'c': {'a': 'c.a'},
'd': ['d.0']}
drr = {'c': {'c': 'c.c'},
'd': ['d.2'],
'f': 'f'}
assert d1 - d2 == dr
assert d2 - d1 == drr
d3 = MeldDict(deepcopy(d1))
d3 -= d2
assert d3 == dr
d4 = deepcopy(d2)
d4 -= d1
assert d4 == drr
``` |
{
"source": "johnscancella/open-oni",
"score": 2
} |
#### File: open-oni/core/context_processors.py
```python
from django.conf import settings
from django.core.cache import cache
from core import models
from core import solr_index
from core.forms import _fulltext_range, CityForm
def extra_request_info(request):
"""
Add some extra useful stuff into the RequestContext.
"""
fulltext_range = _fulltext_range()
return {
'BASE_URL': settings.BASE_URL,
'city_form': CityForm(),
'fulltext_enddate': fulltext_range[1],
'fulltext_startdate': fulltext_range[0],
'omniture_url': settings.OMNITURE_SCRIPT if "OMNITURE_SCRIPT" in dir(settings) else None,
'project_name': settings.PROJECT_NAME if "PROJECT_NAME" in dir(settings) else None,
'sharetool_url': settings.SHARETOOL_URL if "SHARETOOL_URL" in dir(settings) else None,
'site_title': settings.SITE_TITLE if "SITE_TITLE" in dir(settings) else None,
}
def cors(request):
"""
    Add CORS headers so that the JSON can be used easily from JavaScript
without requiring proxying.
"""
pass
def newspaper_info(request):
info = cache.get("newspaper_info")
if info is None:
total_page_count = solr_index.page_count()
titles_with_issues = models.Title.objects.filter(has_issues=True)
titles_with_issues_count = titles_with_issues.count()
_places = models.Place.objects.filter(titles__in=titles_with_issues)
states_with_issues = sorted(set(place.state for place in _places if place.state is not None))
_languages = models.Language.objects.filter(titles__in=titles_with_issues)
languages_with_issues = sorted(set((lang.code, lang.name) for lang in _languages))
        # TODO: might make sense to add an Ethnicity.has_issue model field
        # to save having to recompute this all the time; even though it
# shouldn't take more than 1/2 a second, it all adds up eh?
ethnicities_with_issues = []
for e in models.Ethnicity.objects.all():
            # filter out a few ethnicities, not sure why really
if e.has_issues and e.name not in ["African", "Canadian", "Welsh"]:
ethnicities_with_issues.append(e.name)
info = {'titles_with_issues_count': titles_with_issues_count,
'states_with_issues': states_with_issues,
'languages_with_issues': languages_with_issues,
'ethnicities_with_issues': ethnicities_with_issues,
'total_page_count': total_page_count}
cache.set("newspaper_info", info)
return info
```
#### File: management/commands/index_pages.py
```python
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.solr_index import index_pages
configure_logging("index_pages_logging.config", "index_pages.log")
class Command(BaseCommand):
def handle(self, **options):
index_pages()
```
#### File: management/commands/index.py
```python
import logging
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.solr_index import index_titles, index_pages
configure_logging("index_logging.config", "index.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "index all titles and pages ; " + \
"you may (or may not) want to zap_index before"
def handle(self, **options):
_logger.info("indexing titles")
index_titles()
_logger.info("finished indexing titles")
_logger.info("indexing pages")
index_pages()
_logger.info("finished indexing pages")
```
#### File: management/commands/index_titles.py
```python
import logging
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.solr_index import index_titles
configure_logging("index_titles_logging.config", "index_titles.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, **options):
_logger.info("indexing titles")
index_titles()
_logger.info("finished indexing titles")
```
#### File: management/commands/load_copyright.py
```python
import os
import logging
import datetime
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from core.load_copyright import loadCopyright
from core.management.commands import configure_logging
configure_logging("load_copyright_logging.config", "load_copyright.log")
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Add copyright uris and labels to the Copyright table from input file."
def add_arguments(self, parser):
parser.add_argument('filepath', help="Path to input file")
def handle(self, *args, **options):
filepath = options['filepath']
try:
loadCopyright(filepath)
except Exception as e:
LOGGER.exception(e)
raise CommandError("unable to load copyrights. check the load_batch log for clues")
```
#### File: management/commands/queue_process_coordinates.py
```python
import os
import logging
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from core.management.commands import configure_logging
from core import tasks
configure_logging('queue_process_coordinates.config',
'queue_process_coordinates_%s.log' % os.getpid())
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
)
help = "queue the word coordinates of a batch to be processed"
args = '<batch name>'
def handle(self, batch_name, *args, **options):
if len(args)!=0:
raise CommandError('Usage is queue_process_coordinates %s' % self.args)
try:
tasks.process_coordinates.delay(batch_name)
        except Exception as e:
LOGGER.exception(e)
raise CommandError("unable to process coordinates. check the queue_load_batch log for clues")
```
#### File: core/templatetags/custom_filters.py
```python
from django import template
from rfc3339 import rfc3339
from core.utils import url
from core.utils.utils import label
register = template.Library()
@register.filter(name='rfc3339')
def rfc3339_filter(d):
return rfc3339(d)
@register.filter(name='pack_url')
def pack_url(value, default='-'):
return url.pack_url_path(value, default)
@register.filter(name='label')
def _label(value):
return label(value)
```
#### File: core/tests/test_holding_loader.py
```python
import os.path
from django.test import TestCase
from core.models import Title
from core.title_loader import TitleLoader
from core.holding_loader import HoldingLoader
import core
class HoldingLoaderTests(TestCase):
fixtures = ['test/countries.json', 'test/languages.json', 'test/institutions.json']
def test_holdings(self):
# title data
titlexml = os.path.join(os.path.dirname(core.__file__),
'test-data', 'title.xml')
# holdings data
holdingsxml = os.path.join(os.path.dirname(core.__file__),
'test-data', 'holdings.xml')
# first need to load the titles so we can link against them
title_loader = TitleLoader()
title_loader.load_file(titlexml)
# now load the holdings from the same file
holding_loader = HoldingLoader()
holding_loader.load_file(holdingsxml)
# fetch the title and see that holdings are attached
t = Title.objects.get(lccn='sn83030846')
holdings = list(t.holdings.all())
self.assertEqual(len(holdings), 10)
h = holdings[1]
self.assertEqual(h.institution.name, 'Colgate Univ')
self.assertEqual(h.type, 'Original')
self.assertEqual(h.description, '<1876:5:18> <1884:1:10> <1885:9:3>')
self.assertEqual(h.description_as_list(), [u'<1876:5:18>', u'<1884:1:10>', u'<1885:9:3>'])
self.assertEqual(str(h.last_updated), '01/1992')
```
#### File: core/utils/__init__.py
```python
import datetime
from django.utils import datetime_safe
def strftime(d, fmt):
"""works with datetimes with years less than 1900
"""
return datetime_safe.new_datetime(d).strftime(fmt)
```
#### File: open-oni/scripts/extract_marc_values.py
```python
import glob
import operator
import os
import sys
from random import choice
from pymarc import map_xml
from django.db.models import Count
from core.models import Country
from core.title_loader import _extract as extract
# SOURCE should be the location of the marcxml file or folder of files.
SOURCE = sys.argv[1]
FIELD = sys.argv[2]
try:
SUBFIELD = sys.argv[3]
except IndexError:
SUBFIELD = None
values = []
def parse_record(record, field=FIELD, subfield=SUBFIELD):
value = extract(record, field, subfield)
if value:
rec_id = extract(record, '010', 'a')
if not rec_id:
rec_id = extract(record, '004')
values.append((rec_id, value))
if __name__ == '__main__':
if os.path.isdir(SOURCE):
marc_xml_dir = os.listdir(SOURCE)
for xml_file in marc_xml_dir:
marc_file = os.path.join(SOURCE, xml_file)
map_xml(parse_record, open(marc_file, 'r'))
else:
map_xml(parse_record, open(SOURCE, 'r'))
# all values
#for value in values:
# print str(value[0]), ',',value[1]
total = len(values)
#Get a sample of 50 random values for that field
for i in range(50):
try:
random_value = choice(values)
values.remove(random_value)
            print(','.join([random_value[0], random_value[1]]))
except IndexError:
continue
print "TOTAL # OF RECORDS: %s" % total
``` |
{
"source": "johnschultz/pyramid_swagger",
"score": 3
} |
#### File: pyramid_swagger/pyramid_swagger/spec.py
```python
from pkg_resources import resource_filename
import jsonschema
import simplejson
from jsonschema.validators import RefResolver
def validate_swagger_schemas(resource_listing, resources):
"""Validate the structure of Swagger schemas against the spec.
:param resource_listing: A swagger resource listing
:type resource_listing: dict
:param resources: A list of filespaths to Swagger API declarations
:type resources: [string]
:raises: jsonschema ValidationErrors on malformed schemas
"""
validate_resource_listing(resource_listing)
for resource in resources:
with open(resource) as resource_file:
resource_json = simplejson.load(resource_file)
validate_api_declaration(resource_json)
def validate_resource_listing(resource_listing):
resource_spec_path = resource_filename(
'pyramid_swagger',
'swagger_spec_schemas/v1.2/resourceListing.json'
)
validate_jsonschema(resource_spec_path, resource_listing)
def validate_api_declaration(api_declaration_json):
"""Validate a swagger schema.
"""
api_spec_path = resource_filename(
'pyramid_swagger',
'swagger_spec_schemas/v1.2/apiDeclaration.json'
)
validate_jsonschema(api_spec_path, api_declaration_json)
def validate_jsonschema(spec_path, json_object):
with open(spec_path) as schema_file:
schema = simplejson.loads(schema_file.read())
resolver = RefResolver(
"file://{0}".format(spec_path),
schema
)
jsonschema.validate(json_object, schema, resolver=resolver)
```
#### File: pyramid_swagger/pyramid_swagger/tween.py
```python
from __future__ import unicode_literals
from collections import namedtuple
from contextlib import contextmanager
import functools
import logging
import re
from pyramid.interfaces import IRoutesMapper
import jsonschema.exceptions
import simplejson
from pyramid_swagger.exceptions import RequestValidationError
from pyramid_swagger.exceptions import ResponseValidationError
from pyramid_swagger.model import PathNotMatchedError
log = logging.getLogger(__name__)
DEFAULT_EXCLUDED_PATHS = [
r'^/static/?',
r'^/api-docs/?'
]
class Settings(namedtuple(
'Settings',
[
'schema',
'validate_request',
'validate_response',
'validate_path',
'exclude_paths',
'exclude_routes',
]
)):
"""A settings object for configuratble options.
:param schema: a :class:`pyramid_swagger.model.SwaggerSchema`
:param validate_swagger_spec: check Swagger files for correctness.
:param validate_request: check requests against Swagger spec.
:param validate_response: check responses against Swagger spec.
:param validate_path: check if request path is in schema. If disabled
and path not found in schema, request / response validation is skipped.
:param exclude_paths: list of paths (in regex format) that should be
excluded from validation.
:rtype: namedtuple
:param exclude_routes: list of route names that should be excluded from
validation.
"""
@contextmanager
def noop_context(request, response=None):
yield
def _get_validation_context(registry):
validation_context_path = registry.settings.get(
'pyramid_swagger.validation_context_path',
)
if validation_context_path:
m = re.match(
            r'(?P<module_path>.*)\.(?P<contextmanager_name>.*)',
validation_context_path,
)
module_path = m.group('module_path')
contextmanager_name = m.group('contextmanager_name')
return getattr(
__import__(module_path, fromlist=contextmanager_name),
contextmanager_name,
)
else:
return noop_context
def validation_tween_factory(handler, registry):
"""Pyramid tween for performing validation.
Note this is very simple -- it validates requests, responses, and paths
while delegating to the relevant matching view.
"""
settings = load_settings(registry)
route_mapper = registry.queryUtility(IRoutesMapper)
def validator_tween(request):
# We don't have access to this yet but let's go ahead and build the
# matchdict so we can validate it and use it to exclude routes from
# validation.
route_info = route_mapper(request)
if should_exclude_request(settings, request, route_info):
return handler(request)
validation_context = _get_validation_context(registry)
try:
validator_map = settings.schema.validators_for_request(request)
except PathNotMatchedError as exc:
if settings.validate_path:
with validation_context(request):
raise RequestValidationError(str(exc))
else:
return handler(request)
if settings.validate_request:
request_data = handle_request(
PyramidSwaggerRequest(request, route_info),
validation_context,
validator_map)
def swagger_data(_):
return request_data
request.set_property(swagger_data)
response = handler(request)
if settings.validate_response:
with validation_context(request, response=response):
validate_response(response, validator_map.response)
return response
return validator_tween
class PyramidSwaggerRequest(object):
"""Adapter for a :class:`pyramid.request.Request` which exposes request
data for casting and validation.
"""
FORM_TYPES = [
'application/x-www-form-urlencoded',
'multipart/form-data',
]
def __init__(self, request, route_info):
self.request = request
self.route_info = route_info
@property
def query(self):
return self.request.GET
@property
def path(self):
return self.route_info.get('match') or {}
@property
def headers(self):
return self.request.headers
@property
def form(self):
# Don't read the POST dict unless the body is form encoded
if self.request.headers.get('Content-Type') in self.FORM_TYPES:
return self.request.POST
return {}
@property
def body(self):
return getattr(self.request, 'json_body', {})
def handle_request(request, validation_context, validator_map):
"""Validate the request against the swagger spec and return a dict with
all parameter values available in the request, casted to the expected
python type.
:param request: a :class:`PyramidSwaggerRequest` to validate
:param validation_context: a context manager for wrapping validation
errors
:param validator_map: a :class:`pyramid_swagger.load_schema.ValidatorMap`
used to validate the request
:returns: a :class:`dict` of request data for each parameter in the swagger
spec
"""
request_data = {}
validation_pairs = []
for validator, values in [
(validator_map.query, request.query),
(validator_map.path, request.path),
(validator_map.form, request.form),
(validator_map.headers, request.headers),
]:
values = cast_params(validator.schema, values)
validation_pairs.append((validator, values))
request_data.update(values)
# Body is a special case because the key for the request_data comes
# from the name in the schema, instead of keys in the values
if validator_map.body.schema:
param_name = validator_map.body.schema['name']
validation_pairs.append((validator_map.body, request.body))
request_data[param_name] = request.body
with validation_context(request):
validate_request(validation_pairs)
return request_data
def load_settings(registry):
return Settings(
schema=registry.settings['pyramid_swagger.schema'],
validate_request=registry.settings.get(
'pyramid_swagger.enable_request_validation',
True
),
validate_response=registry.settings.get(
'pyramid_swagger.enable_response_validation',
True
),
validate_path=registry.settings.get(
'pyramid_swagger.enable_path_validation',
True
),
exclude_paths=get_exclude_paths(registry),
exclude_routes=set(registry.settings.get(
'pyramid_swagger.exclude_routes',
) or []),
)
def get_exclude_paths(registry):
"""Compiles a list of paths that should not be validated against.
:rtype: list of compiled validation regexes
"""
# TODO(#63): remove deprecated `skip_validation` setting in v2.0.
regexes = registry.settings.get(
'pyramid_swagger.skip_validation',
registry.settings.get(
'pyramid_swagger.exclude_paths',
DEFAULT_EXCLUDED_PATHS
)
)
# being nice to users using strings :p
if not isinstance(regexes, list) and not isinstance(regexes, tuple):
regexes = [regexes]
return [re.compile(r) for r in regexes]
def should_exclude_request(settings, request, route_info):
disable_all_validation = not any((
settings.validate_request,
settings.validate_response,
settings.validate_path
))
return (
disable_all_validation or
should_exclude_path(settings.exclude_paths, request.path) or
should_exclude_route(settings.exclude_routes, route_info)
)
def should_exclude_path(exclude_path_regexes, path):
# Skip validation for the specified endpoints
return any(r.match(path) for r in exclude_path_regexes)
def should_exclude_route(excluded_routes, route_info):
return (
route_info.get('route') and
route_info['route'].name in excluded_routes
)
def validation_error(exc_class):
def decorator(f):
@functools.wraps(f)
def _validate(*args, **kwargs):
try:
return f(*args, **kwargs)
except jsonschema.exceptions.ValidationError as exc:
# This will alter our stack trace slightly, but Pyramid knows
# how to render it. And the real value is in the message
# anyway.
raise exc_class(str(exc))
return _validate
return decorator
CAST_TYPE_TO_FUNC = {
'integer': int,
'float': float,
'boolean': bool,
}
def cast_request_param(param_type, param_name, param_value):
"""Try to cast a request param (e.g. query arg, POST data) from a string to
its specified type in the schema. This allows validating non-string params.
:param param_type: name of the type to be casted to
:type param_type: string
:param param_name: param name
:type param_name: string
:param param_value: param value
:type param_value: string
"""
try:
return CAST_TYPE_TO_FUNC.get(param_type, lambda x: x)(param_value)
except ValueError:
log.warn("Failed to cast %s value of %s to %s",
param_name, param_value, param_type)
# Ignore type error, let jsonschema validation handle incorrect types
return param_value
@validation_error(RequestValidationError)
def validate_request(validation_pairs):
for validator, values in validation_pairs:
validator.validate(values)
def cast_params(schema, values):
if not schema:
return {}
def get_type(param_name):
return schema['properties'].get(param_name, {}).get('type')
return dict(
(k, cast_request_param(get_type(k), k, v))
for k, v in values.items()
)
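# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates how cast_params() uses a parameter schema to cast raw string
# request values via cast_request_param(). The schema and values are invented
# for illustration only.
def _demo_cast_params():
    demo_schema = {
        'properties': {
            'limit': {'type': 'integer'},
            'active': {'type': 'boolean'},
            'name': {'type': 'string'},
        }
    }
    raw_values = {'limit': '25', 'active': 'true', 'name': 'alice'}
    # -> {'limit': 25, 'active': True, 'name': 'alice'}
    # Note that 'boolean' casts via bool(), so any non-empty string (even
    # 'false') becomes True; jsonschema validation is still the safety net.
    return cast_params(demo_schema, raw_values)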
@validation_error(ResponseValidationError)
def validate_response(response, validator):
"""Validates response against our schemas.
:param response: the response object to validate
:type response: :class:`pyramid.response.Response`
:param validator: validator for the response
:type validator: :class`:pyramid_swagger.load_schema.SchemaValidator`
"""
# Short circuit if we are supposed to not validate anything.
if (
validator.schema.get('type') == 'void' and
response.body in (None, b'', b'{}', b'null')
):
return
validator.validate(prepare_body(response))
def prepare_body(response):
# content_type and charset must both be set to access response.text
if response.content_type is None or response.charset is None:
raise ResponseValidationError(
'Response validation error: Content-Type and charset must be set'
)
if 'application/json' in response.content_type:
return simplejson.loads(response.text)
else:
return response.text
``` |
{
"source": "johnschwarz/PsychicTest",
"score": 4
} |
#### File: johnschwarz/PsychicTest/PsyTest.py
```python
import time
import random
print("Try to influence the direction of the X on this line")
print("----------X----------")
time.sleep(0.4)
direction = input("Type 'positive' for right or 'negative' for left\n")
direction.lower()
while direction !='positive' or direction !='negative':
if direction == 'positive':
print()
break
elif direction == 'negative':
print()
break
else:
print("Try to influence the outcome of random numbers.")
time.sleep(0.4)
direction = input("Type 'positive' or 'negative'\n")
time.sleep(0.2)
print ("You chose", direction)
time.sleep(1)
print ("Visualize your", direction, "energy.")
time.sleep(1)
print ("Get Ready!")
time.sleep(0.5)
print("3")
time.sleep(1)
print("2")
time.sleep(1)
print("1")
time.sleep(1)
print("Go")
position = 10
line = '-'*21
def adjustLine(roll):
time.sleep(0.3)
global position
global line
line = '-'*21
line_list = list(line)
position += roll
    line_list[position] = 'X'
line = "".join(line_list)
return line
for x in range(10):
print(adjustLine(1 if random.random() < 0.5 else -1))
time.sleep(0.4)
print("Calculating...")
time.sleep(1)
print("..")
time.sleep(1)
if direction == 'positive' and position > 10 or direction == 'negative' and position < 10:
print("You did it!", "\nYour conscious efforts made the X move.")
else:
print("Your psychic powers need improvement!")
``` |
{
"source": "johnschwenck/ERCOT_scraper",
"score": 3
} |
#### File: johnschwenck/ERCOT_scraper/ERCOT_scraper_refactor.py
```python
import sys
import os
from bs4 import BeautifulSoup
import requests
import zipfile, io
from datetime import datetime
import re
from threading import Thread
import timeit
import time
# Holy Grail of ERCOT Web Scrape, main function, call function with or without an intended path, if no path is given path defaults to script's folder
# If no path exists in folder script will generate it
def data_extract(user_path=None):
'''Downloads desired datatypes from ERCOT MIS.
    If no path is given, the function will check whether a path exists in this file's folder and create the needed folders if they do not exist.
Will require user input to select which datatypes need to be downloaded.
Args:
user_path: string with the file path where the user wants to put the downloaded files, can be empty
Returns:
Will download files either to the desired path or to the folder where this file is located.
'''
# If path is not given, run function for user to setup path
start1 = timeit.default_timer()
# This will either find an existing path or generate a new path
if user_path is None:
setup = user_setup()
ERCOT_path = setup[1]
DataDict = setup[0]
else:
# Check that there actually is a directory with necessary folders
# TODO: make this directory check its own function which returns a boolean if it completed correctly or not
if os.path.isdir(str(user_path)+'\\ERCOT Data') == False:
# If there is not, have the user select if they wish to create new folders in the given directory
newsetup = input('No folders found! \nRedirecting to user setup...\n \nWould you like to create the necessary folders in current directory ('+os.path.join(user_path,'')+') or would you like to choose another location and run setup assistant?\n 1) Create folders in '+user_path+' \n 2) Choose alternative location\n 3) Exit')
# Runs function to create new folders
if newsetup is str(1):
tmp_path = dir_create(user_path)
setup = user_setup(tmp_path)
DataDict = setup[0]
ERCOT_path = setup[1]
# Runs function to choose new path
elif newsetup is str(2):
setup = user_setup()
DataDict = setup[0]
ERCOT_path = setup[1]
# Exits session
elif newsetup is str(3):
sys.exit('Leaving current state... \n\nSession aborted.')
# Error message and exit
else:
sys.exit('ERROR: Could not complete setup. Please try again.')
# If there is a directory with the necessary folders, run the rest of the setup functions
else:
ERCOT_path = user_path
DataDict = DataDict_setup(ERCOT_path)
# Run function for user to select data sets
user_selections = source_selection()
# For loop to loop through user's requested data sets and to find the download links and download all files
for selection in user_selections:
for count, value in enumerate(DataDict[selection]):
# Define what data set is being downloaded and the links for it
start = timeit.default_timer()
data_name = DataDict[selection][count][0]
ERCOT_link = DataDict[selection][count][1]
filepath = DataDict[selection][count][2]
# Runs the function to get the download links of the required data set
download_links = scrape_for_download(ERCOT_link)
# Downloads the files of the required data set
download_files_from_array(download_links, filepath)
# Confirm that the dataset has been downloaded
stop = timeit.default_timer()
print('Downloaded files for ' + selection + " " + data_name + ' in ' + str(round((stop-start), 2)) + ' seconds')
# Artificial 5 second wait to keep firewall happy, we don't need speed anyways
time.sleep(5)
# When all datasets are downloaded, print confirmation and return to given/found/created path
print('Job complete')
start2 = timeit.default_timer()
print('Took ' + str(round((start2-start1), 2)) + ' seconds')
os.chdir(ERCOT_path)
# Creates a dictionary of the different links to each dataset from ERCOT MIS:
def DataDict_setup(ERCOT_path):
'''Creates the dictionary of different datasets and links to those datasets, must provide path argument
Args:
ERCOT_path: string with the file path where the user wants to put the downloaded files
Returns:
DataDict: dictionary of the different datatypes, which includes their name, link, and desired download path
'''
DataDict = {'DAM':[ ['Total Energy Purchased',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12333&reportTitle=DAM%20Total%20Energy%20Purchased&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/Energy Purchased & Sold/Purchased'],
['Total Energy Sold',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12334&reportTitle=DAM%20Total%20Energy%20Sold&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/Energy Purchased & Sold/Sold'],
['DAM SPPs',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12331&reportTitle=DAM%20Settlement%20Point%20Prices&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/SPP'],
['Shadow Prices',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12332&reportTitle=DAM%20Shadow%20Prices&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/Shadow Prices'],
['PTP Obligations',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=13042&reportTitle=DAM%20PTP%20Obligation%20and%20Option%20Results%20by%20Settlement%20Point&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/PTP Obligations'],
['PTP Option Prices',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=10042&reportTitle=Day-Ahead%20Point-to-Point%20Option%20Price%20Report&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/PTP Option Prices'],
['Historical SPPs (Hourly) by Load Zone and Hub',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=13060&reportTitle=Historical%20DAM%20Load%20Zone%20and%20Hub%20Prices&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/Historical SPP - LZ & Hub']],
'RTM':[['RTM SPPs',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12301&reportTitle=Settlement%20Point%20Prices%20at%20Resource%20Nodes,%20Hubs%20and%20Load%20Zones&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/RTM/SPP'],
['SCED Shadow Prices',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12302&reportTitle=SCED%20Shadow%20Prices%20and%20Binding%20Transmission%20Constraints&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/RTM/SCED Shadow Prices'],
['Historical SPPs (15 min) by Load Zone and Hub',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=13061&reportTitle=Historical%20RTM%20Load%20Zone%20and%20Hub%20Prices&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/RTM/Historical SPP - LZ & Hub']],
'LMP':[['DAM Hourly LMPs',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12328&reportTitle=DAM%20Hourly%20LMPs&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/DAM/LMP - Hourly DAM'],
['RTM Resource Node LMPs (5 min)',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12300&reportTitle=LMPs%20by%20Resource%20Nodes,%20Load%20Zones%20and%20Trading%20Hubs&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/RTM/LMP - Resource Node - Load Zone - Hub'],
['RTM Electric Bus LMPs (5 min)',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=11485&reportTitle=LMPs%20by%20Electrical%20Bus&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Market Information/RTM/LMP - Electrical Bus']],
'Load':[['Real Time Load - Actual vs Forecast (Hourly) (System Wide)',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=13499&reportTitle=Hourly%20Real-Time%20Load%20vs.%20Actual%20Report&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Load/Actual/Real Time Load - Actual vs Forecast (Hourly)'],
['Actual System Load (Hourly by Weather Zone)',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=13101&reportTitle=Actual%20System%20Load%20by%20Weather%20Zone&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Load/Actual/Actual System Load - Weather Zone'], # <-- make sure to remove the "]"
['Intrahour Load Forecast (by Weather Zone)',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=16553&reportTitle=Intra-Hour%20Load%20Forecast%20by.%20Weather%20Zonet&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Load/Actual/IntraHour Forecast - Weather Zone'],
['Seven Day Load Forecast (Hourly by Weather Zone)',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=12312&reportTitle=Seven-Day%20Load%20Forecast%20by%20Weather%20Zone&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Load/Actual/Seven-Day Load Forecast - Weather Zone']],
'Generation':[['System Wide & LZ Wind Power Production: Actual vs Forecast (Hourly)',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=13028&reportTitle=Wind%20Power%20Production%20-%20Hourly%20Averaged%20Actual%20and%20Forecasted%20Values&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Generation/Wind/System Wide & LZ Wind Production - Act vs Fcast'],
['IntraHour Wind Production Actual - Load Zone',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=14788&reportTitle=Wind%20Power%20Production%20-%20Actual%205-Minute%20Actual%20and%20Averaged%20Values%20by%20Geographical%20Region&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Generation/Wind/IntraHour - Actual'], # <-- make sure to remove the "]" and add a ","
['IntraHour Wind Production Forecast - Load Zone',"http://mis.ercot.com/misapp/GetReports.do?reportTypeId=16554&reportTitle=Intra-Hour%20Wind%20Power%20Forecast%20By%20Geographical%20Region&showHTMLView=&mimicKey",ERCOT_path + '/ERCOT Data/Generation/Wind/IntraHour - Forecast']]
}
print("Dictionary of links created")
return (DataDict)
# Create necessary folders within specified path:
def dir_create(user_path):
'''General function to take a path and create the necessary folders for the ERCOT Data
Args:
user_path: string with the file path where the user wants to put the downloaded files
Returns:
user_path
'''
if os.path.splitdrive(user_path)[1] == '\\':
user_path = os.path.splitdrive(user_path)[0]
else:
user_path = os.path.split(user_path)[0] + os.path.split(user_path)[1]
os.mkdir(str(user_path)+'\\ERCOT Data')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\Energy Purchased & Sold')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\Energy Purchased & Sold\\Purchased')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\Energy Purchased & Sold\\Sold')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\LMP - Hourly DAM')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\PTP Obligations')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\PTP Option Prices')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\Shadow Prices')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\DAM\\SPP')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\RTM')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\RTM\\Historical SPP - LZ & Hub')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\RTM\\LMP - Electrical Bus')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\RTM\\LMP - Resource Node - Load Zone - Hub')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\RTM\\SPP')
os.mkdir(str(user_path)+'\\ERCOT Data\\Market Information\\RTM\\SCED Shadow Prices')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation\\Fuel Mix')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation\\Wind')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation\\Wind\\Historical Hourly Wind Output')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation\\Wind\\IntraHour - Actual')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation\\Wind\\IntraHour - Forecast')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation\\Wind\\System Wide & LZ Wind Production - Act vs Fcast')
os.mkdir(str(user_path)+'\\ERCOT Data\\Generation\\Wind\\Wind Integration Reports')
os.mkdir(str(user_path)+'\\ERCOT Data\\Load')
os.mkdir(str(user_path)+'\\ERCOT Data\\Load\\Actual System Load - Weather Zone')
os.mkdir(str(user_path)+'\\ERCOT Data\\Load\\IntraHour Forecast - Weather Zone')
os.mkdir(str(user_path)+'\\ERCOT Data\\Load\\Real Time Load - Actual vs Forecast (Hourly)')
os.mkdir(str(user_path)+'\\ERCOT Data\\Load\\Seven-Day Load Forecast - Weather Zone')
return user_path
# Pick from a list of suggested path locations for setting up necessary folders
# TODO: make the style/best practices better
def user_setup(user_path = None):
'''Use this function to have the user specify the desired path to create ERCOT Data folders
Args:
user_path: string with the file path where the user wants to put the downloaded files, is allowed to be empty
Returns:
DataDict: dictionary of the different datatypes, which includes their name, link, and desired download path
ERCOT_path: string with the file path where the user wants to put the downloaded files, is allowed to be empty
'''
if user_path is None:
user_path = input("Choose path for creating necessary folders: \n 1) (Current Directory) "+os.getcwd()+" \n 2) (External Hard Drive) E:\ \n 3) (External Hard Drive) F:\ \n 4) Other \n 5) Exit \n").upper()
if user_path == str(5):
sys.exit("Leaving current state... \n\nSession aborted.")
elif user_path == str(1):
user_path = os.path.join(os.getcwd(),'')
if os.path.isdir(str(user_path)+'\\ERCOT Data') == False:
try:
yn = input('No folders found! Need to create necessary directory... \n Proceed? y/n \n').lower()
if yn == 'y':
dir_create(user_path)
else:
sys.exit("Leaving current state... \n\nSession aborted.")
except ValueError:
print('ERROR: unrecognized character')
else:
print('Folders already exist!')
print('\nERCOT_path: \n' + user_path+"\n")
elif user_path == str(2):
user_path = 'E:\\'
if os.path.isdir(str(user_path)+'\\ERCOT Data') == False:
try:
yn = input('No folders found! Need to create necessary directory... \n Proceed? y/n \n').lower()
if yn == 'y':
dir_create(user_path)
else:
sys.exit("Leaving current state... \n\nSession aborted.")
except ValueError:
print('ERROR: unrecognized character')
else:
print('Folders already exist!')
print('\nERCOT_path: \n' + user_path+"\n")
elif user_path == str(3):
user_path = 'F:\\'
if os.path.isdir(str(user_path)+'\\ERCOT Data') == False:
try:
yn = input('No folders found! Need to create necessary directory... \n Proceed? y/n \n').lower()
if yn == 'y':
dir_create(user_path)
else:
sys.exit("Leaving current state... \n\nSession aborted.")
except ValueError:
print('ERROR: unrecognized character')
else:
print('Folders already exist!')
print('\nERCOT_path: \n' + user_path+"\n")
elif user_path == str(4):
user_path = input('Location to file path: \n')
user_path = os.path.split(user_path)[0] + os.path.split(user_path)[1]
if os.path.isdir(str(user_path)+'\\ERCOT Data') == False:
try:
yn = input('No folders found! Need to create necessary directory... \n Proceed? y/n \n').lower()
if yn == 'y':
dir_create(user_path)
else:
sys.exit("Leaving current state... \n\nSession aborted.")
except ValueError:
print('ERROR: unrecognized character')
else:
print('Folders already exist!')
print('\nERCOT Data folders created in:\n'+user_path+"\n")
else:
sys.exit('ERROR: Could not find path')
else:
if os.path.splitdrive(user_path)[1] == '\\': # determine whether path is the actual drive (i.e. C:\\)
user_path = os.path.splitdrive(user_path)[0]#+'\\'
#elif os.path.split(user_path)[1] != '':
# path = os.path.split(user_path)[0] + os.path.split(user_path)[1]
else:
user_path = os.path.split(user_path)[0] + os.path.split(user_path)[1]
ERCOT_path = user_path
ERCOT_path = os.path.split(ERCOT_path)[0] + os.path.split(ERCOT_path)[1]
DataDict = DataDict_setup(ERCOT_path)
return (DataDict,ERCOT_path)
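# --- Hedged illustration (not part of the original script) ---
# The path handling above leans on os.path.splitdrive/os.path.split. The sketch
# below uses ntpath (the Windows flavour of os.path, so it behaves identically
# on any platform) with made-up paths to show what those calls return.
def _demo_path_splitting():
    import ntpath
    assert ntpath.splitdrive('C:\\') == ('C:', '\\')                # bare drive
    assert ntpath.split('C:\\Data\\ERCOT') == ('C:\\Data', 'ERCOT')
    # Rejoining head + tail ('C:\\Data' + 'ERCOT') drops the separator, so the
    # concatenation used above effectively just strips a trailing separator
    # when one is present before '\\ERCOT Data\\...' gets appended.
    return ntpath.split('C:\\Data\\ERCOT')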
# Allows the user to select what they want to download from the given list
# TODO: comment and make style/best practices edits
def source_selection():
'''Allows the user to select what datatypes they would want to download
Returns:
userselection: a list of what datatypes the user wants to download
'''
# Use this function to eliminate duplicate entries such as 112 or 1233 etc
def uniqueCharacters(str):
# If at any time we encounter 2 same characters, return false
for i in range(len(str)):
for j in range(i + 1,len(str)):
if(str[i] == str[j]):
return False
# If no duplicate characters encountered, return true
return True
source_options = {
'1':'DAM',
'2':'RTM',
'3':'LMP',
'4':'Load',
'5':'Generation',
'6':'All',
'7':'Exit'}
omit = set('890')
while True:
try:
sourcetype = input('\nWhich source type would you like to combine?\nSelect all that apply:\n\n 1) DAM\n 2) RTM\n 3) LMP\n 4) Load\n 5) Generation\n 6) All\n 7) Exit\n')
if (sourcetype.count('6') == 0 and sourcetype.count('7') == 0 and uniqueCharacters(sourcetype) and int(sourcetype) >= 1 and int(sourcetype) <= 55555 and any(n in sourcetype for n in omit) == False):
break
elif sourcetype == '6' or sourcetype == '7':
break
else:
print('ERROR: Incorrect entry. Please use only numeric values from 1 - 7 according to the above options')
continue
except ValueError:
print('ERROR: Incorrect entry. Please use only numeric values from 1 - 7 according to the above options')
continue
if sourcetype == str(7):
sys.exit('Leaving Session... \n\nSession aborted.')
elif sourcetype == str(6):
sourcetype = ['DAM','RTM','LMP','Load','Generation']
elif sourcetype != str(6):
source_selection = []
for i in sourcetype:
source_selection.append(source_options[i])
sourcetype = source_selection
userselection = sourcetype
return(userselection)
# This function scrapes the given ERCOT MIS table for the download links
def scrape_for_download(ERCOT_link):
'''Scrapes the given webpage for all download links that are not xml, returns an array of download links to iterate through
Args:
ERCOT_link: a string containing the link of the MIS directory for this datatype
Returns:
download_links: an array containing all the direct download links for all downloadable files in the MIS directory for that datatype
'''
# bs4 stuff to read the html
requests_response = requests.get(ERCOT_link)
htmldata = requests_response.text
htmlpage = BeautifulSoup(htmldata, features="lxml")
# Generate the empty array for the download links
download_links = []
    # First for loop loops through the first layer of table data. At first glance this does not make sense, but for some reason it is needed, otherwise it will create duplicates
for table in htmlpage.find_all('tr'):
# Second for loop loops through the second layer of the table data, this is the row data.
for row in table.find_all('tr'):
# If the row does not include 'xml' and includes 'href', we want to find the hyperlink and save the link to the array
if 'xml' not in str(row) and 'href' in str(row):
current_link = row.find('a').get('href')
download_links.append('http://mis.ercot.com' + current_link)
# Return the array of download links
return download_links
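# --- Hedged offline sketch (not part of the original scraper) ---
# Applies the same link-extraction rule as scrape_for_download() to a tiny
# hand-written nested table instead of a live ERCOT MIS page: rows whose
# markup mentions 'xml' are skipped and every other href is collected.
def _demo_scrape_rule():
    demo_html = """
    <table><tr><td>
      <table>
        <tr><td><a href="/misdownload/servlets/mirDownload?doclookupId=1">report.zip</a></td></tr>
        <tr><td><a href="/misdownload/servlets/mirDownload?doclookupId=2">report.xml</a></td></tr>
      </table>
    </td></tr></table>
    """
    page = BeautifulSoup(demo_html, features="lxml")
    links = []
    for table in page.find_all('tr'):
        for row in table.find_all('tr'):
            if 'xml' not in str(row) and 'href' in str(row):
                links.append('http://mis.ercot.com' + row.find('a').get('href'))
    # -> ['http://mis.ercot.com/misdownload/servlets/mirDownload?doclookupId=1']
    return links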
# This function starts the threads to download any files from the links from the array of download links given, and leave a text file listing urls and items downloaded
def download_files_from_array(download_links, filepath):
'''Creates the threads to download files from the array of links given,
and each thread then calls download_file_from_link to download the file
Args:
download_links: an array containing all the direct download links for all downloadable files in the MIS directory for that datatype
filepath: string with the file path where the user wants to put the downloaded files
'''
# Generate the output file name
current_time = datetime.now().strftime("%H%M%S")
outputfilename = 'output' + str(current_time) + '.txt'
numberoffiles = 0
# Download in batches of 100 so I don't get caught by CDN
batchsize = 100
for batch in range(0, len(download_links), batchsize):
# Load up the different threads for this download, dramatically speeds up the download
threads = []
download_links_batch = download_links[batch:batch+batchsize]
# First for loop creates a thread for each link to download it
for link in download_links_batch:
threads.append(Thread(target=download_file_from_link, args=(link,filepath,)))
# Second for loop starts all the different threads
for total in threads:
total.start()
# Third for loop waits for all the threads to finish
for total in threads:
total.join()
# Use 'with open' to automatically close the file when we're done with the for loops
with open(filepath + '/' + outputfilename,'w') as outputfile:
for link in download_links:
# In each for loop iteration, write the url of the file downloaded
outputfile.writelines(link + '\n')
numberoffiles = numberoffiles + 1
# Output last line in text file to record how many files were downloaded
outputfile.writelines('Downloaded ' + str(numberoffiles) + ' files')
# This function downloads files from the given link
def download_file_from_link(link, filepath):
'''Downloads files from the given link
Args:
link: a direct download link
filepath: string with the file path where the user wants to put the downloaded files
Raises:
404: can raise a 404 error with the download link if the server refuses connection twice, or if connection fails twice
FileExistsError: [WinError 183]: sometimes the check if the zipped file already exists fails
I do not think this is a worry
'''
# Get the html data from the link, if the first try fails (most probably because it was refused by the server), wait 10 seconds and try again
# I know doing a non-specific 'except' is bad, but any exceptions raised would be because of a connection error
try:
download_url = requests.get(link)
except:
time.sleep(10)
download_url = requests.get(link)
# Get the header information of the link, if getting the header fails, reacquire the html data from the link and try again
# Should only need to retry once
# I know doing a non-specific 'except' is bad, but any exceptions raised would be because of a connection error
try:
file_info = download_url.headers['content-disposition']
except:
download_url = requests.get(link)
file_info = download_url.headers['content-disposition']
# Regex the file information to get the name
filename = re.findall("filename=(.+)", file_info)[0]
# Check to see if file is a zip, if not just import it
if '.zip' in filename:
# If the file is a zip, edit the name to check if the underlying file already exists
filename = filename[:-4]
filename = filename.replace('_', '.')
# If the file does not already exist in the folder, download the zip and extract it to the folder
if not(os.path.isfile(filepath + '/' + filename)):
# Use the io.BytesIO so I do not have to download the zip file to disk before I extract it
zip = zipfile.ZipFile(io.BytesIO(download_url.content))
zip.extractall(filepath)
else:
        # Not a zip: write the raw bytes straight to disk in binary mode
        with open(filepath + '/' + filename, 'wb') as filewrite:
            filewrite.write(download_url.content)
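# --- Hedged illustration (not part of the original scraper) ---
# Shows how the filename regex above pulls a name out of a Content-Disposition
# header; the header string below is a made-up example.
def _demo_filename_from_header():
    demo_header = 'attachment; filename=DAM_SPP_20150101.zip'
    # -> 'DAM_SPP_20150101.zip'
    return re.findall("filename=(.+)", demo_header)[0]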
``` |
{
"source": "john-science/ADVECTOR",
"score": 3
} |
#### File: examples/data_downloaders/get_ECCO_credentials.py
```python
import subprocess
import tempfile
from getpass import getpass
def get_ECCO_credentials():
print(
"Download requires authentication. "
"You can find your WebDAV credenials and/or "
"create an account here: https://ecco.jpl.nasa.gov/drive/"
)
test_url = "https://ecco.jpl.nasa.gov/drive/files/README"
while True:
user = input("Enter your WebDAV username: ")
password = getpass("Enter your WebDAV password: ")
with tempfile.TemporaryDirectory() as tmp_dir:
wget_command = (
f"wget --user {user} --password {password} -P {tmp_dir} {test_url}"
)
response = subprocess.getoutput(wget_command)
if "200 OK" in response:
print("Authentication Successful.")
return user, password
elif response.count("401 Unauthorized") == 2:
# always 1 count because of the way the server handles
# the request. Second count means the user/pass is bad.
print("Authentication failed. Try again.")
print("Find your WebDAV credenials here: https://ecco.jpl.nasa.gov/drive/")
else:
raise RuntimeError(
"Unexpected failure while authenticating credentials. Aborting.\n"
f"Test wget command: '{wget_command}'\n"
f"Output from test wget command:\n '{response}'"
)
```
#### File: ADVECTOR/examples/ECCO_advect_2D.py
```python
from datetime import datetime, timedelta
from pathlib import Path
import xarray as xr
from ADVECTOR.examples.helpers.generate_sourcefiles import generate_2D_sourcefile
from ADVECTOR.run_advector_2D import run_advector_2D
from ADVECTOR.plotting.plot_advection import (
animate_ocean_advection,
plot_ocean_trajectories,
)
def main():
data_root = Path(input("Input path to example data directory: "))
output_root = Path(input("Input path to directory for outputfiles: "))
output_root.mkdir(exist_ok=True)
ADVECTION_START = datetime(2015, 1, 1)
ADVECTION_END = datetime(2015, 2, 1)
sourcefile_path = output_root / "2D_uniform_source_2015.nc"
generate_2D_sourcefile(
num_particles=5000,
release_date_range=(
ADVECTION_START,
ADVECTION_START + (ADVECTION_END - ADVECTION_START) / 2,
),
out_path=sourcefile_path,
)
out_dir = output_root / sourcefile_path.stem
out_dir.mkdir(parents=True, exist_ok=True)
def preprocess_currents(currents: xr.Dataset) -> xr.Dataset:
return currents.rename(
{
"longitude": "lon",
"latitude": "lat",
"Z": "depth",
"EVEL": "U",
"NVEL": "V",
}
)
def preprocess_wind(wind: xr.Dataset) -> xr.Dataset:
return wind.rename({"uwnd": "U", "vwnd": "V", "level": "depth"})
out_paths = run_advector_2D(
output_directory=str(out_dir),
sourcefile_path=str(sourcefile_path),
u_water_path=str(data_root / "EVEL_2015_01*.nc"),
v_water_path=str(data_root / "NVEL_2015_01*.nc"),
water_preprocessor=preprocess_currents,
u_wind_path=str(data_root / "uwnd.10m.gauss.2015.nc"),
v_wind_path=str(data_root / "vwnd.10m.gauss.2015.nc"),
wind_preprocessor=preprocess_wind,
windage_coeff=0.005, # fraction of windspeed transferred to particle
eddy_diffusivity=200, # m^2 / s
advection_start_date=ADVECTION_START,
timestep=timedelta(hours=1),
num_timesteps=24 * (ADVECTION_END - ADVECTION_START).days,
save_period=4,
overwrite_existing_files=True,
)
for path in out_paths:
print("Animating trajectories...")
animate_ocean_advection(outputfile_path=path, save=False)
print("Plotting trajectories...")
plot_ocean_trajectories(outputfile_path=path)
if __name__ == "__main__":
main()
```
#### File: ADVECTOR/examples/ECCO_advect_3D.py
```python
from datetime import datetime, timedelta
from pathlib import Path
import xarray as xr
from ADVECTOR.examples.helpers.generate_sourcefiles import generate_3D_sourcefile
from ADVECTOR.examples.helpers.generate_configfile import generate_sample_configfile
from ADVECTOR.plotting.plot_advection import animate_ocean_advection
from ADVECTOR.run_advector_3D import run_advector_3D
def main():
data_root = Path(input("Input path to example data directory: "))
output_root = Path(input("Input path to directory for outputfiles: "))
output_root.mkdir(exist_ok=True)
ADVECTION_START = datetime(2015, 1, 1)
ADVECTION_END = datetime(2015, 2, 1)
# generate a sourcefile
sourcefile_path = output_root / "3D_uniform_source_2015.nc"
generate_3D_sourcefile(
num_particles=5000,
density_range=(800, 1000),
radius_range=(1e-6, 1),
corey_shape_factor_range=(0.15, 1),
release_date_range=(
ADVECTION_START,
ADVECTION_START + (ADVECTION_END - ADVECTION_START) / 2,
),
depth_range=(0, 0),
out_path=sourcefile_path,
)
# generate a configfile
configfile_path = output_root / "config.nc"
generate_sample_configfile(out_path=configfile_path)
def preprocess_currents(currents: xr.Dataset) -> xr.Dataset:
return currents.rename(
{
"longitude": "lon",
"latitude": "lat",
"Z": "depth",
"EVEL": "U",
"NVEL": "V",
"WVELMASS": "W",
}
)
def preprocess_wind(wind: xr.Dataset) -> xr.Dataset:
return wind.rename({"uwnd": "U", "vwnd": "V", "level": "depth"})
out_dir = output_root / sourcefile_path.stem
out_dir.mkdir(parents=True, exist_ok=True)
# run the model!
out_paths = run_advector_3D(
output_directory=str(out_dir),
sourcefile_path=str(sourcefile_path),
configfile_path=str(configfile_path),
u_water_path=str(data_root / "EVEL_2015*.nc"),
v_water_path=str(data_root / "NVEL_2015*.nc"),
w_water_path=str(data_root / "WVELMASS_2015*.nc"),
water_preprocessor=preprocess_currents,
u_wind_path=str(data_root / "uwnd.10m.gauss.2015.nc"),
v_wind_path=str(data_root / "vwnd.10m.gauss.2015.nc"),
wind_preprocessor=preprocess_wind,
seawater_density_path=str(data_root / "RHO_2015.nc"),
advection_start_date=ADVECTION_START,
timestep=timedelta(hours=1),
num_timesteps=24 * (ADVECTION_END - ADVECTION_START).days,
save_period=4,
memory_utilization=0.4,
overwrite_existing_files=True,
)
for out_path in out_paths:
print("Animating trajectories...")
animate_ocean_advection(out_path)
if __name__ == "__main__":
main()
```
#### File: examples/helpers/generate_configfile.py
```python
from pathlib import Path
import numpy as np
import xarray as xr
def generate_configfile(
out_path: Path,
horizontal_diffusivity: np.ndarray,
z_hd: np.ndarray,
vertical_diffusivity: np.ndarray,
z_vd: np.ndarray,
max_wave_height: float = None,
wave_mixing_depth_factor: float = None,
):
"""
script to generate a configuration file given requisite parameters
"""
config = xr.Dataset(
{
"horizontal_diffusivity": (
"z_hd",
horizontal_diffusivity,
{"units": "m^2 s^-1"},
),
"vertical_diffusivity": (
"z_vd",
vertical_diffusivity,
{"units": "m^2 s^-1"},
),
},
coords={
"z_hd": (
"z_hd",
z_hd,
{
"long_name": "depth coordinate for horizontal_diffusivity",
"units": "m",
"positive": "up",
},
),
"z_vd": (
"z_vd",
z_vd,
{
"long_name": "depth coordinate for vertical_diffusivity",
"units": "m",
"positive": "up",
},
),
},
attrs={
"title": f"Configuration File for ADVECTOR",
"institution": "The Ocean Cleanup",
},
)
if max_wave_height:
config["max_wave_height"] = max_wave_height
if wave_mixing_depth_factor:
config["wave_mixing_depth_factor"] = wave_mixing_depth_factor
config.to_netcdf(out_path)
def generate_sample_configfile(out_path: Path):
"""a sample configuration file, diffusivity profiles are NOT based on true ocean state"""
generate_configfile(
horizontal_diffusivity=np.linspace(1500, 1, 20), # m^2 s^-1
z_hd=-np.logspace(0, 4, 20), # m
vertical_diffusivity=np.linspace(-5e-3, 1e-2, 10) ** 2,
z_vd=np.linspace(-1e4, 0, 10), # m
out_path=out_path,
)
if __name__ == "__main__":
generate_sample_configfile(out_path=Path(__file__).parent / "sample_config.nc")
```
#### File: ADVECTOR/io_tools/create_bathymetry.py
```python
import numpy as np
import xarray as xr
def create_bathymetry_from_land_mask(land_mask: xr.DataArray) -> xr.DataArray:
"""Method: identifies the lower depth bound of the shallowest
ocean cell (non-null) in each vertical grid column.
    :param land_mask: dimensions {time, depth, lat, lon}, boolean array, True where cell is land"""
assert np.all(land_mask.depth <= 0), "depth coordinate must be positive up"
assert np.all(
np.diff(land_mask.depth) > 0
), "depth coordinate must be sorted ascending"
# In the kernel, particles look up data based on the nearest cell-center.
# Thus cell bounds are the midpoints between each centers.
# Very top cell bound is surface, and bottom cell bounds are
# assumed to be symmetric about bottom cell center.
depth_diff = np.diff(land_mask.depth)
depth_bnds = np.concatenate(
[
land_mask.depth.values[:1] - depth_diff[0] / 2,
land_mask.depth.values[:-1] + depth_diff / 2,
[0],
]
)
bathy = (
(~land_mask)
.assign_coords({"depth": depth_bnds[:-1]})
.idxmax(dim="depth")
.where(~land_mask.isel(depth=-1), depth_bnds[-1])
)
bathy = bathy.drop(["time", "depth"])
bathy.name = "bathymetry"
bathy.attrs = {"units": "m", "positive": "up"}
return bathy
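# --- Hedged worked example (not part of the original module) ---
# Shows only the cell-bound construction used above, on a made-up depth axis of
# cell centers [-45, -15, -5] m (positive up, sorted ascending).
def _demo_depth_bounds():
    centers = np.array([-45.0, -15.0, -5.0])
    diff = np.diff(centers)                   # [30., 10.]
    bnds = np.concatenate([
        centers[:1] - diff[0] / 2,            # bottom bound:       [-60.]
        centers[:-1] + diff / 2,              # interior midpoints: [-30., -10.]
        [0],                                  # sea surface
    ])
    return bnds                               # [-60., -30., -10., 0.]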
```
#### File: src/model_core/test_vertical_profile.py
```python
import numpy as np
import pyopencl as cl
from tests.config import CL_CONTEXT, CL_QUEUE, MODEL_CORE_DIR
def sample_profile(sample_z: np.ndarray, z: np.ndarray, var: np.ndarray) -> np.ndarray:
"""should return linear interpolation of profile given by var, z at z=sample_z
samples all sample_z elements in parallel"""
# setup
prg = cl.Program(
CL_CONTEXT,
"""
#include "vertical_profile.cl"
__kernel void test_sample_profile(
__global const double *var,
__global const double *z,
const unsigned int len,
__global const double *sample_z,
__global double *out) {
vertical_profile prof = {.z = z, .values = var, .len = len};
out[get_global_id(0)] = sample_profile(prof, sample_z[get_global_id(0)]);
}
""",
).build(options=["-I", str(MODEL_CORE_DIR)])
out = np.zeros(sample_z.shape).astype(np.float64)
d_var = cl.Buffer(
CL_CONTEXT,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=var.astype(np.float64),
)
d_z = cl.Buffer(
CL_CONTEXT,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=z.astype(np.float64),
)
d_sample_z = cl.Buffer(
CL_CONTEXT,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=sample_z.astype(np.float64),
)
d_out = cl.Buffer(CL_CONTEXT, cl.mem_flags.WRITE_ONLY, out.nbytes)
prg.test_sample_profile(
CL_QUEUE, sample_z.shape, None, d_var, d_z, np.uint32(len(z)), d_sample_z, d_out
)
CL_QUEUE.finish()
cl.enqueue_copy(CL_QUEUE, out, d_out)
return out
def test_interpolation():
"""check the interpolation is same as numpy linear interp"""
var = np.linspace(0, 1000, 10)
z = -np.logspace(3, 0, 10)
sample_z = np.linspace(min(z), max(z), 10000)
np.testing.assert_allclose(
np.interp(sample_z, z, var), sample_profile(sample_z=sample_z, z=z, var=var)
)
def test_outside_domain():
"""check samples outside z domain evaluate to profile endpoints"""
var = np.array([1000, 1200, 1500, 2000])
z = np.array([-2, 0, 1, 4])
sample_z = np.array([-2.1, -2, 4, 4.1])
np.testing.assert_allclose(
[1000, 1000, 2000, 2000], sample_profile(sample_z=sample_z, z=z, var=var)
)
```
#### File: src/model_core/test_wind_driven_mixing.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pyopencl as cl
from tests.config import CL_CONTEXT, CL_QUEUE, MODEL_CORE_DIR
def sample_concentration_profile(
wind_10m: float, rise_velocity: float, nsamples: int
) -> np.ndarray:
"""
:return: random_depths, sampled using kukulka's concentration profile as a PDF
"""
# setup
prg = cl.Program(
CL_CONTEXT,
"""
#include "wind_driven_mixing.cl"
#include "random.cl"
__kernel void test_sample_concentration_profile(
double wind_10m,
double rise_velocity,
unsigned int nsamples,
__global double *out) {
random_state rstate = {.a = 1};
for (unsigned int i=0; i<nsamples; i++) {
random(&rstate);
out[i] = sample_concentration_profile(wind_10m, rise_velocity, 20, 10, &rstate);
}
}
""",
).build(options=["-I", str(MODEL_CORE_DIR)])
out = np.zeros(nsamples).astype(np.float64)
d_out = cl.Buffer(CL_CONTEXT, cl.mem_flags.WRITE_ONLY, out.nbytes)
prg.test_sample_concentration_profile(
CL_QUEUE,
(1,),
None,
np.float64(wind_10m),
np.float64(rise_velocity),
np.uint32(nsamples),
d_out,
)
CL_QUEUE.finish()
cl.enqueue_copy(CL_QUEUE, out, d_out)
return out
def test_sample_concentration_profile(plot=False):
"""tries to reproduce concentration plot given in Kukulka 2012 fig 3a
parameters: 10m wind speed is 6.5 m/s (based on the .75 cm/s water friction, stated average conditions)
particle rise velocity is modeled at .01 m/s, as in Kukulka
"""
u10 = 6.5 # m/s, roughly corresponds to water friction velocity .75 cm/s
w_b = 0.01 # m s^-1, given rise velocity
A_0 = 0.002628 # computed from ustar=.75, or u10 = 6.5 m/s
H_s = calculate_significant_wave_height(
calculate_wind_stress(u10)
) # significant wave height
MLD = -10 * H_s # as defined in wind_driven_mixing.cl
samples = sample_concentration_profile(
wind_10m=u10, rise_velocity=w_b, nsamples=100000
)
measured_PDF, bin_edges = np.histogram(
samples, bins=50, range=(MLD, 0), density=True
)
z = bin_edges[:-1] + np.diff(bin_edges) / 2
true_PDF = w_b / (A_0 * (1 - np.exp(w_b / A_0 * MLD))) * np.exp(w_b / A_0 * z)
def test_and_plot():
if plot:
plt.figure()
plt.plot(true_PDF, z, label="PDF")
plt.plot(measured_PDF, z, label="measured PDF")
plt.hist(
samples,
bins=50,
label="samples",
range=(MLD, 0),
orientation="horizontal",
density=True,
)
plt.ylim([MLD, 0])
plt.legend()
plt.ylabel("z")
plt.title(f"w_b={w_b} m/s, u10={u10} m/s --> MLD={MLD: .2f} m")
np.testing.assert_allclose(
true_PDF, measured_PDF, atol=0.03
) # every bin within 3%, that's a super close match
test_and_plot()
# now try with a neutral particle (special case, as PDF/CDF become undefined
# but asymptotic behavior is understood to be a uniform distribution)
w_b = 0
samples = sample_concentration_profile(
wind_10m=u10, rise_velocity=w_b, nsamples=100000
)
measured_PDF, bin_edges = np.histogram(
samples, bins=50, range=(MLD, 0), density=True
)
z = bin_edges[:-1] + np.diff(bin_edges) / 2
true_PDF = np.ones_like(z) / (0 - MLD)
test_and_plot()
def calculate_significant_wave_height(wind_stress: float) -> float:
"""
:param wind_stress (kg m^-1 s^-2)
:return: significant wave height (m)
"""
# setup
prg = cl.Program(
CL_CONTEXT,
"""
#include "wind_driven_mixing.cl"
__kernel void test_calculate_significant_wave_height(
const double wind_stress,
__global double *out) {
out[0] = calculate_significant_wave_height(wind_stress, 20);
}
""",
).build(options=["-I", str(MODEL_CORE_DIR)])
out = np.zeros(1).astype(np.float64)
d_out = cl.Buffer(CL_CONTEXT, cl.mem_flags.WRITE_ONLY, out.nbytes)
prg.test_calculate_significant_wave_height(
CL_QUEUE, (1,), None, np.float64(wind_stress), d_out
)
CL_QUEUE.finish()
cl.enqueue_copy(CL_QUEUE, out, d_out)
return out[0]
def test_calculate_significant_wave_height():
# kukulka says for u10 = ~6 m/s, water_friction_velocity = ~.75 cm/s.
# we can get wind stress from this, and calculate wave height, to at least do a sanity check
water_density = 1025 # kg m^-3
wind_stress = (
0.75e-2 ** 2 * water_density
) # m/s inverting kukulka's given frictional water velocity
wave_height = calculate_significant_wave_height(wind_stress)
assert 0 < wave_height < 1 # sanity range check
# check the maximum allowable wave height is not surpassed in absurd winds
# max height defined in physical_constants.h
assert (
calculate_significant_wave_height(calculate_wind_stress(wind_speed_10m=40))
== 20
)
def calculate_wind_stress(wind_speed_10m: float) -> float:
"""
:param wind_speed_10m: m/s
:return: wind stress (kg m^-1 s^-2)
"""
# setup
prg = cl.Program(
CL_CONTEXT,
"""
#include "wind_driven_mixing.cl"
__kernel void test_calculate_wind_stress(
const double wind_speed_10m,
__global double *out) {
out[0] = calculate_wind_stress(wind_speed_10m);
}
""",
).build(options=["-I", str(MODEL_CORE_DIR)])
out = np.zeros(1).astype(np.float64)
d_out = cl.Buffer(CL_CONTEXT, cl.mem_flags.WRITE_ONLY, out.nbytes)
prg.test_calculate_wind_stress(
CL_QUEUE, (1,), None, np.float64(wind_speed_10m), d_out
)
CL_QUEUE.finish()
cl.enqueue_copy(CL_QUEUE, out, d_out)
return out[0]
def test_calculate_wind_stress():
"""check matches some examples from kukulka"""
# kukulka says for u10 = 4.7 m/s, water_friction_velocity = .55 cm/s.
# water_friction_velocity = sqrt(wind_stress/water_density), so we can test this to compare methods.
u10 = 4.7
wind_stress = calculate_wind_stress(u10)
water_density = 1025 # kg m^-3
kukulka_result = 0.55e-2 # m/s
np.testing.assert_allclose(
kukulka_result, np.sqrt(wind_stress / water_density), rtol=0.01
)
if __name__ == "__main__":
test_sample_concentration_profile(plot=True)
``` |
{
"source": "john-science/yamlize",
"score": 3
} |
#### File: yamlize/yamlize/attribute_collection.py
```python
from yamlize.attributes import Attribute, MapItem, KeyedListItem
from yamlize.yamlizing_error import YamlizingError
class AttributeCollection(object):
__slots__ = ('order', 'by_key', 'by_name')
def __init__(self, *args, **kwargs):
# let's assume the order things were defined is the order we want to
# display them, still public if someone wants to muck
self.order = list()
self.by_key = dict()
self.by_name = dict()
for item in args:
if not isinstance(item, Attribute):
raise TypeError('Incorrect type {} while initializing '
'AttributeCollection with {}'
.format(type(item), item))
self.add(item)
def __iter__(self):
return iter(self.order)
@property
def required(self):
return {attr for attr in self if attr.is_required}
def add(self, attr):
existing = self.by_key.get(attr.key, None)
if existing is not None and existing is not attr:
raise KeyError('AttributeCollection already contains an entry for '
'{}, previously defined: {}'
.format(attr.key, existing))
elif existing is attr:
return
existing = self.by_name.get(attr.name, None)
if existing is not None and existing is not attr:
raise KeyError('AttributeCollection already contains an entry for '
'{}, previously defined: {}'
.format(attr.name, existing))
elif existing is attr:
return
self.by_key[attr.key] = attr
self.by_name[attr.name] = attr
self.order.append(attr)
def from_yaml(self, obj, loader, key_node, val_node, round_trip_data):
"""
returns: Attribute that was applied
"""
key = loader.construct_object(key_node)
attribute = self.by_key.get(key, None)
if attribute is None:
raise YamlizingError('Error parsing {}, found key `{}` but '
'expected any of {}'
.format(type(obj), key, self.by_key.keys()),
key_node)
attribute.from_yaml(obj, loader, val_node, round_trip_data)
return attribute
def yaml_attribute_order(self, obj, attr_order):
"""
        returns: list of attributes in the order they should appear in the YAML output
"""
new_attrs = []
for attr in self:
if attr not in attr_order:
new_attrs.append(attr)
return attr_order + new_attrs
def attr_dump_order(self, obj, attr_order):
"""
        returns: list of attributes to dump, with attributes still at their default value omitted
"""
new_attrs = []
for attr in self:
if attr.has_default(obj):
if attr in attr_order:
attr_order.remove(attr)
continue
if attr not in attr_order:
new_attrs.append(attr)
return attr_order + new_attrs
class MapAttributeCollection(AttributeCollection):
__slots__ = ()
def from_yaml(self, obj, loader, key_node, val_node, round_trip_data):
"""
returns: Attribute that was applied, or None.
Raises an exception if there was actually a problem.
"""
key = loader.construct_object(key_node)
attribute = self.by_key.get(key, None)
if attribute is not None:
attribute.from_yaml(obj, loader, val_node, round_trip_data)
else:
# the key_node will point to our object
del loader.constructed_objects[key_node]
key = obj.key_type.from_yaml(loader, key_node, round_trip_data)
val = obj.value_type.from_yaml(loader, val_node, round_trip_data)
try:
obj.__setitem__(key, val)
except Exception as ee:
raise YamlizingError('Failed to add key `{}` with value `{}`, got: {}'
.format(key, val, ee), key_node)
return attribute # could be None, and that is fine
def yaml_attribute_order(self, obj, attr_order):
"""
        returns: list of attributes in the order they should appear in the YAML output
"""
attr_order = AttributeCollection.yaml_attribute_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(MapItem(item_key, obj.key_type, obj.value_type))
return attr_order
def attr_dump_order(self, obj, attr_order):
"""
        returns: list of attributes to dump, with attributes still at their default value omitted
"""
attr_order = AttributeCollection.attr_dump_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(MapItem(item_key, obj.key_type, obj.value_type))
return attr_order
class KeyedListAttributeCollection(AttributeCollection):
__slots__ = ()
def from_yaml(self, obj, loader, key_node, val_node, round_trip_data):
"""
returns: Attribute that was applied, or None.
Raises an exception if there was actually a problem.
"""
key = loader.construct_object(key_node)
attribute = self.by_key.get(key, None)
if attribute is not None:
attribute.from_yaml(obj, loader, val_node, round_trip_data)
else:
# the key_node will point to our object
del loader.constructed_objects[key_node]
val = obj.item_type.from_yaml_key_val(
loader,
key_node,
val_node,
obj.__class__.key_attr,
round_trip_data
)
obj[obj.__class__.key_attr.get_value(val)] = val
return attribute # could be None, and that is fine
def yaml_attribute_order(self, obj, attr_order):
"""
        returns: list of attributes in the order they should appear in the YAML output
"""
attr_order = AttributeCollection.yaml_attribute_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(KeyedListItem(obj.__class__.key_attr, obj.item_type, item_key))
return attr_order
def attr_dump_order(self, obj, attr_order):
"""
        returns: list of attributes to dump, with attributes still at their default value omitted
"""
attr_order = AttributeCollection.attr_dump_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(KeyedListItem(obj.__class__.key_attr, obj.item_type, item_key))
return attr_order
``` |
{
"source": "johnsean180801/lk-21",
"score": 3
} |
#### File: lk21/extractors/melongmovie.py
```python
from . import BaseExtractor
class Melongmovie(BaseExtractor):
tag = "movie"
host = "http://167.99.31.48"
def extract_meta(self, id: str) -> dict:
"""
        Fetch all metadata from the web page
Args:
id: type 'str'
"""
raw = self.session.get(f"{self.host}/{id}")
soup = self.soup(raw)
meta = self.MetaSet()
meta["image"] = soup.find(class_="wp-post-image")["src"]
meta["judul"] = self.re.split(
"(?i)(?:bd )?(?:batch )?subtitle", soup.title.text)[0]
alias = {
"Country": "negara",
"Quality": "kualitas",
"Network": "jaringan",
"Duration": "durasi",
"Stars": "bintang film",
"Release": "rilis"
}
if (ul := soup.find("ul", class_="data")):
for li in ul.findAll("li"):
k, v = self.re.split(r"\s*:\s*", li.text)
meta.add(alias.get(k, k), v)
sinopsis = soup.find(class_="dzdesu").findPrevious("p")
meta["sinopsis"] = sinopsis.text
return meta
def extract_data(self, id: str) -> dict:
"""
        Fetch all download sites from the web page
        Args:
            id: url path starting after the host, type 'str'
        Returns:
            dict: result of 'scraping' the web page
"""
raw = self.session.get(f"{self.host}/{id}")
soup = self.soup(raw)
result = {}
for ep in soup.findAll(text=self.re.compile(r"(?i)episode\s+\d+|LINK DOWNLOAD")):
content = ep.findNext("div")
r = {}
for p in content.findAll("p"):
if p.a:
y = {}
for a in p.findAll("a"):
y[a.text] = a["href"]
title = self.re.search(r"\s*([^=]+)", p.text)
r[title.group(1)] = y
if r:
result[ep] = r
for ep in soup.findAll("h2", text=self.re.compile(r"(?i)episode\s+\d+")):
r = {}
ul = ep.findNext("ul")
for li in ul.findAll("li"):
sub = "/".join(strong.text for strong in li.findAll("strong"))
if sub.count("/") > 2:
continue
y = {}
for a in li.findAll("a"):
y[a.text] = a["href"]
r[sub] = y
result[ep.text] = r
pattern = self.re.compile(r"[A-Z ]+:")
if (ref := soup.find("strong", text=pattern)):
for li in ref.findAllNext("li"):
sub = "/".join(strong.text for strong in li.findAll("strong"))
r = {}
for a in li.findAll("a"):
r[a.text] = a["href"]
title = li.findPrevious(
"strong", text=pattern).text.strip(": \n")
if not result.get(title):
result[title] = {}
result[title][sub] = r
return result
def search(self, query: str, page: int = 1) -> list:
"""
        Search for items matching the given 'query'
        Args:
            query: search keyword, type 'str'
            page: web page index, type 'int'
        Returns:
            list: list of items as 'dict'
"""
raw = self.session.get(f"{self.host}/page/{page}",
params={"s": query})
soup = self.soup(raw)
result = []
if (los := soup.find(class_="los")):
for article in los.findAll("article"):
a = article.find("a")
r = {
"id": self.getPath(a["href"]),
"title": a["alt"]
}
for k in ("quality", "eps"):
if (i := article.find(class_=k)):
r[k] = i.text
for ip in ("genre", "name"):
if (i := article.findAll(itemprop=ip)):
r[ip] = [a.text for a in i]
result.append(r)
return result
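# --- Hedged illustration (not part of the original extractor) ---
# extract_meta() splits the page <title> on the word "subtitle" to recover the
# bare "judul"; the sketch below reproduces that with the standard `re` module
# on a made-up title string.
def _demo_title_split():
    import re
    demo_title = "Example Movie (2021) BD Batch Subtitle Indonesia"
    # -> "Example Movie (2021) "
    return re.split(r"(?i)(?:bd )?(?:batch )?subtitle", demo_title)[0]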
``` |
{
"source": "johnsears/monopoly-deal",
"score": 3
} |
#### File: src/monopoly_deal/actions.py
```python
from itertools import combinations
from typing import List, Union
from monopoly_deal.cards import *
from monopoly_deal.game import *
MAX_CARDS_IN_HAND = 7
class Action:
respondable = False
class EndTurn(Action):
pass
class NoResponse(Action):
pass
class Charge(Action):
def __init__(self, charge_player: Player, amount: int):
self.target_player = charge_player
self.amount = amount
self.respondable = True
def __repr__(self):
return f'<Charge ${self.amount} to player {self.target_player.index}>'
class StealCard(Action):
def __init__(self, steal_from_player: Player, steal_card: PropertyCard):
self.target_player = steal_from_player
self.steal_card = steal_card
self.respondable = True
def __repr__(self):
        return f'<StealCard {self.steal_card}>'
class StealSet(Action):
def __init__(self, steal_from_player: Player, steal_set: PropertySet):
self.target_player = steal_from_player
self.steal_set = steal_set
self.respondable = True
def __repr__(self):
return f'<StealSet {self.steal_set.color} from player {self.target_player.index}>'
class Swap(Action):
def __init__(self, steal_from_player: Player, steal_card: PropertyCard, give_card: PropertyCard):
self.target_player = steal_from_player
self.steal_card = steal_card
self.give_card = give_card
self.respondable = True
def __repr__(self):
return f'<Swap {self.steal_card} for {self.give_card}>'
class Pay(Action):
def __init__(self, pay_to_player: Player, cash_cards: Tuple[Card], property_cards: Tuple[PropertyCard]):
self.target_player = pay_to_player
self.cash_cards = cash_cards
self.property_cards = property_cards
self.respondable = False
def get_amount(self):
return Board.get_value_of_cards(cards=self.cash_cards + self.property_cards)
def __repr__(self):
        return f'<Pay {self.cash_cards} + {self.property_cards} to {self.target_player.index}>'
class Discard(Action):
def __init__(self, discard_cards: Tuple[Card]):
self.discard_cards = discard_cards
self.respondable = False
class ChangeColor(Action):
def __init__(self, property_card: PropertyCard, to_color: Color):
self.property_card = property_card
self.to_color = to_color
self.respondable = False
def __repr__(self):
return f'<ChangeColor {self.property_card} to {self.to_color}>'
class Draw(Action):
def __init__(self, num_to_draw: int):
self.respondable = False
self.num_to_draw = num_to_draw
class SayNo(Action):
def __init__(self, to_player: Player):
self.respondable = True
self.target_player = to_player
class DoubleCharge(Action):
def __init__(self):
self.respondable = True
class PlayProperty(Action):
def __init__(self, property_card: PropertyCard, color: Color):
self.property_card = property_card
self.color = color
self.respondable = False
def __repr__(self):
return f'<PlayProperty {self.property_card.name}> as {self.color}>'
class PlayAsCash(Action):
def __init__(self, cash_card: Cashable):
self.cash_card = cash_card
self.respondable = False
def __repr__(self):
return f'<PlayAsCash {self.cash_card}>'
def get_available_actions(card: Card, players: Tuple[Player], current_player: Player):
available_actions = []
if isinstance(card, CashCard):
available_actions = [PlayAsCash(cash_card=card)]
elif isinstance(card, PropertyCard):
if card.buildable:
if Color.ALL not in card.colors:
for color in card.colors:
available_actions.append(PlayProperty(property_card=card, color=color))
else:
for color in Color:
if color != Color.ALL:
available_actions.append(PlayProperty(property_card=card, color=color))
else:
for property_set in current_player.board.get_complete_sets():
if property_set.can_add_house() and card.name == HOUSE:
available_actions.append(PlayProperty(property_card=card, color=property_set.color))
elif property_set.can_add_hotel() and card.name == HOTEL:
available_actions.append(PlayProperty(property_card=card, color=property_set.color))
elif isinstance(card, RentCard):
available_actions = [PlayAsCash(cash_card=card)]
matching_property_sets = current_player.board.get_property_sets_matching_colors(colors=card.colors)
if len(matching_property_sets) > 0:
best_rent = max([pset.get_rent_due() for pset in matching_property_sets])
for opposing_player in players:
if opposing_player != current_player:
available_actions.append(Charge(charge_player=opposing_player, amount=best_rent))
elif isinstance(card, ActionCard):
available_actions = [PlayAsCash(cash_card=card)]
if card.action_type == ActionType.PASS_GO:
available_actions.append(Draw(2))
else:
for opposing_player in players:
if opposing_player != current_player:
if card.action_type == ActionType.BDAY:
available_actions.append(Charge(charge_player=opposing_player, amount=2))
elif card.action_type == ActionType.DEAL_BREAKER:
for complete_set in opposing_player.board.get_complete_sets():
available_actions.append(StealSet(steal_from_player=opposing_player, steal_set=complete_set))
elif card.action_type == ActionType.DEBT_COLLECTOR:
available_actions.append(Charge(charge_player=opposing_player, amount=5))
elif card.action_type in (ActionType.SLY_DEAL, ActionType.FORCED_DEAL):
for property_set in opposing_player.board.property_sets:
if not property_set.is_complete():
for property_card in property_set.cards:
if card.action_type == ActionType.SLY_DEAL:
available_actions.append(
StealCard(steal_from_player=opposing_player, steal_card=property_card)
)
else:
for own_property_set in current_player.board.property_sets:
if not own_property_set.is_complete():
for own_property_card in own_property_set.cards:
available_actions.append(
Swap(
steal_from_player=opposing_player,
steal_card=property_card,
give_card=own_property_card
)
)
return available_actions
def get_available_responses(player: Player, actions: List[Tuple[Player, Card, Action]]):
available_responses = {None: [NoResponse()]}
current_hand = player.hand
for current_player, card_to_play, action in actions:
if player.index == current_player.index and card_to_play is not None:
current_hand = current_hand.play_card(card_to_play)
opposing_player, card_to_play, action = actions[-1]
if isinstance(action, Charge):
available_payment_options = []
        # If the charge wipes them out, give everything
if action.amount >= player.board.get_total_value():
pay = Pay(
pay_to_player=opposing_player,
cash_cards=player.board.cash_cards,
property_cards=player.board.get_all_property_cards()
)
available_payment_options.append(pay)
else:
# Try paying with largest bills first
cash_cards_to_pay = player.board.find_cash_to_pay_bill_up_to_amount(bill_amount=action.amount)
cash_value = sum([card.value for card in cash_cards_to_pay])
if cash_value == action.amount:
pay = Pay(pay_to_player=opposing_player, cash_cards=tuple(cash_cards_to_pay), property_cards=tuple())
available_payment_options.append(pay)
else:
payment_options = player.board.find_additional_cards_to_pay_bill(
bill_amount=action.amount,
cards_in_payment=cash_cards_to_pay
)
for payment_set in payment_options:
                    cash_cards: Tuple[Cashable, ...] = tuple([card for card in payment_set if isinstance(card, Cashable)])
property_cards = tuple([card for card in payment_set if isinstance(card, PropertyCard)])
pay = Pay(pay_to_player=opposing_player, cash_cards=cash_cards, property_cards=property_cards)
available_payment_options.append(pay)
if available_payment_options:
available_responses[None] = available_payment_options
for card in current_hand.cards_in_hand:
if isinstance(card, ActionCard):
if card.action_type == ActionType.JUST_SAY_NO:
available_responses[card] = [SayNo(to_player=opposing_player)]
break
return available_responses
def get_discard_options(player: Player):
num_needed_to_discard = len(player.hand.cards_in_hand) - MAX_CARDS_IN_HAND
potential_discards = combinations(player.hand.cards_in_hand, num_needed_to_discard)
return {None: [Discard(discard_cards=tuple(cards)) for cards in potential_discards]}
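# Illustrative example for get_discard_options above (limit value assumed): with 9
# cards in hand and MAX_CARDS_IN_HAND == 7 (the standard hand limit), num_needed_to_discard
# is 2, so the dict maps None to the C(9, 2) == 36 possible two-card Discard actions.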
```
#### File: src/monopoly_deal/cards.py
```python
from enum import Enum
HOUSE = 'House'
HOTEL = 'Hotel'
class Card():
def __init__(self, index, value):
self.index = index
self.value = value
class Cashable(Card):
pass
class CashCard(Cashable):
def __repr__(self):
return f'<CashCard (${self.value})>'
class ActionCard(Cashable):
def __init__(self, index, value, action_type, description):
super().__init__(index, value)
self.action_type = action_type
self.description = description
def __repr__(self):
return f'<ActionCard: {self.action_type.value} (${self.value})>'
class PropertyCard(Card):
def __init__(self, index, value, name, colors, rent, buildable):
super().__init__(index, value)
self.name = name
self.colors = colors
self.rent = rent
self.buildable = buildable
def __repr__(self):
return f'<PropertyCard: {self.name} (${self.value})>'
class RentCard(Cashable):
def __init__(self, index, value, colors, wild):
super().__init__(index, value)
self.colors = colors # Set
self.wild = wild # Boolean - Targeting
def __repr__(self):
return f'<RentCard: {self.colors} (${self.value})>'
class ActionType(Enum):
BDAY = "It's my birthday!"
DOUBLE_THE_RENT = "Double the Rent"
DEAL_BREAKER = "Deal Breaker"
JUST_SAY_NO = "Just Say No!"
DEBT_COLLECTOR = "Debt Collector"
SLY_DEAL = "Sly Deal"
FORCED_DEAL = "Forced Deal"
PASS_GO = "Pass Go"
class Color(Enum):
RED = "red"
DBLUE = "darkblue"
LBLUE = "lightblue"
PURPLE = "purple"
GREEN = "green"
ORANGE = "orange"
YELLOW = "yellow"
BROWN = "brown"
RR = "railroad"
UTIL = "utility"
ALL = "all"
def __repr__(self):
return self.value
deck = {
1: PropertyCard(1, 3, HOUSE, {Color.ALL}, [], False),
2: PropertyCard(2, 3, HOUSE, {Color.ALL}, [], False),
3: PropertyCard(3, 3, HOUSE, {Color.ALL}, [], False),
4: PropertyCard(4, 4, HOTEL, {Color.ALL}, [], False),
5: PropertyCard(5, 4, HOTEL, {Color.ALL}, [], False),
6: PropertyCard(6, 4, HOTEL, {Color.ALL}, [], False),
7: ActionCard(7, 2, ActionType.BDAY, "All players give you $2M as a gift."),
8: ActionCard(8, 2, ActionType.BDAY, "All players give you $2M as a gift."),
9: ActionCard(9, 2, ActionType.BDAY, "All players give you $2M as a gift."),
10: ActionCard(10, 1, ActionType.DOUBLE_THE_RENT, "Needs to be played with a rent card."),
11: ActionCard(11, 1, ActionType.DOUBLE_THE_RENT, "Needs to be played with a rent card."),
12: ActionCard(12, 5, ActionType.DEAL_BREAKER, "Steal a complete set from any player (includes any buildings)"),
13: ActionCard(13, 5, ActionType.DEAL_BREAKER, "Steal a complete set from any player (includes any buildings)"),
14: ActionCard(14, 4, ActionType.JUST_SAY_NO, "Use any time when an action card is played against you."),
15: ActionCard(15, 4, ActionType.JUST_SAY_NO, "Use any time when an action card is played against you."),
16: ActionCard(16, 4, ActionType.JUST_SAY_NO, "Use any time when an action card is played against you."),
17: ActionCard(17, 3, ActionType.DEBT_COLLECTOR, "Force any player to pay you $5M"),
18: ActionCard(18, 3, ActionType.DEBT_COLLECTOR, "Force any player to pay you $5M"),
19: ActionCard(19, 3, ActionType.DEBT_COLLECTOR, "Force any player to pay you $5M"),
20: ActionCard(20, 3, ActionType.SLY_DEAL, "Steal a property from a player of your choice (cannot be a part of a full set)!"),
21: ActionCard(21, 3, ActionType.SLY_DEAL, "Steal a property from a player of your choice (cannot be a part of a full set)!"),
22: ActionCard(22, 3, ActionType.SLY_DEAL, "Steal a property from a player of your choice (cannot be a part of a full set)!"),
23: ActionCard(23, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
24: ActionCard(24, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
25: ActionCard(25, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
26: ActionCard(26, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
27: ActionCard(27, 1, ActionType.PASS_GO, "Draw two extra cards!"),
28: ActionCard(28, 1, ActionType.PASS_GO, "Draw two extra cards!"),
29: ActionCard(29, 1, ActionType.PASS_GO, "Draw two extra cards!"),
30: ActionCard(30, 1, ActionType.PASS_GO, "Draw two extra cards!"),
31: ActionCard(31, 1, ActionType.PASS_GO, "Draw two extra cards!"),
32: ActionCard(32, 1, ActionType.PASS_GO, "Draw two extra cards!"),
33: ActionCard(33, 1, ActionType.PASS_GO, "Draw two extra cards!"),
34: ActionCard(34, 1, ActionType.PASS_GO, "Draw two extra cards!"),
35: ActionCard(35, 1, ActionType.PASS_GO, "Draw two extra cards!"),
36: ActionCard(36, 1, ActionType.PASS_GO, "Draw two extra cards!"),
37: PropertyCard(37, 2, "Electric Company", {Color.UTIL}, [1, 2], True),
38: PropertyCard(38, 2, "Waterworks", {Color.UTIL}, [1, 2], True),
39: PropertyCard(39, 2, "Pennsylvania Railroad", {Color.RR}, [1, 2, 3, 4], True),
40: PropertyCard(40, 2, "Reading Railroad", {Color.RR}, [1, 2, 3, 4], True),
41: PropertyCard(41, 2, "B. & O. Railroad", {Color.RR}, [1, 2, 3, 4], True),
42: PropertyCard(42, 2, "Short Line Railroad", {Color.RR}, [1, 2, 3, 4], True),
43: PropertyCard(43, 1, "Baltic Avenue", {Color.BROWN}, [1, 2], True),
44: PropertyCard(44, 1, "Mediterranean Avenue", {Color.BROWN}, [1, 2], True),
45: PropertyCard(45, 1, "Oriental Avenue", {Color.LBLUE}, [1, 2, 3], True),
46: PropertyCard(46, 1, "Connecticut Avenue", {Color.LBLUE}, [1, 2, 3], True),
47: PropertyCard(47, 1, "Vermont Avenue", {Color.LBLUE}, [1, 2, 3], True),
48: PropertyCard(48, 2, "States Avenue", {Color.PURPLE}, [1, 2, 4], True),
49: PropertyCard(49, 2, "Virginia Avenue", {Color.PURPLE}, [1, 2, 4], True),
50: PropertyCard(50, 2, "St. Charles Place", {Color.PURPLE}, [1, 2, 4], True),
51: PropertyCard(51, 2, "St. James Place", {Color.ORANGE}, [1, 3, 5], True),
52: PropertyCard(52, 2, "Tennessee Avenue", {Color.ORANGE}, [1, 3, 5], True),
53: PropertyCard(53, 2, "New York Avenue", {Color.ORANGE}, [1, 3, 5], True),
54: PropertyCard(54, 3, "Indiana Avenue", {Color.RED}, [2, 3, 6], True),
55: PropertyCard(55, 3, "Illinois Avenue", {Color.RED}, [2, 3, 6], True),
56: PropertyCard(56, 3, "Kentucky Avenue", {Color.RED}, [2, 3, 6], True),
57: PropertyCard(57, 3, "Atlantic Avenue", {Color.YELLOW}, [2, 4, 6], True),
58: PropertyCard(58, 3, "<NAME>", {Color.YELLOW}, [2, 4, 6], True),
59: PropertyCard(59, 3, "Ventnor Avenue", {Color.YELLOW}, [2, 4, 6], True),
60: PropertyCard(60, 4, "Pennsylvania Avenue", {Color.GREEN}, [2, 4, 7], True),
61: PropertyCard(61, 4, "Pacific Avenue", {Color.GREEN}, [2, 4, 7], True),
62: PropertyCard(62, 4, "North Carolina Avenue", {Color.GREEN}, [2, 4, 7], True),
63: PropertyCard(63, 4, "Park Place", {Color.DBLUE}, [3, 8], True),
64: PropertyCard(64, 4, "Boardwalk", {Color.DBLUE}, [3, 8], True),
    65: PropertyCard(65, 0, "Wild", {Color.ALL}, [], True),
    66: PropertyCard(66, 0, "Wild", {Color.ALL}, [], True),
67: PropertyCard(67, 4, "Wild", {Color.RR, Color.LBLUE}, [], True),
68: PropertyCard(68, 2, "Wild", {Color.RR, Color.UTIL}, [], True),
69: PropertyCard(69, 4, "Wild", {Color.RR, Color.GREEN}, [], True),
70: PropertyCard(70, 4, "Wild", {Color.GREEN, Color.DBLUE}, [], True),
71: PropertyCard(71, 3, "Wild", {Color.YELLOW, Color.RED}, [], True),
72: PropertyCard(72, 3, "Wild", {Color.YELLOW, Color.RED}, [], True),
73: PropertyCard(73, 1, "Wild", {Color.LBLUE, Color.BROWN}, [], True),
74: PropertyCard(74, 2, "Wild", {Color.PURPLE, Color.ORANGE}, [], True),
75: PropertyCard(75, 2, "Wild", {Color.PURPLE, Color.ORANGE}, [], True),
76: RentCard(76, 1, {Color.BROWN, Color.LBLUE}, False),
77: RentCard(77, 1, {Color.BROWN, Color.LBLUE}, False),
78: RentCard(78, 1, {Color.RED, Color.YELLOW}, False),
79: RentCard(79, 1, {Color.RED, Color.YELLOW}, False),
80: RentCard(80, 1, {Color.GREEN, Color.DBLUE}, False),
81: RentCard(81, 1, {Color.GREEN, Color.DBLUE}, False),
82: RentCard(82, 1, {Color.RR, Color.UTIL}, False),
83: RentCard(83, 1, {Color.RR, Color.UTIL}, False),
84: RentCard(84, 1, {Color.PURPLE, Color.ORANGE}, False),
85: RentCard(85, 1, {Color.PURPLE, Color.ORANGE}, False),
86: RentCard(86, 3, {Color.ALL}, True),
87: RentCard(87, 3, {Color.ALL}, True),
88: RentCard(88, 3, {Color.ALL}, True),
89: CashCard(89, 1),
90: CashCard(90, 1),
91: CashCard(91, 1),
92: CashCard(92, 1),
93: CashCard(93, 1),
94: CashCard(94, 1),
95: CashCard(95, 2),
96: CashCard(96, 2),
97: CashCard(97, 2),
98: CashCard(98, 2),
99: CashCard(99, 2),
100: CashCard(100, 3),
101: CashCard(101, 3),
102: CashCard(102, 3),
103: CashCard(103, 4),
104: CashCard(104, 4),
105: CashCard(105, 4),
106: CashCard(106, 5),
107: CashCard(107, 5),
108: CashCard(108, 10)
}
property_set_rents = {
Color.UTIL: [1, 2],
Color.RR: [1, 2, 3, 4],
Color.BROWN: [1, 2],
Color.LBLUE: [1, 2, 3],
Color.PURPLE: [1, 2, 4],
Color.ORANGE: [1, 3, 5],
Color.RED: [2, 3, 6],
Color.YELLOW: [2, 4, 6],
Color.GREEN: [2, 4, 7],
Color.DBLUE: [3, 8]
}
```
#### File: src/monopoly_deal/mcts.py
```python
from mcts import mcts
from typing import List, Dict
from monopoly_deal.actions import Action
from monopoly_deal.agents import Agent, RandomAgent
from monopoly_deal.cards import Card
from monopoly_deal.game import Game, Player
from monopoly_deal.play import step
class State:
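    # adapter for the third-party `mcts` package: mcts.search() drives the game by
    # calling getCurrentPlayer/getPossibleActions/takeAction/isTerminal/getReward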
def __init__(self, ai_player: Player, player: Player, game: Game, actions: List, available_actions: Dict):
self.ai_player = ai_player
self.game = game
self.player = player
self.actions = actions
self.available_actions = available_actions
def getCurrentPlayer(self):
if self.player.index == self.ai_player.index:
return 1
else:
return -1
def getPossibleActions(self):
possible_actions = []
for card, list_of_actions in self.available_actions.items():
for action in list_of_actions:
possible_actions.append((card, action))
return possible_actions
def takeAction(self, action):
card, action = action
try:
player, game, actions, available_actions, is_over = step(
game=self.game,
actions=self.actions,
card_to_play=card,
action=action
)
        except Exception:
            # debugging hook: drop into ipdb to inspect the failing step() call,
            # then re-raise so the unbound locals below are never referenced
            import ipdb
            ipdb.set_trace()
            raise
return State(
ai_player=self.ai_player,
player=player,
game=game,
actions=actions,
available_actions=available_actions
)
def isTerminal(self):
return self.game.winner() is not None
def getReward(self):
winner = self.game.winner()
if winner.index == self.ai_player.index:
return 1
else:
return -1
class MCTSAgent(Agent):
def __init__(self, player: Player, time_limit: int):
self.player = player
self.mcts = mcts(timeLimit=time_limit)
def get_response(self, game: Game, actions: List[Action], available_responses: Dict[Card, List[Action]]):
        card, action = self.mcts.search(
            initialState=State(ai_player=self.player, player=game.players[self.player.index], game=game, actions=actions,
                               available_actions=available_responses))
return card, action
def get_discard_action(self, game: Game, actions: List[Action], discard_options: Dict[Card, List[Action]]):
card, action = self.mcts.search(
initialState=State(ai_player=self.player, player=game.players[self.player.index], game=game, actions=actions,
available_actions=discard_options))
return card, action
def get_action(self, game: Game, actions: List[Action], available_actions: Dict[Card, List[Action]]):
card, action = self.mcts.search(
initialState=State(ai_player=self.player, player=game.players[self.player.index], game=game, actions=actions,
available_actions=available_actions))
return card, action
``` |
{
"source": "johnseekins/api-v3",
"score": 3
} |
#### File: api/db/__init__.py
```python
import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
DATABASE_URL = os.environ["DATABASE_URL"].replace("postgres://", "postgresql://")
"""
From https://docs.sqlalchemy.org/en/14/core/pooling.html
Default pool/overflow size is 5/10, timeout 30 seconds
max_overflow=10 - the number of connections to allow in connection pool “overflow”, that is connections that can be opened above and beyond the pool_size setting, which defaults to five. this is only used with QueuePool.
pool_size=5 - the number of connections to keep open inside the connection pool. This used with QueuePool as well as SingletonThreadPool. With QueuePool, a pool_size setting of 0 indicates no limit; to disable pooling, set poolclass to NullPool instead.
pool_timeout=30 - number of seconds to wait before giving up on getting a connection from the pool. This is only used with QueuePool. This can be a float but is subject to the limitations of Python time functions which may not be reliable in the tens of milliseconds.
pool_recycle=-1 - this setting causes the pool to recycle connections after the given number of seconds has passed. It defaults to -1, or no timeout. For example, setting to 3600 means connections will be recycled after one hour. Note that MySQL in particular will disconnect automatically if no activity is detected on a connection for eight hours (although this is configurable with the MySQLDB connection itself and the server configuration as well).
"""
engine = create_engine(DATABASE_URL, pool_size=10, max_overflow=15, pool_timeout=30)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
``` |
{
"source": "johnseekins/openstates.org",
"score": 2
} |
#### File: openstates.org/dashboards/views.py
```python
import datetime
from collections import defaultdict, Counter
from django.contrib.auth.decorators import user_passes_test
from django.db.models import Count, F
from django.contrib.auth.models import User
from django.shortcuts import render
from allauth.socialaccount.models import SocialAccount
from profiles.models import Subscription, Notification, UsageReport, Profile, KEY_TIERS
from utils.common import abbr_to_jid, sessions_with_bills, states
from utils.orgs import get_chambers_from_abbr
from dashboards.models import DataQualityReport
from openstates.data.models import LegislativeSession
def dqr_listing(request):
state_dqr_data = {}
for state in states:
try:
session = sessions_with_bills(abbr_to_jid(state.abbr))[0]
        except (KeyError, IndexError):
            # no sessions with bills for this state yet
            continue
dashboards = list(
DataQualityReport.objects.filter(session=session).order_by("chamber")
)
session_name = session.name
        # if there are two, lower is first (b/c of ordering above), otherwise figure it out
        lower_dashboard = upper_dashboard = None
        if len(dashboards) == 2:
            lower_dashboard, upper_dashboard = dashboards
        elif len(dashboards) == 1:
            if dashboards[0].chamber == "lower":
                lower_dashboard = dashboards[0]
            else:
                upper_dashboard = dashboards[0]
state_dqr_data[state.abbr.lower()] = {
"state": state.name,
"session_name": session_name,
"lower_dashboard": lower_dashboard,
"upper_dashboard": upper_dashboard,
}
return render(
request, "dashboards/dqr_listing.html", {"state_dqr_data": state_dqr_data}
)
def dq_overview(request, state):
jid = abbr_to_jid(state)
all_sessions = sessions_with_bills(jid)
dashboards = []
session = "Dashboards Not Generated Yet"
if all_sessions:
session = all_sessions[0]
dashboards = DataQualityReport.objects.filter(session=session)
chambers = get_chambers_from_abbr(state)
context = {
"state": state,
"chambers": chambers,
"session": session,
"all_sessions": all_sessions,
"dashboards": dashboards,
}
return render(request, "dashboards/dqr_page.html", context)
def dq_overview_session(request, state, session):
jid = abbr_to_jid(state)
all_sessions = sessions_with_bills(jid)
session = LegislativeSession.objects.get(identifier=session, jurisdiction_id=jid)
dashboards = DataQualityReport.objects.filter(session=session)
chambers = get_chambers_from_abbr(state)
context = {
"state": state,
"chambers": chambers,
"session": session,
"all_sessions": all_sessions,
"dashboards": dashboards,
}
return render(request, "dashboards/dqr_page.html", context)
@user_passes_test(lambda u: u.is_superuser)
def user_overview(request):
bill_subscriptions = Subscription.objects.filter(bill_id__isnull=False).count()
query_subscriptions = Subscription.objects.exclude(query="").count()
users_by_day = list(
User.objects.extra(select={"day": "date(date_joined)"})
.values("day")
.annotate(Count("id"))
.order_by("day")
.filter(date_joined__gte="2020-01-01")
)
# get counts by each provider (ignore small % with multiple providers)
providers = list(
SocialAccount.objects.values(name=F("provider")).annotate(value=Count("id"))
)
# append the number of users that only have an OS-account
providers.append(
{
"name": "openstates",
"value": User.objects.exclude(
id__in=SocialAccount.objects.values("user_id")
).count(),
}
)
active_users = list(
User.objects.annotate(sub_count=Count("subscriptions"))
.values(
"id",
"profile__subscription_emails_html",
"profile__subscription_frequency",
"sub_count",
)
.filter(sub_count__gt=0)
)
# show what users prefer
frequencies = {"w": 0, "d": 0}
for user in active_users:
frequencies[user["profile__subscription_frequency"]] += 1
frequencies = [
{"name": "weekly", "value": frequencies["w"]},
{"name": "daily", "value": frequencies["d"]},
]
notifications_by_day = list(
Notification.objects.extra(select={"day": "date(sent)"})
.values("day")
.annotate(Count("id"))
.order_by("day")
)
context = {
"user_count": User.objects.count(),
"subscriber_count": len(active_users),
"bill_subscriptions": bill_subscriptions,
"query_subscriptions": query_subscriptions,
"users_by_day": users_by_day,
"providers": providers,
"notifications_by_day": notifications_by_day,
"email_frequencies": frequencies,
}
return render(request, "dashboards/users.html", {"context": context})
def _counter_to_chartdata(counter):
"""restructure data from a format like "date -> value -> num"
to "{date: date, value1: num1, value2: num2}"
for use in charts
"""
ret_data = []
for date, subcounter in counter.items():
cur_item = {"date": date}
for k, v in subcounter.items():
cur_item[k] = v
ret_data.append(cur_item)
return sorted(ret_data, key=lambda x: x["date"])
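# Example of the reshaping described in the docstring (illustrative values):
#   {"2021-01-02": {"v3": 5}, "2021-01-01": {"graphql": 3, "v3": 1}}
# becomes (sorted by date)
#   [{"date": "2021-01-01", "graphql": 3, "v3": 1}, {"date": "2021-01-02", "v3": 5}]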
@user_passes_test(lambda u: u.is_superuser)
def api_overview(request):
endpoint_usage = defaultdict(lambda: defaultdict(int))
key_usage = defaultdict(lambda: defaultdict(int))
key_totals = Counter()
v2_key_totals = Counter()
v3_key_totals = Counter()
all_keys = set()
days = int(request.GET.get("days", 60))
since = datetime.datetime.today() - datetime.timedelta(days=days)
reports = list(
UsageReport.objects.filter(date__gte=since, calls__gt=0).select_related(
"profile__user"
)
)
for report in reports:
date = str(report.date)
key = f"{report.profile.api_key} - {report.profile.user.email}"
endpoint_usage[date][report.endpoint] += report.calls
key_usage[date][key] += report.calls
key_totals[key] += report.calls
if report.endpoint == "graphql":
v2_key_totals[key] += report.calls
elif report.endpoint == "v3":
v3_key_totals[key] += report.calls
all_keys.add(key)
context = {
"endpoint_usage": _counter_to_chartdata(endpoint_usage),
"key_usage": _counter_to_chartdata(key_usage),
"most_common": key_totals.most_common(),
"v2_totals": v2_key_totals,
"v3_totals": v3_key_totals,
"key_tiers": list(KEY_TIERS.values()),
"total_keys": Profile.objects.exclude(
api_tier__in=("inactive", "suspended")
).count(),
"active_keys": len(all_keys),
"days": days,
}
return render(request, "dashboards/api.html", {"context": context})
```
#### File: openstates.org/graphapi/core.py
```python
import datetime
import graphene
from django.db.models import Q, Prefetch
from openstates.data.models import (
Jurisdiction,
Organization,
Person,
Membership,
LegislativeSession,
RunPlan,
)
from utils.geo import coords_to_divisions
from .common import (
OCDBaseNode,
IdentifierNode,
NameNode,
LinkNode,
DjangoConnectionField,
CountableConnectionBase,
)
from .optimization import optimize
def _resolve_suborganizations(root_obj, field_name, classification=None):
"""resolve organizations by classification optionally using the prefetch cache"""
# special case filtering if organizations are prefetched
if classification and field_name in getattr(
root_obj, "_prefetched_objects_cache", []
):
if isinstance(classification, str):
return [
o
for o in root_obj._prefetched_objects_cache[field_name]
if o.classification == classification
]
elif isinstance(classification, (list, tuple)):
return [
o
for o in root_obj._prefetched_objects_cache[field_name]
if o.classification in classification
]
qs = getattr(root_obj, field_name).all()
if isinstance(classification, str):
qs = qs.filter(classification=classification)
elif isinstance(classification, (list, tuple)):
qs = qs.filter(classification__in=classification)
return qs
def _membership_filter(
qs, info, classification=None, prefix=None, current=False, coming_from_person=True
):
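    # empty start/end dates are treated as open-ended; current=True keeps memberships
    # active today, current=False keeps only ones that have ended or not yet started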
today = datetime.date.today().isoformat()
if current:
qs = qs.filter(
Q(start_date="") | Q(start_date__lte=today),
Q(end_date="") | Q(end_date__gte=today),
)
else:
qs = qs.filter(
Q(start_date__gte=today) | (Q(end_date__lte=today) & ~Q(end_date=""))
)
if classification:
qs = qs.filter(organization__classification__in=classification)
related = [".post", ".post.division"]
if coming_from_person:
related.append(".organization")
else:
related.append(".person")
# if we're getting a membership we're probably going to need org/post
qs = optimize(qs, info, None, related, prefix=prefix)
return qs
class OfficeNode(graphene.ObjectType):
classification = graphene.String()
address = graphene.String()
voice = graphene.String()
fax = graphene.String()
display_name = graphene.String()
class ContactDetailNode(graphene.ObjectType):
type = graphene.String()
value = graphene.String()
note = graphene.String()
label = graphene.String()
class OrganizationNode(OCDBaseNode):
name = graphene.String()
image = graphene.String()
# jurisdiction left out for now since traversing up can lead to query explosion
classification = graphene.String()
founding_date = graphene.String()
dissolution_date = graphene.String()
jurisdiction_id = graphene.String()
# self-referential relationship
parent = graphene.Field("graphapi.core.OrganizationNode")
children = DjangoConnectionField(
"graphapi.core.OrganizationConnection", classification=graphene.String()
)
current_memberships = graphene.List("graphapi.core.MembershipNode")
# related objects
identifiers = graphene.List(IdentifierNode)
other_names = graphene.List(NameNode)
links = graphene.List(LinkNode)
sources = graphene.List(LinkNode)
def resolve_children(
self, info, classification=None, first=None, last=None, before=None, after=None
):
return _resolve_suborganizations(self, "children", classification)
def resolve_current_memberships(self, info):
if hasattr(self, "current_memberships"):
return self.current_memberships
else:
return _membership_filter(
self.memberships, info, None, current=True, coming_from_person=False
)
class DivisionNode(OCDBaseNode):
name = graphene.String()
redirect = graphene.Field("graphapi.core.DivisionNode")
country = graphene.String()
class PostNode(OCDBaseNode):
label = graphene.String()
role = graphene.String()
division = graphene.Field(DivisionNode)
start_date = graphene.String()
end_date = graphene.String()
maximum_memberships = graphene.Int()
# organization excluded from this direction
class PersonNode(OCDBaseNode):
name = graphene.String()
sort_name = graphene.String()
family_name = graphene.String()
given_name = graphene.String()
image = graphene.String()
# not used: gender, summary, national_identity, biography
birth_date = graphene.String()
death_date = graphene.String()
primary_party = graphene.String()
email = graphene.String()
# related objects
identifiers = graphene.List(IdentifierNode)
other_names = graphene.List(NameNode)
links = graphene.List(LinkNode)
sources = graphene.List(LinkNode)
contact_details = graphene.List(ContactDetailNode)
offices = graphene.List(OfficeNode)
# special attributes
current_memberships = graphene.List(
"graphapi.core.MembershipNode", classification=graphene.List(graphene.String)
)
old_memberships = graphene.List(
"graphapi.core.MembershipNode", classification=graphene.List(graphene.String)
)
votes = DjangoConnectionField("graphapi.legislative.BillVoteConnection")
def resolve_identifiers(self, info):
return self.identifiers.all()
def resolve_other_names(self, info):
return self.other_names.all()
def resolve_links(self, info):
return self.links.all()
def resolve_sources(self, info):
return self.sources.all()
def resolve_contact_details(self, info):
contact_details = []
# contact detail shim for backwards compatibility
for office in self.offices.all():
for key in ("fax", "voice", "address"):
if value := getattr(office, key):
contact_details.append(
dict(value=value, type=key, note=office.display_name)
)
# email shim for backwards compatibility
if self.email:
contact_details.append(
dict(value=self.email, type="email", note="Capitol Office")
)
return contact_details
def resolve_offices(self, info):
return self.offices.all()
def resolve_current_memberships(self, info, classification=None):
if hasattr(self, "current_memberships"):
if classification:
return [
m
for m in self.current_memberships
if m.organization.classification in classification
]
return self.current_memberships
else:
return _membership_filter(
self.memberships, info, classification, current=True
)
def resolve_old_memberships(self, info, classification=None):
if hasattr(self, "old_memberships"):
if classification:
return [
m
for m in self.old_memberships
if m.organization.classification in classification
]
return self.old_memberships
else:
return _membership_filter(
self.memberships, info, classification, current=False
)
def resolve_votes(self, info):
return self.votes.all()
class MembershipNode(OCDBaseNode):
organization = graphene.Field(OrganizationNode)
person = graphene.Field(PersonNode)
person_name = graphene.String()
post = graphene.Field(PostNode)
# on_behalf_of (not used?)
label = graphene.String()
role = graphene.String()
start_date = graphene.String()
end_date = graphene.String()
class LegislativeSessionNode(graphene.ObjectType):
jurisdiction = graphene.Field("graphapi.core.JurisdictionNode")
identifier = graphene.String()
name = graphene.String()
classification = graphene.String()
start_date = graphene.String()
end_date = graphene.String()
class LegislativeSessionConnection(graphene.relay.Connection):
class Meta:
node = LegislativeSessionNode
class OrganizationConnection(CountableConnectionBase):
class Meta:
node = OrganizationNode
max_items = 100
class JurisdictionNode(graphene.ObjectType):
id = graphene.String()
name = graphene.String()
url = graphene.String()
classification = graphene.String()
feature_flags = graphene.List(graphene.String)
last_scraped_at = graphene.String()
legislative_sessions = DjangoConnectionField(LegislativeSessionConnection)
organizations = DjangoConnectionField(
OrganizationConnection, classification=graphene.List(graphene.String)
)
def resolve_legislative_sessions(
self, info, first=None, last=None, before=None, after=None
):
return self.legislative_sessions.all()
def resolve_organizations(
self, info, first=None, last=None, before=None, after=None, classification=None
):
return _resolve_suborganizations(self, "organizations", classification)
def resolve_last_scraped_at(self, info):
try:
return self.runs.filter(success=True).latest("end_time").end_time
except RunPlan.DoesNotExist:
return None
class JurisdictionConnection(graphene.relay.Connection):
class Meta:
node = JurisdictionNode
class PersonConnection(CountableConnectionBase):
class Meta:
node = PersonNode
max_items = 100
class CoreQuery:
jurisdictions = DjangoConnectionField(
JurisdictionConnection, classification=graphene.String()
)
jurisdiction = graphene.Field(
JurisdictionNode, id=graphene.String(), name=graphene.String()
)
people = DjangoConnectionField(
PersonConnection,
member_of=graphene.String(),
ever_member_of=graphene.String(),
district=graphene.String(),
division_id=graphene.String(),
name=graphene.String(),
updated_since=graphene.String(),
latitude=graphene.Float(),
longitude=graphene.Float(),
)
person = graphene.Field(PersonNode, id=graphene.ID())
organization = graphene.Field(OrganizationNode, id=graphene.ID())
def resolve_jurisdictions(
self,
info,
classification="state",
first=None,
last=None,
before=None,
after=None,
):
qs = Jurisdiction.objects.filter(classification=classification).order_by("name")
return optimize(
qs,
info,
[
(
".legislativeSessions",
Prefetch(
"legislative_sessions",
LegislativeSession.objects.all().order_by("start_date"),
),
),
".organizations",
".organizations.children",
],
)
def resolve_jurisdiction(self, info, id=None, name=None):
if id:
return Jurisdiction.objects.get(id=id)
if name:
return Jurisdiction.objects.get(name=name)
else:
raise ValueError("Jurisdiction requires id or name")
def resolve_people(
self,
info,
first=None,
last=None,
before=None,
after=None,
member_of=None,
ever_member_of=None,
district=None,
division_id=None,
name=None,
updated_since=None,
latitude=None,
longitude=None,
):
qs = Person.objects.all()
today = datetime.date.today()
if name:
qs = qs.filter(
Q(name__icontains=name) | Q(other_names__name__icontains=name)
)
if division_id:
qs = qs.filter(
Q(memberships__post__division_id=division_id),
Q(memberships__end_date="") | Q(memberships__end_date__gt=today),
)
if member_of:
qs = qs.member_of(member_of, post=district)
if ever_member_of:
qs = qs.member_of(ever_member_of, current_only=False, post=district)
if updated_since:
qs = qs.filter(updated_at__gte=updated_since)
if district and not (member_of or ever_member_of):
raise ValueError(
"'district' parameter requires specifying either "
"'memberOf' or 'everMemberOf'"
)
if latitude and longitude:
try:
latitude = float(latitude)
longitude = float(longitude)
except ValueError:
raise ValueError("invalid lat or lon")
divisions = coords_to_divisions(latitude, longitude)
qs = qs.filter(
Q(memberships__post__division__id__in=divisions),
Q(memberships__end_date="") | Q(memberships__end_date__gt=today),
)
elif latitude or longitude:
raise ValueError("must provide lat & lon together")
qs = optimize(
qs,
info,
[
".identifiers",
".otherNames",
".links",
".sources",
".offices",
(".contactDetails", Prefetch("offices")),
(
".currentMemberships",
Prefetch(
"memberships",
queryset=_membership_filter(
Membership.objects,
info,
prefix=".currentMemberships",
current=True,
),
to_attr="current_memberships",
),
),
(
".oldMemberships",
Prefetch(
"memberships",
queryset=_membership_filter(
Membership.objects,
info,
prefix=".oldMemberships",
current=False,
),
to_attr="old_memberships",
),
),
],
)
return qs
def resolve_person(self, info, id):
return Person.objects.get(pk=id)
def resolve_organization(self, info, id):
return optimize(Organization.objects, info, None, [".parent"]).get(pk=id)
```
#### File: graphapi/tests/test_core.py
```python
import pytest
from graphapi.schema import schema
from openstates.data.models import Organization, Person
from .utils import populate_db
@pytest.mark.django_db
def setup():
populate_db()
@pytest.mark.django_db
def test_jurisdictions(django_assert_num_queries):
with django_assert_num_queries(2):
result = schema.execute(
""" {
jurisdictions {
edges {
node {
name
}
}
}
}
"""
)
assert result.errors is None
assert result.data["jurisdictions"]["edges"][0]["node"]["name"] == "Alaska"
assert result.data["jurisdictions"]["edges"][1]["node"]["name"] == "Wyoming"
@pytest.mark.django_db
def test_jurisdictions_num_queries(django_assert_num_queries):
with django_assert_num_queries(4):
result = schema.execute(
""" {
jurisdictions {
edges {
node {
name
legislativeSessions {
edges { node { identifier } }
}
organizations(first: 50) {
edges { node { name } }
}
}
}
}
}
"""
)
assert result.errors is None
assert (
len(
result.data["jurisdictions"]["edges"][0]["node"]["legislativeSessions"][
"edges"
]
)
== 2
)
assert (
len(result.data["jurisdictions"]["edges"][0]["node"]["organizations"]["edges"])
== 3
)
@pytest.mark.django_db
def test_jurisdictions_num_queries_subquery(django_assert_num_queries):
# same as test_jurisdictions_num_queries but with slightly more complex filtering on nodes
with django_assert_num_queries(4):
result = schema.execute(
""" {
jurisdictions {
edges {
node {
name
legislativeSessions(first: 1) {
edges { node { identifier } }
}
organizations(classification: "legislature", first: 50) {
edges { node { name } }
}
}
}
}
}
"""
)
assert result.errors is None
assert (
len(
result.data["jurisdictions"]["edges"][0]["node"]["legislativeSessions"][
"edges"
]
)
== 1
)
assert (
len(result.data["jurisdictions"]["edges"][0]["node"]["organizations"]["edges"])
== 1
)
@pytest.mark.django_db
def test_jurisdiction_by_id(django_assert_num_queries):
with django_assert_num_queries(5):
result = schema.execute(
""" {
jurisdiction(id:"ocd-jurisdiction/country:us/state:wy/government") {
name
legislativeSessions(first: 1) {
edges { node { identifier } }
}
organizations(classification: "legislature", first: 50) {
edges { node { name } }
}
}
}
"""
)
assert result.errors is None
assert len(result.data["jurisdiction"]["legislativeSessions"]["edges"]) == 1
assert len(result.data["jurisdiction"]["organizations"]["edges"]) == 1
@pytest.mark.django_db
def test_jurisdiction_by_name(django_assert_num_queries):
with django_assert_num_queries(5):
result = schema.execute(
""" {
jurisdiction(name:"Wyoming") {
name
legislativeSessions(first: 1) {
edges { node { identifier } }
}
organizations(classification: "legislature", first: 50) {
edges { node { name } }
}
}
}
"""
)
assert result.errors is None
assert len(result.data["jurisdiction"]["legislativeSessions"]["edges"]) == 1
assert len(result.data["jurisdiction"]["organizations"]["edges"]) == 1
@pytest.mark.django_db
def test_jurisdiction_chambers_current_members(django_assert_num_queries):
with django_assert_num_queries(5):
result = schema.execute(
""" {
jurisdiction(name:"Wyoming") {
chambers: organizations(classification:["upper", "lower"], first:2)
{ edges { node {
name
currentMemberships {
person { name }
}
} }
}
}
}
"""
)
assert result.errors is None
assert len(result.data["jurisdiction"]["chambers"]["edges"]) == 2
assert set(("Wyoming House", "Wyoming Senate")) == set(
edge["node"]["name"]
for edge in result.data["jurisdiction"]["chambers"]["edges"]
)
people = []
for chamber in result.data["jurisdiction"]["chambers"]["edges"]:
for m in chamber["node"]["currentMemberships"]:
people.append(m["person"]["name"])
assert len(people) == 2
@pytest.mark.django_db
def test_people_by_member_of(django_assert_num_queries):
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(memberOf: "%s", first: 50) {
edges {
node {
name
}
}
}
}
"""
% ak_house.id
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 4
@pytest.mark.django_db
def test_variable_people_by_member_of(django_assert_num_queries):
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
with django_assert_num_queries(2):
result = schema.execute(
"""
query peeps($f: Int){
people(memberOf: "%s", first: $f) {
edges {
node {
name
}
}
}
}
"""
% ak_house.id,
variable_values={"f": 3},
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 3
@pytest.mark.django_db
def test_people_by_ever_member_of(django_assert_num_queries):
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(everMemberOf: "%s", first:50) {
edges {
node {
name
}
}
}
}
"""
% ak_house.id
)
assert result.errors is None
# one extra person (<NAME>) is added as a former member of the House
assert len(result.data["people"]["edges"]) == 5
@pytest.mark.django_db
def test_people_by_district():
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
result = schema.execute(
""" {
ones: people(memberOf: "%s", district: "1", first: 50) {
edges { node { name } }
}
fives: people(everMemberOf: "%s", district: "5", first: 50) {
edges { node { name } }
}
bad: people(district: "1", first: 50) {
edges { node { name } }
}
}
"""
% (ak_house.id, ak_house.id)
)
assert "'district' parameter requires" in result.errors[0].message
assert len(result.data["ones"]["edges"]) == 1
assert len(result.data["fives"]["edges"]) == 1
assert result.data["bad"] is None
@pytest.mark.django_db
def test_people_by_division_id():
# Note: uses a fake divisionId that has two reps (one retired), only one should be returned
result = schema.execute(
""" {
people(divisionId: "ocd-division/country:us/state:ak/sldu:b", first: 50) {
edges { node { name } }
}
}
"""
)
assert len(result.data["people"]["edges"]) == 1
@pytest.mark.django_db
def test_people_by_name():
result = schema.execute(
""" {
people(name: "Hank", first: 50) {
edges { node { name } }
}
}
"""
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 1
@pytest.mark.django_db
def test_people_by_party():
result = schema.execute(
""" {
dems: people(memberOf: "Democratic", first: 50) {
edges { node { name } }
}
reps: people(memberOf: "Republican", first: 50) {
edges { node { name } }
}
}
"""
)
assert result.errors is None
assert len(result.data["dems"]["edges"]) == 3
assert len(result.data["reps"]["edges"]) == 4
# @pytest.mark.django_db
# def test_people_by_location():
# # TODO: need data to test with
# pass
@pytest.mark.django_db
def test_people_num_queries(django_assert_num_queries):
with django_assert_num_queries(8):
result = schema.execute(
""" {
people(first: 50) {
edges {
node {
name
image
identifiers { identifier }
otherNames { name }
links { url }
sources { url }
contactDetails { value label }
offices { classification displayName voice fax address }
currentMemberships {
post {
label
division {
id
}
}
organization { name }
}
}
}
}
}"""
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 9
total_memberships = 0
for person in result.data["people"]["edges"]:
total_memberships += len(person["node"]["currentMemberships"])
assert total_memberships == 16 # 8 chambers + 8 parties
@pytest.mark.django_db
def test_people_total_count(django_assert_num_queries):
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(first: 50) {
totalCount
edges {
node {
name
}
}
}
}"""
)
assert result.errors is None
assert result.data["people"]["totalCount"] == 9
assert len(result.data["people"]["edges"]) == 9
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(first: 50, name: "Amanda") {
totalCount
edges {
node {
name
}
}
}
}"""
)
assert result.errors is None
assert result.data["people"]["totalCount"] == 1
assert len(result.data["people"]["edges"]) == 1
@pytest.mark.django_db
def test_people_current_memberships_classification(django_assert_num_queries):
with django_assert_num_queries(3):
result = schema.execute(
""" {
people(first: 50) {
edges {
node {
currentMemberships(classification: "party") {
organization { name }
}
}
}
}
}"""
)
assert result.errors is None
total_memberships = 0
for person in result.data["people"]["edges"]:
total_memberships += len(person["node"]["currentMemberships"])
assert total_memberships == 8 # Only the 8 parties should be returned
@pytest.mark.django_db
def test_people_old_memberships(django_assert_num_queries):
with django_assert_num_queries(3):
result = schema.execute(
"""{
people(first: 50) {
edges {
node {
oldMemberships {
organization { name }
}
}
}
}
}"""
)
assert result.errors is None
old_memberships = 0
for person in result.data["people"]["edges"]:
old_memberships += len(person["node"]["oldMemberships"])
assert old_memberships == 3 # three old memberships in test data right now
@pytest.mark.django_db
def test_person_by_id(django_assert_num_queries):
person = Person.objects.get(name="<NAME>")
with django_assert_num_queries(7):
result = schema.execute(
""" {
person(id:"%s") {
name
image
primaryParty
email
identifiers { identifier }
otherNames { name }
links { url }
sources { url }
offices { classification displayName voice fax address }
currentMemberships {
post {
label
division {
id
}
}
organization { name }
}
}
}"""
% person.id
)
assert result.errors is None
assert result.data["person"]["name"] == "<NAME>"
assert result.data["person"]["primaryParty"] == "Republican"
assert len(result.data["person"]["currentMemberships"]) == 2
division = None
for membership in result.data["person"]["currentMemberships"]:
if membership["post"]:
division = membership["post"]["division"]
break
assert division["id"] == "ocd-division/country:us/state:ak/sldl:2"
@pytest.mark.django_db
def test_person_email_shim(django_assert_num_queries):
# email used to be available in contact_details, make sure they can still find it there
person = Person.objects.get(name="<NAME>")
person.email = "<EMAIL>"
person.save()
result = schema.execute(
""" {
person(id:"%s") {
name
email
contactDetails { value note type }
}
}"""
% person.id
)
assert result.errors is None
assert result.data["person"]["name"] == "<NAME>"
assert result.data["person"]["email"] == "<EMAIL>"
assert result.data["person"]["contactDetails"][0] == {
"value": "<EMAIL>",
"type": "email",
"note": "Capitol Office",
}
@pytest.mark.django_db
def test_person_contact_details_shim(django_assert_num_queries):
# make sure contactDetails populates properly from office data
person = Person.objects.get(name="<NAME>")
person.offices.create(
classification="district",
voice="123-456-7890",
address="123 Boogie Woogie Ave",
)
result = schema.execute(
""" {
person(id:"%s") {
name
email
contactDetails { value note type }
}
}"""
% person.id
)
assert result.errors is None
assert result.data["person"]["name"] == "<NAME>"
assert result.data["person"]["contactDetails"] == [
{"value": "123-456-7890", "type": "voice", "note": "District Office"},
{
"value": "123 Boogie Woogie Ave",
"type": "address",
"note": "District Office",
},
]
@pytest.mark.django_db
def test_organization_by_id(django_assert_num_queries):
# get targets
leg = Organization.objects.get(
jurisdiction__name="Wyoming", classification="legislature"
)
sen = Organization.objects.get(jurisdiction__name="Wyoming", classification="upper")
# 1 query for legislature, 1 query each for children
# 1 query for senate w/ parent
with django_assert_num_queries(4):
result = schema.execute(
""" {
leg: organization(id: "%s") {
name
classification
children(classification: "upper", first: 50) {
edges { node { classification } }
}
links { url }
sources { url }
}
senate: organization(id: "%s") {
name
parent {
name
}
}
}
"""
% (leg.id, sen.id)
)
assert result.errors is None
assert len(result.data["leg"]["children"]["edges"]) == 1
assert result.data["senate"]["parent"]["name"] == "Wyoming Legislature"
@pytest.mark.django_db
def test_people_by_updated_since():
middle_date = Person.objects.all().order_by("updated_at")[2].updated_at
result = schema.execute(
"""{
all: people(updatedSince: "2017-01-01T00:00:00Z", last:50) {
edges { node { name } }
}
some: people(updatedSince: "%s", first:50) {
edges { node { name } }
}
none: people(updatedSince: "2030-01-01T00:00:00Z", first:50) {
edges { node { name } }
}
}"""
% middle_date
)
assert result.errors is None
assert len(result.data["all"]["edges"]) == 9
assert len(result.data["some"]["edges"]) == 7
assert len(result.data["none"]["edges"]) == 0
@pytest.mark.django_db
def test_jurisdiction_fragment(django_assert_num_queries):
with django_assert_num_queries(3):
result = schema.execute(
"""
fragment JurisdictionFields on JurisdictionNode {
id
name
url
legislativeSessions {
edges {
node {
name
startDate
endDate
classification
identifier
}
}
}
}
query jurisdictionsQuery {
jurisdictions {
edges {
node {
...JurisdictionFields
}
}
}
}"""
)
assert result.errors is None
```
#### File: openstates.org/people_admin/diff.py
```python
import typing
from dataclasses import dataclass
class DiffError(ValueError):
pass
@dataclass
class DiffItem:
action: str
key: str
param: typing.Optional[str]
def apply_diffs(obj, diff):
for item in diff:
obj = apply_diff_item(obj, item)
return obj
def get_subobj(obj: dict, key_pieces: typing.List[str]):
if key_pieces:
k = key_pieces[0]
if isinstance(obj, list):
k = int(k)
return get_subobj(obj[k], key_pieces[1:])
return obj
def apply_diff_item(obj, diff_item):
if isinstance(diff_item, dict):
diff_item = DiffItem(**diff_item)
else:
diff_item = DiffItem(*diff_item)
key_pieces = diff_item.key.split(".")
if diff_item.action == "set":
subobj = get_subobj(obj, key_pieces[:-1])
k = key_pieces[-1]
if isinstance(subobj, list):
k = int(k)
subobj[k] = diff_item.param
elif diff_item.action == "append":
try:
subobj = get_subobj(obj, key_pieces)
except KeyError:
# if the key doesn't exist, try to append to a new empty list
subobj = get_subobj(obj, key_pieces[:-1])
subobj[key_pieces[-1]] = []
subobj = subobj[key_pieces[-1]]
if not isinstance(subobj, list):
raise DiffError(f"cannot 'append' to non-list element for {diff_item}")
subobj.append(diff_item.param)
elif diff_item.action == "delete":
subobj = get_subobj(obj, key_pieces[:-1])
k = key_pieces[-1]
if isinstance(subobj, list):
k = int(k)
subobj.pop(k)
else:
raise ValueError(f"unknown action: {diff_item}")
return obj
```
#### File: people_admin/tests/test_diff.py
```python
import pytest
from ..diff import apply_diff_item
@pytest.mark.parametrize(
"start,patch,output",
[
# set new value
({}, ["set", "a", 123], {"a": 123}),
({}, ["set", "a", {"b": "c"}], {"a": {"b": "c"}}),
# replace value
({"a": 4}, ["set", "a", 123], {"a": 123}),
({"a": 4}, ["set", "a", {"b": "c"}], {"a": {"b": "c"}}),
# set multiple levels deep
({"a": {"b": 0}}, ["set", "a.b", 123], {"a": {"b": 123}}),
# set within a list
({"a": [1, 0, 3]}, ["set", "a.1", 2], {"a": [1, 2, 3]}),
(
{"a": [{"i": 1}, {"ii": 0}, {"iii": 3}]},
["set", "a.1.ii", 2],
{"a": [{"i": 1}, {"ii": 2}, {"iii": 3}]},
),
],
)
def test_set_item(start, patch, output):
assert apply_diff_item(start, patch) == output
@pytest.mark.parametrize(
"start,patch,output",
[
({"a": []}, ["append", "a", 123], {"a": [123]}),
({"a": {"b": [1, 2, 3]}}, ["append", "a.b", 4], {"a": {"b": [1, 2, 3, 4]}}),
(
{"a": [{"i": 1}, {"ii": 2}, {"iii": 3}]},
["append", "a", {"iv": 4}],
{"a": [{"i": 1}, {"ii": 2}, {"iii": 3}, {"iv": 4}]},
),
# append should create list if not present
({}, ["append", "a", 123], {"a": [123]}),
],
)
def test_append_item(start, patch, output):
assert apply_diff_item(start, patch) == output
@pytest.mark.parametrize(
"start,patch,output",
[
({"a": [1, 2, 3]}, ["delete", "a", None], {}),
({"a": [1, 2, 3]}, ["delete", "a.1", None], {"a": [1, 3]}),
({"a": {"b": {"c": "d"}}}, ["delete", "a.b.c", None], {"a": {"b": {}}}),
],
)
def test_delete_item(start, patch, output):
assert apply_diff_item(start, patch) == output
```
#### File: openstates.org/people_admin/unmatched.py
```python
import typing
from collections import defaultdict
from django.db.models import Count
from django.contrib.auth.models import User
from openstates.data.models import BillSponsorship, LegislativeSession, PersonVote
from utils.common import abbr_to_jid
from .models import UnmatchedName, NameStatus, DeltaSet, PersonDelta
def check_sponsorships(session: LegislativeSession) -> typing.Dict[str, int]:
unmatched = (
BillSponsorship.objects.filter(
bill__legislative_session=session,
person=None,
organization=None,
)
.values("name")
.annotate(count=Count("id"))
)
return {u["name"]: u["count"] for u in unmatched}
def check_votes(session: LegislativeSession) -> typing.Dict[str, int]:
unmatched = (
PersonVote.objects.filter(
vote_event__bill__legislative_session=session,
voter=None,
)
.values("voter_name")
.annotate(count=Count("id"))
)
return {u["voter_name"]: u["count"] for u in unmatched}
def update_unmatched(abbr: str, session: str) -> int:
session = LegislativeSession.objects.get(
jurisdiction_id=abbr_to_jid(abbr), identifier=session
)
missing_sponsorships = check_sponsorships(session)
missing_votes = check_votes(session)
all_names = set(missing_sponsorships) | set(missing_votes)
# delete rows that no longer exist
UnmatchedName.objects.filter(session=session).exclude(name__in=all_names).delete()
n = 0
for name in all_names:
UnmatchedName.objects.update_or_create(
session=session,
name=name,
# update numbers, but don't update status/match if it is already set
defaults=dict(
sponsorships_count=missing_sponsorships.get(name, 0),
votes_count=missing_votes.get(name, 0),
),
)
n += 1
return n
def unmatched_to_deltas(abbr: str) -> int:
bot_user, _ = User.objects.get_or_create(username="openstates-bot")
names = list(
UnmatchedName.objects.filter(
session__jurisdiction_id=abbr_to_jid(abbr),
status=NameStatus.MATCHED_PERSON,
matched_person_id__isnull=False,
)
)
# bail without any work if there aren't names to consider
if not names:
return 0
delta_set, created = DeltaSet.objects.get_or_create(
name=f"{abbr.upper()} legislator matching",
pr_url="",
created_by=bot_user,
)
delta_set.person_deltas.all().delete()
# build list of changes for each person
person_changes = defaultdict(list)
for name in names:
person_changes[name.matched_person_id].append(
["append", "other_names", {"name": name.name}]
)
for person_id, changes in person_changes.items():
PersonDelta.objects.create(
person_id=person_id, delta_set=delta_set, data_changes=changes
)
return len(person_changes)
```
#### File: openstates.org/people_admin/views.py
```python
import json
import us
from django.shortcuts import render, get_object_or_404
from django.db.models import Count
from openstates.data.models import LegislativeSession, Person
from utils.common import abbr_to_jid, sessions_with_bills, states
from django.views.decorators.http import require_http_methods
from django.views.decorators.cache import never_cache
from django.contrib.auth.decorators import user_passes_test
from django.http import JsonResponse
from .unmatched import unmatched_to_deltas
from people_admin.models import (
UnmatchedName,
NameStatus,
DeltaSet,
PersonDelta,
PersonRetirement,
NewPerson,
)
from people_admin.git import delta_set_to_pr
MATCHER_PERM = "people_admin.can_match_names"
EDIT_PERM = "people_admin.can_edit"
RETIRE_PERM = "people_admin.can_retire"
def person_data(person):
"""similar to utils.people.person_as_dict but customized for editable fields"""
extras = {}
identifier_types = ("twitter", "facebook", "instagram", "youtube")
for identifier in person.identifiers.all():
for itype in identifier_types:
if identifier.scheme == itype:
extras[itype] = identifier.identifier
for off in person.offices.all():
if off.fax:
extras[off.classification + "_fax"] = off.fax
if off.voice:
extras[off.classification + "_voice"] = off.voice
if off.address:
extras[off.classification + "_address"] = off.address
return {
"id": person.id,
"name": person.name,
"title": person.current_role["title"],
"district": person.current_role["district"],
"party": person.primary_party,
"image": person.image,
"email": person.email,
**extras,
}
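# Illustrative return value (values are made up; the extras keys depend on which
# identifiers and offices the person actually has):
#   {"id": "ocd-person/...", "name": "Jane Doe", "title": "Senator", "district": "12",
#    "party": "Democratic", "image": "", "email": "", "twitter": "janedoe",
#    "capitol_voice": "555-0100", "district_address": "123 Main St"}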
@user_passes_test(lambda u: u.has_perm(MATCHER_PERM) or u.has_perm(EDIT_PERM))
def jurisdiction_list(request):
state_people_data = {}
unmatched_by_state = dict(
UnmatchedName.objects.filter(status="U")
.values_list("session__jurisdiction__name")
.annotate(number=Count("id"))
)
for state in states + [us.unitedstatesofamerica]:
jid = abbr_to_jid(state.abbr)
current_people = [
person_data(p)
for p in Person.objects.filter(
current_jurisdiction_id=jid, current_role__isnull=False
).prefetch_related("offices")
]
photoless = 0
phoneless = 0
addressless = 0
for person in current_people:
if "image" not in person or person["image"] == "":
photoless += 1
elif "capitol_voice" not in person and "district_voice" not in person:
phoneless += 1
elif "capitol_address" not in person and "district_address" not in person:
addressless += 1
jurisdiction = "United States" if state.abbr == "US" else state.name
state_people_data[state.abbr.lower()] = {
"state": jurisdiction,
"unmatched": unmatched_by_state.get(state.name, 0),
"missing_photo": photoless,
"missing_phone": phoneless,
"missing_address": addressless,
}
return render(
request,
"people_admin/jurisdiction_list.html",
{"state_people_data": state_people_data},
)
@never_cache
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
def people_list(request, state):
jid = abbr_to_jid(state)
current_people = [
person_data(p)
for p in Person.objects.filter(
current_jurisdiction_id=jid, current_role__isnull=False
)
.order_by("family_name", "name")
.prefetch_related("identifiers", "offices")
]
context = {
"current_people": current_people,
}
return render(request, "people_admin/person_list.html", {"context": context})
@never_cache
@user_passes_test(lambda u: u.has_perm(MATCHER_PERM))
def people_matcher(request, state, session=None):
jid = abbr_to_jid(state)
all_sessions = sessions_with_bills(jid)
if session:
session = get_object_or_404(
LegislativeSession, identifier=session, jurisdiction_id=jid
)
unmatched = UnmatchedName.objects.filter(
session_id=session, status="U"
).order_by("-sponsorships_count")
else:
unmatched = UnmatchedName.objects.filter(
session__jurisdiction__id=jid, status="U"
).order_by("-sponsorships_count")
state_sponsors = Person.objects.filter(current_jurisdiction_id=jid)
unmatched_total = unmatched.count()
context = {
"state": state,
"session": session,
"all_sessions": all_sessions,
"unmatched": unmatched,
"state_sponsors": state_sponsors,
"unmatched_total": unmatched_total,
}
return render(request, "people_admin/people_matcher.html", context)
@user_passes_test(lambda u: u.has_perm(MATCHER_PERM))
@require_http_methods(["POST"])
def apply_match(request):
form_data = json.load(request)["match_data"]
button = form_data["button"]
match_id = form_data["matchedId"]
unmatched_id = form_data["unmatchedId"]
unmatched_name = get_object_or_404(UnmatchedName, pk=unmatched_id)
if button == "Match":
unmatched_name.matched_person_id = match_id
unmatched_name.status = NameStatus.MATCHED_PERSON
elif button == "Source Error":
unmatched_name.status = NameStatus.SOURCE_ERROR
elif button == "Ignore":
unmatched_name.status = NameStatus.IGNORED
else:
unmatched_name.status = NameStatus.UNMATCHED
unmatched_name.save()
return JsonResponse({"status": "success"})
@user_passes_test(lambda u: u.has_perm(RETIRE_PERM))
@require_http_methods(["POST"])
def apply_retirement(request):
retirement = json.load(request)
name = retirement["name"]
delta = DeltaSet.objects.create(
name=f"retire {name}",
created_by=request.user,
)
PersonRetirement.objects.create(
delta_set=delta,
person_id=retirement["id"],
date=retirement["retirementDate"],
reason=retirement["reason"] or "",
is_dead=retirement["isDead"],
is_vacant=retirement["vacantSeat"],
)
return JsonResponse({"status": "success"})
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
def new_legislator(request, state):
context = {
"state": state,
}
return render(request, "people_admin/new_person.html", {"context": context})
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
@require_http_methods(["POST"])
def apply_new_legislator(request):
addition = json.load(request)
    name = addition["name"]
delta = DeltaSet.objects.create(
name=f"add {name}",
created_by=request.user,
)
NewPerson.objects.create(
name=name,
delta_set=delta,
state=addition["state"],
district=addition["district"],
chamber=addition["chamber"],
)
return JsonResponse({"status": "success"})
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
@require_http_methods(["POST"])
def apply_bulk_edits(request):
edits = json.load(request)
delta = DeltaSet.objects.create(
name=f"edit by {request.user}",
created_by=request.user,
)
for person in edits:
updates = []
for key in person:
if key != "id":
change = {"action": "set", "key": key, "param": person[key]}
updates.append(change)
PersonDelta.objects.create(
delta_set=delta,
person_id=person["id"],
data_changes=updates,
)
return JsonResponse({"status": "success"})
@never_cache
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
def create_delta_sets(request, state):
matches = unmatched_to_deltas(state)
name = f"{state.upper()} legislator matching"
delta = DeltaSet.objects.get(name=name, pr_status="N")
people_deltas = PersonDelta.objects.filter(delta_set_id=delta).order_by("person_id")
context = {
"people": people_deltas,
"matches": matches,
}
return render(request, "people_admin/deltasets.html", context)
@never_cache
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
@require_http_methods(["POST"])
def create_pr(request):
delta = json.load(request)["delta"]
ds = DeltaSet.objects.get(id=delta, pr_status="N")
print(f"creating {ds.id} | {ds.name} | {ds.created_by}")
ds.pr_url = delta_set_to_pr(ds)
ds.pr_status = "C"
ds.save()
return JsonResponse({"status": request})
```
#### File: openstates.org/utils/bills.py
```python
import re
from django.db.models import F
from django.contrib.postgres.search import SearchQuery
from openstates.data.models import Bill
from openstates.utils.transformers import fix_bill_id
from .common import abbr_to_jid
# decision was made in openstates/issues#193 to exclude these by default to not confuse users
EXCLUDED_CLASSIFICATIONS = ["proposed bill"]
def search_bills(
*,
sort,
bills=None,
query=None,
state=None,
chamber=None,
session=None,
sponsor=None,
sponsor_name=None,
classification=None,
exclude_classifications=None,
subjects=None,
status=None,
):
if bills is None:
bills = Bill.objects.all().select_related(
"legislative_session",
"legislative_session__jurisdiction",
)
if state:
jid = abbr_to_jid(state.lower())
bills = bills.filter(legislative_session__jurisdiction_id=jid)
if query:
if re.match(r"\w{1,3}\s*\d{1,5}", query):
bills = bills.filter(identifier__iexact=fix_bill_id(query))
else:
bills = bills.filter(
searchable__search_vector=SearchQuery(
query, search_type="websearch", config="english"
)
)
if chamber:
bills = bills.filter(from_organization__classification=chamber)
if session:
bills = bills.filter(legislative_session__identifier=session)
if sponsor:
bills = bills.filter(sponsorships__person_id=sponsor)
if sponsor_name:
bills = bills.filter(sponsorships__name=sponsor_name)
if classification:
bills = bills.filter(classification__contains=[classification])
elif exclude_classifications:
bills = bills.exclude(classification__contains=exclude_classifications)
if subjects:
bills = bills.filter(subject__overlap=subjects)
if not status:
status = []
if "passed-lower-chamber" in status:
bills = bills.filter(
actions__classification__contains=["passage"],
actions__organization__classification="lower",
)
elif "passed-upper-chamber" in status:
bills = bills.filter(
actions__classification__contains=["passage"],
actions__organization__classification="upper",
)
elif "signed" in status:
bills = bills.filter(actions__classification__contains=["executive-signature"])
if sort is None:
pass
elif sort == "-updated":
bills = bills.order_by("-updated_at")
elif sort == "first_action":
bills = bills.order_by(F("first_action_date").asc(nulls_last=True))
elif sort == "-first_action":
bills = bills.order_by(F("first_action_date").desc(nulls_last=True))
elif sort == "latest_action":
bills = bills.order_by(F("latest_action_date").asc(nulls_last=True))
else: # -latest_action, or not specified
bills = bills.order_by(F("latest_action_date").desc(nulls_last=True))
return bills
```
#### File: openstates.org/utils/orgs.py
```python
from .common import abbr_to_jid, pretty_url
from openstates.data.models import Organization
def get_chambers_from_abbr(abbr):
jid = abbr_to_jid(abbr)
orgs = list(
Organization.objects.filter(
jurisdiction_id=jid, classification__in=["upper", "lower", "legislature"]
)
)
if len(orgs) == 3:
orgs = [org for org in orgs if org.classification != "legislature"]
return orgs
def get_legislature_from_abbr(abbr):
legislature = Organization.objects.select_related("jurisdiction").get(
classification="legislature", jurisdiction_id=abbr_to_jid(abbr)
)
return legislature
def org_as_dict(org):
return {
"id": org.id,
"name": org.name,
"chamber": org.parent.classification,
"pretty_url": pretty_url(org),
"member_count": org.member_count,
}
```
#### File: openstates.org/utils/people.py
```python
from .common import pretty_url
def person_as_dict(person):
return {
"id": person.id,
"name": person.name,
"image": person.image,
"primary_party": person.primary_party,
"current_role": person.current_role,
"pretty_url": pretty_url(person),
}
``` |
{
"source": "JohnSell620/machine-learning-goodreads-reviews",
"score": 3
} |
#### File: scrapy/spiders/goodreads_spider.py
```python
from __future__ import print_function
import sys
from scrapy import Spider
from scrapy.selector import Selector
from scrapy.http import Request
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from goodreads.items import GoodreadsItem
from HTMLParser import HTMLParser
from w3lib.html import remove_tags
import logging
logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')
class GoodreadsSpider(Spider):
name = "goodreads"
allowed_domains = ['www.goodreads.com']
# crawler will scrape more genres than these listed
start_urls = [
'https://www.goodreads.com/genres/art',
'https://www.goodreads.com/genres/history',
'https://www.goodreads.com/genres/philosophy',
'https://www.goodreads.com/genres/religion',
'https://www.goodreads.com/genres/science',
]
def __init__(self):
self.driver = webdriver.Firefox()
def parse(self,response):
book_urls = Selector(response).xpath('//div[@class="leftContainer"]/\
div/div[@class="bigBoxBody"]/div/div/\
div[@class="leftAlignedImage bookBox"]/\
div[@class="coverWrapper"]/a/@href')
for book_url in book_urls:
book_url = book_url.extract()
url = "https://www.goodreads.com" + book_url
self.driver.get(url)
request = Request(url,callback=self.parse2)
request.meta['book_url'] = url
yield request
self.driver.close()
    def parse2(self, response):
        try:
            # wait up to 10 seconds for the page to finish loading
            WebDriverWait(self.driver, 10).until(
                lambda driver: driver.execute_script('return document.readyState') == 'complete')
        except TimeoutException:
            print("Timed out waiting for page load.")
            self.driver.quit()
title = Selector(response).xpath(
'//div[@class="leftContainer"]/div/div/div/div/ \
a/img[@id="coverImage"]/@alt'
)
genre = Selector(response).xpath(
'//div[@class="rightContainer"]/div/div/ \
div[@class="bigBoxBody"]/div/div/div[@class="left"]/a/text()'
)
rating = Selector(response).xpath(
'//div[@class="leftContainer"]/div/div[@id="metacol"]/ \
div[@id="bookMeta"]/span/span[@class="average"]/text()'
)
reviews = Selector(response).xpath(
'//div[@id="bookReviews"]/ \
div[@class="friendReviews elementListBrown"]'
)
for review in reviews:
try:
item = GoodreadsItem()
item['title'] = title.extract()[0]
item['rating'] = rating.extract()[0]
item['book_url'] = response.meta['book_url']
item['genre'] = genre.extract()[0]
item['link_url'] = review.xpath(
'.//div/div/link/@href').extract()[0]
item['reviewDate'] = review.xpath(
'.//div/div/div/div/a/text()').extract()[0]
item['user'] = review.xpath(
'.//div/div/div/div/span/a/text()').extract()[0]
review_text = review.xpath('.//div/div/div/ \
div[@class="reviewText stacked"]/span/ \
span[2]/text()'
).extract()[0]
# remove html tags
item['review'] = remove_tags(review_text)
except IndexError as e:
print(e,": title: ",item['title'], "user: ",item['user'])
logger.error(e.args[0])
raise
yield item
``` |
{
"source": "Johnsel/litex-boards",
"score": 2
} |
#### File: litex_boards/platforms/digilent_cmod_a7.py
```python
from litex.build.generic_platform import Pins, Subsignal, IOStandard, Misc
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk12", 0, Pins("L17"), IOStandard("LVCMOS33")),
# Buttons
("cpu_reset", 0, Pins("A18"), IOStandard("LVCMOS33")),
("user_btn", 0, Pins("B18"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("A17"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("C16"), IOStandard("LVCMOS33")),
("rgb_led", 0,
Subsignal("r", Pins("C17")),
Subsignal("g", Pins("B16")),
Subsignal("b", Pins("B17")),
IOStandard("LVCMOS33"),
),
# Serial
("serial", 0,
Subsignal("tx", Pins("J18")),
Subsignal("rx", Pins("J17")),
IOStandard("LVCMOS33")),
# SRAM
("issiram", 0,
Subsignal("addr", Pins(
"M18 M19 K17 N17 P17 P18 R18 W19",
"U19 V19 W18 T17 T18 U17 U18 V16",
"W16 W17 V15"),
IOStandard("LVCMOS33")),
Subsignal("data", Pins(
"W15 W13 W14 U15 U16 V13 V14 U14"),
IOStandard("LVCMOS33")),
Subsignal("wen", Pins("R19"), IOStandard("LVCMOS33")),
Subsignal("cen", Pins("N19"), IOStandard("LVCMOS33")),
Misc("SLEW=FAST"),
),
# SPIFlash
("spiflash", 0,
Subsignal("cs_n", Pins("K19")),
Subsignal("clk", Pins("E19")),
Subsignal("mosi", Pins("D18")),
Subsignal("miso", Pins("D19")),
Subsignal("wp", Pins("G18")),
Subsignal("hold", Pins("F18")),
IOStandard("LVCMOS33"),
),
("spiflash4x", 0,
Subsignal("cs_n", Pins("K19")),
Subsignal("clk", Pins("E19")),
Subsignal("dq", Pins("D18 D19 G18 F18")),
IOStandard("LVCMOS33")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = []
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk12"
default_clk_period = 1e9/12e6
def __init__(self, variant="a7-35", toolchain="vivado"):
device = {
"a7-35": "xc7a35tcpg236-1"
}[variant]
XilinxPlatform.__init__(self, device, _io, _connectors, toolchain=toolchain)
def create_programmer(self):
bscan_spi = "bscan_spi_xc7a15t.bit" if "xc7a15t" in self.device else "bscan_spi_xc7a35t.bit"
return OpenOCD("openocd_xc7_ft2232.cfg", bscan_spi)
def do_finalize(self,fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk12", loose=True), self.default_clk_period)
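
# --- Illustrative usage (not part of the original file) -----------------------
# A minimal sketch: instantiate the platform and request a few of the resources
# declared in _io above. It assumes a working LiteX installation and is only
# meaningful as part of a larger LiteX design/build flow.
if __name__ == "__main__":
    platform = Platform(variant="a7-35")
    clk12 = platform.request("clk12")        # clock input on pin L17
    led0 = platform.request("user_led", 0)   # LED on pin A17
    serial = platform.request("serial")      # record with .tx / .rx subsignals
    print(platform.device)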
``` |
{
"source": "johnseremba/RandomRoomAllocator",
"score": 4
} |
#### File: johnseremba/RandomRoomAllocator/main.py
```python
import cmd
import sys
from docopt import docopt, DocoptExit
from dojo_classes.Dojo import Dojo
dojo = Dojo()
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action.
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match.
# We print a message to the user and the usage block.
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here.
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
class Main (cmd.Cmd):
intro = 'Welcome to the random room allocator!' \
+ ' (type help for a list of commands.)'
prompt = '(Dojo) '
file = None
@docopt_cmd
def do_create_room(self, arg):
"""Usage: create_room <room_type> <room_name> ..."""
room_type = arg['<room_type>']
room_name = arg['<room_name>']
dojo.create_room(room_type, room_name)
@docopt_cmd
def do_add_person(self, arg):
"""Usage: add_person <first_name> <last_name> <person_type> [<wants_accommodation>]"""
first_name = arg['<first_name>']
last_name = arg['<last_name>']
person_type = arg['<person_type>']
person_name = first_name + " " + last_name
wants_accommodation = arg['<wants_accommodation>']
if wants_accommodation is None:
wants_accommodation = "N"
dojo.add_person(person_name, person_type, wants_accommodation)
@docopt_cmd
def do_print_room(self, arg):
"""Usage: print_room <room_name>"""
room_name = arg['<room_name>'].strip()
dojo.print_room(room_name)
@docopt_cmd
def do_print_allocations(self, arg):
"""Usage: print_allocations [#]"""
file_name = arg['#']
dojo.print_allocations(file_name)
@docopt_cmd
def do_print_unallocated(self, arg):
"""Usage: print_unallocated [#]"""
file_name = arg['#']
dojo.print_unallocated(file_name)
@docopt_cmd
def do_reallocate_person(self, arg):
"""Usage: reallocate_person <person_identifier> <new_room_name>"""
person_id = arg['<person_identifier>']
room_name = arg['<new_room_name>']
dojo.reallocate_person(person_id, room_name)
@docopt_cmd
def do_load_people(self, arg):
"""Usage: load_people"""
dojo.load_people()
@docopt_cmd
def do_save_state(self, arg):
"""Usage: save_state [<database_name>]"""
db_name = arg['<database_name>']
dojo.save_state(db_name)
@docopt_cmd
def do_load_state(self, arg):
"""Usage: load_state <sqlite_database>"""
db_name = arg['<sqlite_database>']
dojo.load_state(db_name)
@docopt_cmd
def do_print_pretty_allocations(self, arg):
"""Usage: print_pretty_allocations"""
dojo.print_pretty_allocations()
@docopt_cmd
def do_print_all_data(self, arg):
"""Usage: print_all_data"""
dojo.print_all_data()
def do_quit(self, arg):
"""Quits out the Random room allocator."""
print('Good Bye!')
exit()
opt = docopt(__doc__, sys.argv[1:])
Main().cmdloop()
```
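The `docopt_cmd`-decorated handlers above all delegate to a shared `Dojo` instance, so the same operations can be exercised non-interactively for a quick smoke test. A minimal sketch, assuming the repository's `dojo_classes` package is importable; the room name, person name, and person type below are made-up placeholders:
```python
from dojo_classes.Dojo import Dojo

dojo = Dojo()
dojo.create_room("office", "Blue")           # same call do_create_room dispatches to
dojo.add_person("Jane Doe", "fellow", "Y")   # placeholder name and person type
dojo.print_room("Blue")
```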
#### File: RandomRoomAllocator/tests/test_create_room.py
```python
import unittest
from dojo_classes.Dojo import Dojo
from dojo_classes.Room import LivingSpace, Office
class TestCreateRoom (unittest.TestCase):
def setUp(self):
self.dojo = Dojo()
def test_create_room_successfully(self):
new_room = self.dojo.create_room("office", "Blue")
new_room_count = len(new_room)
self.assertEqual(new_room_count, 1, msg="Total rooms should be 1")
def test_create_room_invalid_chars(self):
new_room = self.dojo.create_room("office", "Y3ll@!-=")
new_room_count = len(new_room)
self.assertEqual(new_room_count, 0, msg="Room name contains invalid chars shouldn't be created")
def test_invalid_room_type(self):
new_room = self.dojo.create_room("some_room_type", "Blue")
new_room_count = len(new_room)
self.assertEqual(new_room_count, 0, msg="Room shouldn't be created")
def test_create_many_rooms(self):
new_rooms = self.dojo.create_room("office", "Purple", "Black", "Brown")
new_room_count = len([office for office in new_rooms if isinstance(office, Office)])
self.assertEqual(new_room_count, 3, msg="Created offices should be 3")
def test_create_living_space(self):
new_room = self.dojo.create_room("living_space", "Ruby")
new_room_count = len([living_space for living_space in new_room if isinstance(living_space, LivingSpace)])
self.assertEqual(new_room_count, 1, msg="Total rooms should be 1")
def test_create_many_living_space(self):
new_rooms = self.dojo.create_room("living_space", "Ruby", "Shell", "Python")
new_room_count = len([living_space for living_space in new_rooms if isinstance(living_space, LivingSpace)])
self.assertEqual(new_room_count, 3, msg="Total living spaces should be 3")
def test_create_duplicate_rooms(self):
new_room_1 = self.dojo.create_room("office", "Blue")
new_room_2 = self.dojo.create_room("office", "Blue")
new_rooms = new_room_1 + new_room_2
new_room_count = len(new_rooms)
self.assertEqual(new_room_count, 1,
msg="Should not create two or more rooms with the same names")
``` |
{
"source": "johnsextro/saintsApp",
"score": 3
} |
#### File: saintsApp/SaintsServer/load.py
```python
import logging
import webapp2
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from lxml import etree
import team
import time
from season_service import Season
from team import Team
from available_season import AvailableSeasons
class Load(webapp2.RequestHandler):
GAME_ID = 0
GAME_DATE = 1
GAME_TIME = 2
LOCATION = 3
HOME_TEAM = 4
AWAY_TEAM = 5
SCORE = 6
schoolNames = ["ICOM", "2Rivers", "A.S.H.", "AS", "ASH", "All Saints", "Assumption", "Borromeo", "HS", "HT", "Holy Rosary", "Holy Spirit", "Holy Trinity", "ICD", "ICOM", "IHM", "J and A", "JA", "JandA", "LWCS", "Living Word", "<NAME>", "SC", "SESR", "SESR <NAME>", "SH T", "SH Troy", "SJ", "SJ Cott", "Sacred Heart Troy", "St Cletus", "St Joe", "St <NAME>ottleville", "St Josephville", "St Patrick", "St Paul", "St Peter", "St Peters", "St Theodore", "St. Cletus", "St. Ignatius", "St. Joe", "St. Joe Cottleville", "St. <NAME>", "St. Joe Josephsville", "St. Patrick", "St. Paul", "St. Peter", "St. Rose", "St. Sabina", "St. Theo", "St. Theodore", "St.Joe Cottleville", "St.Patrick", "Sts J and A", "Sts JandA", "Sts. J and A", "Sts. J andA", "Sts. JandA", "St Joseph", "St. Joseph"]
def get(self):
start_time = time.time()
availSeasons = AvailableSeasons()
for s in availSeasons.getSeasons():
logging.info("Beginning data load for season %d" % s.season)
teamIds = self.get_team_ids(s.season)
stcharlesurl = "http://www.cycstcharles.com/schedule.php?team=%s&pfv=y&sort=date&month=999&year=999&season=%d"
for team_id in teamIds:
team_url = stcharlesurl % (team_id[1], s.season)
self.fetch_team_schedule(team_url, team_id)
logging.info("Finished loading schedule data. Elapsed time (in mins): " + str((time.time() - start_time)/60))
if memcache.flush_all():
logging.info("Flushed everything from memcache.")
else:
logging.error("Error trying to flush the memcache.")
t = Team()
seasons = []
for team in t.getSeasons():
season = Season(season=team.season)
if season not in seasons:
seasons.append(season)
if not memcache.add('seasons', seasons):
logging.error('memcache failed to set')
def fetch_team_schedule(self, team_url, team_id):
url = urlfetch.fetch(url=team_url, deadline=99)
if url.status_code == 200:
tree = etree.HTML(url.content)
# logging.info(url.content)
elements = tree.xpath('//table[@class="list"]//tr')
# logging.info(str(season[0].text.strip())
self.save_team_games(elements, team_id[1], team_id[0], self.get_season(tree), self.get_grade(tree))
def get_grade(self, tree):
grade = ''
gradeElement = tree.xpath('//table[@class="list"]//tr/td[@class="smalltext"][7]/select[@class="smalltext"]//option[@selected = "selected"]/../@label')
if (len(gradeElement) == 1):
grade = gradeElement[0].strip()
return grade
def get_season(self, tree):
season = ''
seasonElement = tree.xpath('//table/tr/td[1]/select//option[@selected = "selected"]')
if (len(seasonElement) == 1):
season = seasonElement[0].text.strip()
return season
def get_team_ids(self, seasonId):
teams = []
urlString = "http://www.cycstcharles.com/schedule.php?month=999&year=999&pfv=n&location=-1&leagueid=1&season=%d&conference=-1&division=-1&team=-1" % seasonId
url = urlfetch.fetch(url=urlString, deadline=99)
if url.status_code == 200:
tree = etree.HTML(url.content)
# elements = tree.xpath('//*[@id="maincontent"]/table[2]/tbody/tr/td[2]/div[3]/table/tbody/tr[3]/td/table/tbody/tr/td[7]//option')
elements = tree.xpath('//td[@class="smalltext"][7]/select[@class="smalltext"]//option')
for team_name in elements:
attribs = team_name.attrib
value = attribs["value"]
teams.append([team_name.text.strip(),value[value.find("&team=")+6:]])
return teams
def save_team_games(self, games, team_id, coach, season, grade):
# todo: Need to account for teams that already exist in the database
t = team.Team(key_name=str(team_id))
t.teamId = str(team_id)
for val in self.schoolNames:
if coach.find(val) > -1:
t.school = val
coach = coach[len(val)+1:]
coach = coach.strip()
t.coach = coach
logging.info("School = %s, Coach = %s" % (t.school, t.coach))
t.season = season
t.grade = grade
t.year = 2015
t.schedule = self.jsonify_games(games)
if t.school is not None and t.grade is not None:
t.put()
break
def jsonify_games(self, games):
gamelist = []
for rowindex in range(len(games)):
if len(games[rowindex])>3 and games[rowindex][1].text is not None and games[rowindex][2].text is not None:
try:
game = '{"game_date": "%s", "time": "%s", "home": "%s", "away": "%s", "location": "%s", "id": "%s", "score": "%s"}' % (games[rowindex][self.GAME_DATE].text, games[rowindex][self.GAME_TIME].text, games[rowindex][self.HOME_TEAM].text, games[rowindex][self.AWAY_TEAM].text, games[rowindex][self.LOCATION][0].text, games[rowindex][self.GAME_ID].text, games[rowindex][self.SCORE].text)
# {"games": [{"game_date": "4/1/2013", "time": "1:00 PM", "home": "St. J & A", "away": "ICD", location": "St. Joes"}]}
gamelist.append(game)
except IndexError, e:
logging.debug(e)
logging.debug(games[rowindex])
continue
return '{"games": [%s]}' % ", ".join(gamelist)
app = webapp2.WSGIApplication([('/crontask/scrape', Load)],debug=True)
if __name__ == '__main__':
    from google.appengine.ext.webapp.util import run_wsgi_app
    run_wsgi_app(app)
# http://www.cycstcharles.com/schedule.php?season=32&conference=779&division=-1&team=-1&month=3&year=2013&pfv=y&sort=date
```
#### File: saintsApp/SaintsServer/schedule_service.py
```python
from protorpc import messages
from protorpc import remote
from protorpc.wsgi import service
from team import Team
import logging
package = 'SaintsSchedule'
# Request message containing the id of the team whose schedule is requested
class ScheduleRequest(messages.Message):
team_id = messages.StringField(1, required=True)
# Response message containing the serialized schedule
class ScheduleResponse(messages.Message):
schedule = messages.StringField(1, required=True)
# Create the RPC service to exchange messages
class ScheduleService(remote.Service):
@remote.method(ScheduleRequest, ScheduleResponse)
def schedule(self, request):
t = Team()
games = t.getGames(request.team_id)
return ScheduleResponse(schedule=games)
# Map the RPC service and path (/schedule)
app = service.service_mappings([('/schedule.*', ScheduleService)])
```
#### File: saintsApp/SaintsServer/season_service.py
```python
from protorpc import messages
from protorpc import remote
from protorpc.wsgi import service
from team import Team
from google.appengine.api import memcache
import logging
package = 'SaintsSchedule'
class Season(messages.Message):
season = messages.StringField(1, required=True)
class SeasonRequest(messages.Message):
seasonRequest = messages.StringField(1, required=False)
# Response message containing the list of seasons
class SeasonResponse(messages.Message):
seasons = messages.MessageField(Season, 1, repeated=True)
# Create the RPC service to exchange messages
class SeasonService(remote.Service):
@remote.method(SeasonRequest, SeasonResponse)
def season(self, request):
theCache = memcache.get('seasons')
if theCache is None:
t = Team()
seasons = []
for team in t.getSeasons():
season = Season(season=team.season)
if season not in seasons:
seasons.append(season)
if not memcache.add('seasons', seasons):
logging.error('memcache failed to set')
return SeasonResponse(seasons=seasons)
else:
return SeasonResponse(seasons=theCache)
# Map the RPC service and path (/schedule)
app = service.service_mappings([('/season.*', SeasonService)])
``` |
{
"source": "johnshajiang/pyssldemo",
"score": 3
} |
#### File: johnshajiang/pyssldemo/certs.py
```python
from enum import Enum, unique
from pyssldemo.params import KeyAlgos, SigAlgos, HashAlgos
class Cert(object):
def __init__(
self,
key_algo,
sig_algo,
hash_algo,
cert_name):
self.key_algo = key_algo
self.sig_algo = sig_algo
self.hash_algo = hash_algo
self.cert_name = cert_name
def __eq__(self, other):
return self.cert_name == other.cert_name
def __repr__(self):
return 'KeyAlgo: %s, SigAlgo: %s, HashAlgo: %s\nCert: %s' % (
self.key_algo, self.sig_algo, self.hash_algo, self.cert_name)
def __str__(self):
return 'KeyAlgo: %s, SigAlgo: %s, HashAlgo: %s' % (
self.key_algo, self.sig_algo, self.hash_algo)
@unique
class Certs(Enum):
CA_ECDSA_SECP256R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CA_ECDSA_SECP256R1')
CA_ECDSA_SECP384R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CA_ECDSA_SECP384R1')
CA_ECDSA_SECP521R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CA_ECDSA_SECP521R1')
CA_RSA = Cert(
KeyAlgos.RSA,
SigAlgos.RSA,
HashAlgos.SHA256,
'CA_RSA')
SERVER_ECDSA_SECP256R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'SERVER_ECDSA_SECP256R1')
SERVER_ECDSA_SECP384R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'SERVER_ECDSA_SECP384R1')
SERVER_ECDSA_SECP521R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'SERVER_ECDSA_SECP521R1')
SERVER_RSA = Cert(
KeyAlgos.RSA,
SigAlgos.RSA,
HashAlgos.SHA256,
'SERVER_RSA')
CLIENT_ECDSA_SECP256R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CLIENT_ECDSA_SECP256R1')
CLIENT_ECDSA_SECP384R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CLIENT_ECDSA_SECP384R1')
CLIENT_ECDSA_SECP521R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CLIENT_ECDSA_SECP521R1')
CLIENT_RSA = Cert(
KeyAlgos.RSA,
SigAlgos.RSA,
HashAlgos.SHA256,
'CLIENT_RSA')
class CertGroup(object):
def __init__(self, ca, server_cert, client_cert):
self.ca = ca
self.server_cert = server_cert
self.client_cert = client_cert
def __str__(self):
return f'CA: {self.ca}\nServer cert: {self.server_cert}\nClient cert:{self.client_cert}'
class CertGroups(Enum):
ECDSA_GROUP = CertGroup(
Certs.CA_ECDSA_SECP256R1,
Certs.SERVER_ECDSA_SECP256R1,
Certs.CLIENT_ECDSA_SECP256R1)
RSA_GROUP = CertGroup(
Certs.CA_RSA,
Certs.SERVER_RSA,
Certs.CLIENT_RSA)
```
#### File: pyssldemo/demo/resumption_demo.py
```python
import ssl
from pyssldemo import utils
from pyssldemo.client import Client
from pyssldemo.params import Protocols
from pyssldemo.server import Server, ServerThread
@utils.func_separator()
def run_case(context, port):
print(f'Protocol: {utils.tls_protocol(context.minimum_version).value.name}')
with Client(context) as _client1:
_client1.connect(port=port)
_session = _client1.get_session()
with Client(context=_client1.context, session=_session) as _client2:
_client2.connect(port=port)
if not _client2.is_session_resumed():
raise RuntimeWarning('Session is not resumed')
else:
print('Session was resumed')
if __name__ == '__main__':
print(ssl.OPENSSL_VERSION)
with ServerThread(Server()) as _s_thread:
_s_thread.start()
_port = _s_thread.server.get_port()
for _protocol in (Protocols.TLSV1_2, Protocols.TLSV1_3):
_context = utils.create_context(min_protocol=_protocol, max_protocol=_protocol)
run_case(_context, _port)
```
#### File: pyssldemo/demo/run_all_demos.py
```python
import os
import ssl
import subprocess
demo_names = (
'alpn_demo',
'basic_conn_demo',
'client_auth_demo',
'combo_conn_demo',
'conc_conn_demo',
'resumption_demo',
'sni_demo',
'crl_demo'
)
def run_demo(demo_name):
_dir = os.path.dirname(os.path.abspath(__file__))
_demo_path = os.path.join(_dir, demo_name + '.py')
print(f'Demo: {_demo_path}')
return subprocess.run(['python3', _demo_path])
if __name__ == '__main__':
print(ssl.OPENSSL_VERSION)
for _demo_name in demo_names:
cp = run_demo(_demo_name)
if cp.returncode != 0:
raise RuntimeError(f'{_demo_name} failed!')
``` |
{
"source": "JohnShandy/swganh",
"score": 3
} |
#### File: scripts/commands/burstrun.py
```python
from swgpy.command import BaseSwgCommand, Callback
class BurstRunCommand(BaseSwgCommand):
base_run_multiplier = 2.0
base_run_duration_ms = 15000
    base_cooldown_timer_ms = 60000
def Validate(self):
actor = self.GetActor()
        if actor is None:
return False
if actor.HasFlag("BurstRunning"):
self.GetController().SendSystemMessage('combat_effects', 'burst_run_no')
return False
if actor.HasFlag("BurstRunCooldown"):
self.GetController().SendSystemMessage('combat_effects', 'burst_run_wait')
return False
# @TODO Check for mounts and whether or not in a space station/vehicle
return True
def Run(self):
actor = self.GetActor()
actor.SetFlag("BurstRunning")
actor.SetFlag("BurstRunCooldown")
# increase the actor's run speed
actor.run_speed *= self.base_run_multiplier
self.GetController().SendSystemMessage('cbt_spam', 'burstrun_start_single')
return Callback(self.EndBurstRun, self.base_run_duration_ms)
def EndBurstRun(self):
actor = self.GetActor()
actor.RemoveFlag("BurstRunning")
# decrease the actor's run speed by the increased amount
actor.run_speed /= self.base_run_multiplier
self.GetController().SendSystemMessage('cbt_spam', 'burstrun_stop_single')
self.GetController().SendSystemMessage('combat_effects', 'burst_run_tired')
        return Callback(self.EndBurstRunCooldown, self.base_cooldown_timer_ms - self.base_run_duration_ms)
def EndBurstRunCooldown(self):
actor = self.GetActor()
actor.RemoveFlag("BurstRunCooldown")
self.GetController().SendSystemMessage('combat_effects', 'burst_run_not_tired')
```
#### File: scripts/commands/duel.py
```python
from swgpy.command import BaseSwgCommand
from swgpy import PVPSTATUS, OutOfBand, ProseType
class DuelCommand(BaseSwgCommand):
def Run(self):
actor = self.GetActor()
target = self.GetTarget()
Acontroller = actor.Controller()
Tcontroller = target.Controller()
if not actor.in_duel_list(target.id):
actor.add_duel_list(target.id)
#system message
Acontroller.SendSystemMessage(OutOfBand("duel", "challenge_self", ProseType.TT, target.id), False, False)
Tcontroller.SendSystemMessage(OutOfBand("duel", "challenge_target", ProseType.TT, actor.id), False, False)
if actor.in_duel_list(target.id) and target.in_duel_list(actor.id):
# Start The Duel
Acontroller.SendSystemMessage(OutOfBand("duel", "accept_target", ProseType.TT, target.id), False, False)
Tcontroller.SendSystemMessage(OutOfBand("duel", "accept_self", ProseType.TT, actor.id), False, False)
actor.pvp_status = PVPSTATUS.PvPStatus_Attackable
target.pvp_status = PVPSTATUS.PvPStatus_Attackable
            actor.activate_auto_attack()
            target.activate_auto_attack()
```
#### File: scripts/commands/removefriend.py
```python
import re
import swgpy
from swgpy.command import BaseSwgCommand
class RemoveFriendCommand(BaseSwgCommand):
def Run(self):
        split = re.split(r'\W+', self.GetCommandString())
friend_name = split[0]
player = self.GetActor().get_player()
if player:
# Check if the name is already our friend
if player.is_friend(friend_name):
player.remove_friend(friend_name)
self.GetController().SendSystemMessage(swgpy.OutOfBand('cmnty', 'friend_removed', swgpy.ProseType.TT, friend_name), False, False)
else:
self.GetController().SendSystemMessage(swgpy.OutOfBand('cmnty', 'friend_not_found', swgpy.ProseType.TT, friend_name), False, False)
``` |
{
"source": "john-shaskin/aws-cdk",
"score": 2
} |
#### File: python/%name.PythonModule%/%name.PythonModule%_stack.template.py
```python
from aws_cdk import cdk
class %name.PascalCased%Stack(cdk.Stack):
def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
super().__init__(app, id)
# The code that defines your stack goes here
``` |
{
"source": "john-shaskin/cumulus",
"score": 2
} |
#### File: cumulus/chain/step.py
```python
from cumulus.chain import chaincontext # noqa
class Step:
"""
Define an interface for handling requests.
"""
def __init__(self, name='UnNamed'):
"""
:type name: basestring Friendly name of the step to be used in logical naming
"""
self.name = name
def handle(self, chain_context):
# type: (chaincontext.ChainContext) -> None
raise NotImplementedError("handle must be implemented")
```
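As a concrete illustration of the interface, a custom step only needs to subclass `Step` and implement `handle`, mutating the shared `chain_context.template`. The sketch below is not part of the library; the bucket resource and names are arbitrary examples:
```python
from troposphere import s3

from cumulus.chain import step


class ArtifactBucketStep(step.Step):
    """Example step that adds a single S3 bucket to the chain's template."""

    def __init__(self, bucket_name):
        step.Step.__init__(self, name='ArtifactBucket')
        self.bucket_name = bucket_name

    def handle(self, chain_context):
        # Each step in the chain mutates the shared troposphere template in turn.
        chain_context.template.add_resource(s3.Bucket(
            "ArtifactBucket%s" % chain_context.instance_name,
            BucketName=self.bucket_name,
        ))
```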
#### File: cumulus/policies/cloudformation.py
```python
import awacs
import awacs.aws
import awacs.logs
import awacs.iam
import awacs.s3
import awacs.ecr
from troposphere import iam
def get_policy_cloudformation_general_access(policy_name):
# TODO: Return policy with permissions:
# 1. Full Cloudformation access to stacks prefixed with application name
# 2. IAM access (currently using unlimited access, but this seems like it could be limited a bit)
return iam.Policy(
PolicyName=policy_name,
PolicyDocument=awacs.aws.PolicyDocument(
Version="2012-10-17",
Id="%sId" % policy_name,
Statement=[
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.aws.Action("cloudformation", "*"),
awacs.aws.Action("ec2", "*"),
awacs.aws.Action("route53", "*"),
awacs.aws.Action("iam", "*"),
awacs.aws.Action("elasticloadbalancing", "*"),
awacs.aws.Action("s3", "*"),
awacs.aws.Action("autoscaling", "*"),
awacs.aws.Action("apigateway", "*"),
awacs.aws.Action("cloudwatch", "*"),
awacs.aws.Action("cloudfront", "*"),
awacs.aws.Action("rds", "*"),
awacs.aws.Action("dynamodb", "*"),
awacs.aws.Action("lambda", "*"),
awacs.aws.Action("sqs", "*"),
awacs.aws.Action("events", "*"),
awacs.aws.Action("ecr", "*"),
awacs.iam.PassRole,
],
Resource=["*"]
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.logs.CreateLogGroup,
awacs.logs.CreateLogStream,
awacs.logs.PutLogEvents,
],
# TODO: restrict more accurately
Resource=["*"]
)
]
)
)
```
#### File: steps/dev_tools/cloud_formation_action.py
```python
import awacs
import awacs.aws
import awacs.sts
from troposphere import iam, codepipeline, GetAtt
import cumulus.policies
import cumulus.policies.cloudformation
import cumulus.types.codebuild.buildaction
import cumulus.util.template_query
from cumulus.chain import step
from cumulus.steps.dev_tools import META_PIPELINE_BUCKET_POLICY_REF
class CloudFormationAction(step.Step):
OUTPUT_FILE_NAME = 'StackOutputs.json'
def __init__(self,
action_name,
input_artifact_names,
input_template_path,
input_template_configuration,
stage_name_to_add,
stack_name,
action_mode,
output_artifact_name=None,
cfn_action_role_arn=None,
cfn_action_config_role_arn=None,
cfn_param_overrides=None,
):
"""
:type cfn_action_config_role_arn: [troposphere.iam.Policy]
:type action_name: basestring Displayed on the console
:type input_artifact_names: [basestring] List of input artifacts
:type input_template_path: basestring Full path to cloudformation template (ex. ArtifactName::templatefolder/template.json)
        :type input_template_configuration: basestring Full path to cloudformation config file (ex. ArtifactName::envfolder/parameters.json)
:type stage_name_to_add: basestring Name of the pipeline stage to add this action to
:type stack_name: basestring name of the stack that this action will build
:type action_mode: cumulus.types.cloudformation.action_mode.ActionMode The actual CloudFormation action to execute
"""
step.Step.__init__(self)
self.cfn_param_overrides = cfn_param_overrides
self.action_name = action_name
self.input_artifact_names = input_artifact_names
self.input_template_path = input_template_path
self.input_template_configuration = input_template_configuration
self.stage_name_to_add = stage_name_to_add
self.stack_name = stack_name
self.action_mode = action_mode
self.output_artifact_name = output_artifact_name
self.cfn_action_role_arn = cfn_action_role_arn
self.cfn_action_config_role_arn = cfn_action_config_role_arn
def handle(self, chain_context):
print("Adding action %sstage" % self.action_name)
# if supplied, use the role injected in, otherwise, build one.
if self.cfn_action_config_role_arn:
cfn_configuration_role_arn = self.cfn_action_config_role_arn
else:
cfn_configuration_role = self.get_cfn_role(
chain_context=chain_context,
)
cfn_configuration_role_arn = GetAtt(cfn_configuration_role, 'Arn')
chain_context.template.add_resource(cfn_configuration_role)
input_artifacts = []
for artifact_name in self.input_artifact_names:
input_artifacts.append(codepipeline.InputArtifacts(
Name=artifact_name
))
cloud_formation_action = cumulus.types.codebuild.buildaction.CloudFormationAction(
Name=self.action_name,
InputArtifacts=input_artifacts,
Configuration={
'ActionMode': self.action_mode.value,
# this role needs to be the cfn role above, and it should add the tools account policy
'RoleArn': cfn_configuration_role_arn,
'StackName': self.stack_name,
'Capabilities': 'CAPABILITY_NAMED_IAM',
'TemplateConfiguration': self.input_template_configuration,
'TemplatePath': self.input_template_path,
},
RunOrder="1"
)
# Add optional configuration
if self.output_artifact_name:
output_artifact = codepipeline.OutputArtifacts(
Name=self.output_artifact_name
)
cloud_formation_action.OutputArtifacts = [
output_artifact
]
cloud_formation_action.Configuration['OutputFileName'] = CloudFormationAction.OUTPUT_FILE_NAME
if self.cfn_action_role_arn:
cloud_formation_action.RoleArn = self.cfn_action_role_arn
if self.cfn_param_overrides:
cloud_formation_action.Configuration['ParameterOverrides'] = self.cfn_param_overrides
stage = cumulus.util.template_query.TemplateQuery.get_pipeline_stage_by_name(
template=chain_context.template,
stage_name=self.stage_name_to_add,
)
# TODO accept a parallel action to the previous action, and don't +1 here.
next_run_order = len(stage.Actions) + 1
cloud_formation_action.RunOrder = next_run_order
stage.Actions.append(cloud_formation_action)
def get_cfn_role(self, chain_context, step_policies=None):
"""
Default role for cloudformation with access to the S3 bucket and cloudformation assumerole.
:param chain_context: chaincontext.ChainContext
:type step_policies: [troposphere.iam.Policy]
"""
policy_name = "CloudFormationPolicy%sStage" % chain_context.instance_name
role_name = "CloudFormationRole%sStage" % self.action_name
all_policies = [
cumulus.policies.cloudformation.get_policy_cloudformation_general_access(policy_name)
]
if step_policies:
all_policies += step_policies
cloud_formation_role = iam.Role(
role_name,
Path="/",
AssumeRolePolicyDocument=awacs.aws.Policy(
Statement=[
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[awacs.sts.AssumeRole],
Principal=awacs.aws.Principal(
'Service',
["cloudformation.amazonaws.com"]
)
)]
),
Policies=all_policies,
ManagedPolicyArns=[
chain_context.metadata[META_PIPELINE_BUCKET_POLICY_REF]
]
)
return cloud_formation_role
```
#### File: steps/dev_tools/pipeline.py
```python
import awacs
import awacs.aws
import awacs.awslambda
import awacs.codecommit
import awacs.ec2
import awacs.iam
import awacs.logs
import awacs.s3
import awacs.sts
import awacs.kms
import troposphere
from troposphere import codepipeline, Ref, iam
from troposphere.s3 import Bucket, VersioningConfiguration
import cumulus.steps.dev_tools
from cumulus.chain import step
class Pipeline(step.Step):
def __init__(self,
name,
bucket_name,
pipeline_service_role_arn=None,
create_bucket=True,
pipeline_policies=None,
bucket_policy_statements=None,
bucket_kms_key_arn=None,
):
"""
:type pipeline_service_role_arn: basestring Override the pipeline service role. If you pass this
the pipeline_policies is not used.
:type create_bucket: bool if False, will not create the bucket. Will attach policies either way.
:type bucket_name: the name of the bucket that will be created suffixed with the chaincontext instance name
:type bucket_policy_statements: [awacs.aws.Statement]
:type pipeline_policies: [troposphere.iam.Policy]
:type bucket_kms_key_arn: ARN used to decrypt the pipeline artifacts
"""
step.Step.__init__(self)
self.name = name
self.bucket_name = bucket_name
self.create_bucket = create_bucket
self.pipeline_service_role_arn = pipeline_service_role_arn
self.bucket_policy_statements = bucket_policy_statements
self.pipeline_policies = pipeline_policies or []
self.bucket_kms_key_arn = bucket_kms_key_arn
def handle(self, chain_context):
"""
This step adds in the shell of a pipeline.
* s3 bucket
* policies for the bucket and pipeline
* your next step in the chain MUST be a source stage
:param chain_context:
:return:
"""
if self.create_bucket:
pipeline_bucket = Bucket(
"PipelineBucket%s" % self.name,
BucketName=self.bucket_name,
VersioningConfiguration=VersioningConfiguration(
Status="Enabled"
)
)
chain_context.template.add_resource(pipeline_bucket)
default_bucket_policies = self.get_default_bucket_policy_statements(self.bucket_name)
if self.bucket_policy_statements:
bucket_access_policy = self.get_bucket_policy(
pipeline_bucket=self.bucket_name,
bucket_policy_statements=self.bucket_policy_statements,
)
chain_context.template.add_resource(bucket_access_policy)
pipeline_bucket_access_policy = iam.ManagedPolicy(
"PipelineBucketAccessPolicy",
Path='/managed/',
PolicyDocument=awacs.aws.PolicyDocument(
Version="2012-10-17",
Id="bucket-access-policy%s" % chain_context.instance_name,
Statement=default_bucket_policies
)
)
chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_NAME] = self.bucket_name
chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_POLICY_REF] = Ref(
pipeline_bucket_access_policy)
default_pipeline_role = self.get_default_pipeline_role()
pipeline_service_role_arn = self.pipeline_service_role_arn or troposphere.GetAtt(default_pipeline_role, "Arn")
generic_pipeline = codepipeline.Pipeline(
"Pipeline",
RoleArn=pipeline_service_role_arn,
Stages=[],
ArtifactStore=codepipeline.ArtifactStore(
Type="S3",
Location=self.bucket_name,
)
)
if self.bucket_kms_key_arn:
encryption_config = codepipeline.EncryptionKey(
"ArtifactBucketKmsKey",
Id=self.bucket_kms_key_arn,
Type='KMS',
)
generic_pipeline.ArtifactStore.EncryptionKey = encryption_config
pipeline_output = troposphere.Output(
"PipelineName",
Description="Code Pipeline",
Value=Ref(generic_pipeline),
)
pipeline_bucket_output = troposphere.Output(
"PipelineBucket",
Description="Name of the input artifact bucket for the pipeline",
Value=self.bucket_name,
)
if not self.pipeline_service_role_arn:
chain_context.template.add_resource(default_pipeline_role)
chain_context.template.add_resource(pipeline_bucket_access_policy)
chain_context.template.add_resource(generic_pipeline)
chain_context.template.add_output(pipeline_output)
chain_context.template.add_output(pipeline_bucket_output)
def get_default_pipeline_role(self):
# TODO: this can be cleaned up by using a policytype and passing in the pipeline role it should add itself to.
pipeline_policy = iam.Policy(
PolicyName="%sPolicy" % self.name,
PolicyDocument=awacs.aws.PolicyDocument(
Version="2012-10-17",
Id="PipelinePolicy",
Statement=[
awacs.aws.Statement(
Effect=awacs.aws.Allow,
# TODO: actions here could be limited more
Action=[awacs.aws.Action("s3", "*")],
Resource=[
troposphere.Join('', [
awacs.s3.ARN(),
self.bucket_name,
"/*"
]),
troposphere.Join('', [
awacs.s3.ARN(),
self.bucket_name,
]),
],
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[awacs.aws.Action("kms", "*")],
Resource=['*'],
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.aws.Action("cloudformation", "*"),
awacs.aws.Action("codebuild", "*"),
],
# TODO: restrict more accurately
Resource=["*"]
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.codecommit.GetBranch,
awacs.codecommit.GetCommit,
awacs.codecommit.UploadArchive,
awacs.codecommit.GetUploadArchiveStatus,
awacs.codecommit.CancelUploadArchive
],
Resource=["*"]
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.iam.PassRole
],
Resource=["*"]
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.aws.Action("lambda", "*")
],
Resource=["*"]
),
],
)
)
pipeline_service_role = iam.Role(
"PipelineServiceRole",
Path="/",
AssumeRolePolicyDocument=awacs.aws.Policy(
Statement=[
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[awacs.sts.AssumeRole],
Principal=awacs.aws.Principal(
'Service',
"codepipeline.amazonaws.com"
)
)]
),
Policies=[pipeline_policy] + self.pipeline_policies
)
return pipeline_service_role
def get_default_bucket_policy_statements(self, pipeline_bucket):
bucket_policy_statements = [
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.s3.ListBucket,
awacs.s3.GetBucketVersioning,
],
Resource=[
troposphere.Join('', [
awacs.s3.ARN(),
pipeline_bucket,
]),
],
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.s3.HeadBucket,
],
Resource=[
'*'
]
),
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[
awacs.s3.GetObject,
awacs.s3.GetObjectVersion,
awacs.s3.PutObject,
awacs.s3.ListObjects,
awacs.s3.ListBucketMultipartUploads,
awacs.s3.AbortMultipartUpload,
awacs.s3.ListMultipartUploadParts,
awacs.aws.Action("s3", "Get*"),
],
Resource=[
troposphere.Join('', [
awacs.s3.ARN(),
pipeline_bucket,
'/*'
]),
],
)
]
return bucket_policy_statements
def get_bucket_policy(self, pipeline_bucket, bucket_policy_statements):
policy = troposphere.s3.BucketPolicy(
"PipelineBucketPolicy",
Bucket=pipeline_bucket,
PolicyDocument=awacs.aws.Policy(
Statement=bucket_policy_statements,
),
)
return policy
```
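To make the flow concrete, here is a rough sketch of driving the `Pipeline` step by hand. It is illustrative only: the library's `ChainContext` constructor is not shown in this excerpt, so a `SimpleNamespace` carrying the three attributes the step actually touches (`template`, `instance_name`, `metadata`) stands in for it, and the names are made up. As the docstring notes, a source stage must be added by the next step before the pipeline is usable:
```python
import types

import troposphere

from cumulus.steps.dev_tools.pipeline import Pipeline

# Stand-in for the library's ChainContext (its real constructor is not shown here).
context = types.SimpleNamespace(
    template=troposphere.Template(),
    instance_name='Demo',
    metadata={},
)

Pipeline(name='Demo', bucket_name='demo-pipeline-artifacts').handle(context)

# The template now holds the artifact bucket, bucket policy, default service role
# and a CodePipeline shell with no stages yet.
print(context.template.to_json())
```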
#### File: steps/dev_tools/pipeline_source_action.py
```python
import awacs
import awacs.aws
import awacs.ec2
import awacs.iam
import awacs.logs
import awacs.s3
import awacs.sts
import troposphere
from troposphere import iam, \
codepipeline
import cumulus.policies
import cumulus.policies.codebuild
from cumulus.chain import step
from cumulus.steps.dev_tools import META_PIPELINE_BUCKET_POLICY_REF
from cumulus.types.codebuild.buildaction import SourceS3Action
from cumulus.util.template_query import TemplateQuery
class PipelineSourceAction(step.Step):
def __init__(self,
action_name,
output_artifact_name,
s3_bucket_name,
s3_object_key
):
"""
:type s3_object_key: basestring Path of the artifact in the bucket.
:type s3_bucket_name: basestring or troposphere.Ref Object of the bucket name.
:type input_artifact_name: basestring The artifact name in the pipeline.
(should contain buildspec.yml. You can override that name in a codebuild action)
:type action_name: basestring Displayed on the console
:type environment: troposphere.codebuild.Environment Optional if you need ENV vars or a different build.
:type vpc_config.Vpc_Config: Only required if the codebuild step requires access to the VPC
"""
step.Step.__init__(self)
self.s3_object_key = s3_object_key
self.s3_bucket_name = s3_bucket_name
self.output_artifact_name = output_artifact_name
self.action_name = action_name
def handle(self, chain_context):
print("Adding source action %s." % self.action_name)
template = chain_context.template
policy_name = "CodeBuildPolicy%s" % chain_context.instance_name
codebuild_policy = cumulus.policies.codebuild.get_policy_code_build_general_access(policy_name)
role_name = "PipelineSourceRole%s" % self.action_name
codebuild_role = iam.Role(
role_name,
Path="/",
AssumeRolePolicyDocument=awacs.aws.Policy(
Statement=[
awacs.aws.Statement(
Effect=awacs.aws.Allow,
Action=[awacs.sts.AssumeRole],
Principal=awacs.aws.Principal(
'Service',
"codebuild.amazonaws.com"
)
)]
),
Policies=[
codebuild_policy
],
ManagedPolicyArns=[
chain_context.metadata[META_PIPELINE_BUCKET_POLICY_REF]
]
)
source_action = SourceS3Action(
Name=self.action_name,
OutputArtifacts=[
codepipeline.OutputArtifacts(
Name=self.output_artifact_name
)
],
Configuration={
"S3Bucket": self.s3_bucket_name,
"S3ObjectKey": self.s3_object_key
},
)
template.add_resource(codebuild_role)
found_pipelines = TemplateQuery.get_resource_by_type(
template=chain_context.template,
type_to_find=codepipeline.Pipeline)
pipeline = found_pipelines[0]
# Alternate way to get this
# dummy = TemplateQuery.get_resource_by_title(chain_context.template, 'AppPipeline')
stages = pipeline.Stages # type: list
# TODO: find stage by name
first_stage = stages[0]
# TODO accept a parallel action to the previous action, and don't +1 here.
first_stage.Actions.append(source_action)
template.add_output(
troposphere.Output(
"PipelineBucket%s" % self.action_name,
Value=self.s3_bucket_name,
Description="A pipeline source bucket",
)
)
template.add_output(
troposphere.Output(
"PipelineTriggerObject%s" % self.action_name,
Value=self.s3_object_key,
Description="An s3 object key in the pipeline bucket "
"that will trigger the pipeline",
)
)
```
#### File: steps/ec2/alb.py
```python
from cumulus.chain import step
from troposphere import (
Ref, Not, Equals, Join, ec2,
If, Output
)
from troposphere import elasticloadbalancingv2 as alb
SG_NAME = "%sSecurityGroup"
ALB_LISTENER = "%sListener"
ALB_NAME = "LoadBalancer"
TARGET_GROUP_DEFAULT = "TargetGroup"
class Alb(step.Step):
def __init__(self,
prefix,
):
"""
:type prefix: basestring prefix to name components uniquely
"""
step.Step.__init__(self, name='Alb')
self.prefix = prefix
def handle(self, chain_context):
sg_name = self.prefix + SG_NAME % self.name
self.create_conditions(chain_context.template)
self.create_security_groups(chain_context.template, sg_name)
self.create_default_target_group(chain_context.template)
self.create_load_balancer_alb(chain_context.template, sg_name)
self.add_listener(chain_context.template)
def create_conditions(self, template):
template.add_condition(
"UseSSL",
Not(Equals(Ref("ALBCertName"), ""))
)
template.add_condition(
"UseIAMCert",
Not(Equals(Ref("ALBCertType"), "acm")))
def create_security_groups(self, template, sg_name):
template.add_resource(
ec2.SecurityGroup(
sg_name,
GroupName=sg_name,
GroupDescription=sg_name,
VpcId=Ref("VpcId"),
Tags=[{'Key': 'Name', 'Value': sg_name}]
))
template.add_output(
Output("InternalAlbSG", Value=Ref(sg_name))
)
sg_ingress_name = "SecurityGroupIngressTo443"
# TODO: take a list of Cidr's
# Allow Internet to connect to ALB
template.add_resource(ec2.SecurityGroupIngress(
sg_ingress_name,
IpProtocol="tcp", FromPort="443", ToPort="443",
CidrIp="10.0.0.0/0",
GroupId=Ref(sg_name),
))
def create_load_balancer_alb(self, template, sg_name):
alb_name = ALB_NAME
load_balancer = template.add_resource(alb.LoadBalancer(
alb_name,
Scheme="internal",
Subnets=Ref("PrivateSubnets"),
SecurityGroups=[Ref(sg_name)]
))
template.add_output(
Output(
"CanonicalHostedZoneID",
Value=load_balancer.GetAtt("CanonicalHostedZoneID")
)
)
template.add_output(
Output("DNSName", Value=load_balancer.GetAtt("DNSName"))
)
def add_listener(self, template):
# Choose proper certificate source ?-> always acm?
acm_cert = Join("", [
"arn:aws:acm:",
Ref("AWS::Region"),
":",
Ref("AWS::AccountId"),
":certificate/", Ref("ALBCertName")])
# We probably don't need this code for an IAM Cert
iam_cert = Join("", [
"arn:aws:iam::",
Ref("AWS::AccountId"),
":server-certificate/",
Ref("ALBCertName")])
cert_id = If("UseIAMCert", iam_cert, acm_cert)
alb_name = ALB_NAME
with_ssl = alb.Listener(
ALB_LISTENER % self.name,
Port="443",
Protocol="HTTPS",
LoadBalancerArn=Ref(alb_name),
DefaultActions=[alb.Action(
Type="forward",
TargetGroupArn=Ref(TARGET_GROUP_DEFAULT)
)],
Certificates=[alb.Certificate(
CertificateArn=cert_id
)]
)
template.add_resource(with_ssl)
template.add_output(
Output("IAlbListener", Value=with_ssl.Ref())
)
def create_default_target_group(self, template):
"""
:param template:
:param instance_name:
"""
template.add_resource(alb.TargetGroup(
TARGET_GROUP_DEFAULT,
Port='80',
Protocol="HTTP",
VpcId=Ref("VpcId"),
))
```
#### File: steps/ec2/target_group.py
```python
from troposphere import elasticloadbalancingv2 as alb
from cumulus.chain import step
from cumulus.steps.ec2 import META_TARGET_GROUP_NAME
class TargetGroup(step.Step):
def __init__(self,
port,
vpc_id, ):
step.Step.__init__(self,
name='TargetGroup')
self.port = port
self.vpc_id = vpc_id
def handle(self, chain_context):
chain_context.metadata[META_TARGET_GROUP_NAME] = self.name
template = chain_context.template
template.add_resource(alb.TargetGroup(
self.name,
HealthCheckPath="/",
HealthCheckIntervalSeconds="30",
HealthCheckProtocol="HTTP",
HealthCheckTimeoutSeconds="10",
HealthyThresholdCount="4",
Matcher=alb.Matcher(HttpCode="200"),
Port=self.port,
Protocol="HTTP",
UnhealthyThresholdCount="3",
VpcId=self.vpc_id
))
``` |
{
"source": "john-shine/build-bitcoin-SBS",
"score": 2
} |
#### File: john-shine/build-bitcoin-SBS/sign.py
```python
from constants import *
import hashlib
import RPC
import base58
import ecdsa
from binascii import unhexlify, hexlify
import random
# secp256k1 elliptic curve domain parameters
_p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
_a = 0x0000000000000000000000000000000000000000000000000000000000000000
_b = 0x0000000000000000000000000000000000000000000000000000000000000007
_Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
_Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
_r = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
def gen_random_secret():
    # NOTE: random is not a cryptographically secure source; acceptable for demos only.
    byte_array = bytes(random.randint(0, 255) for _ in range(32))
    return int.from_bytes(byte_array, byteorder='big')
"""
私钥产生公钥
"""
def sign(key):
# K 公钥
# k 私钥
# G 是生成点
K = k * G
def validate_address(bitcoin_address):
proxy = RPC.RPCServer(TESTNET_SERVICE_URL)
info = proxy.validateaddress(bitcoin_address)
try:
assert info['isvalid'] == True
except:
return False
return True
def get_point_pubkey(point):
    # The parity of y selects the compressed-key prefix: 03 for odd y, 02 for even y.
if point.y() & 1:
        # odd y
key = '03' + '%064x' % point.x()
else:
        # even y
key = '02' + '%064x' % point.x()
# print(key)
return key
def get_point_pubkey_uncompressed(point):
key = '04'+ '%064x' % point.x() + '%064x' % point.y()
return key
# :param public_key: str compressed public key in hex format
def uncompress_point_by_pubkey(public_key):
# remove prefix
point_x = int(public_key[2:], 16)
beta = pow(int(point_x * point_x * point_x + _a * point_x + _b), int((_p + 1) // 4), int(_p))
if (beta + int(public_key[:2], 16)) % 2:
point_y = _p - beta
else:
point_y = beta
class point(object):
def x():
return point_x
def y():
return point_y
return point
# :param str private_key: WIF-encoded private key
# :return str: compressed public key in hex format
def make_public_key(private_key) -> str:
    # A WIF-compressed private key: strip the leading version byte (0x80 mainnet / 0xEF testnet)
    # and the trailing 0x01 compression flag to recover the 32-byte secret.
    decoded_secret = base58.base58check_decode(private_key)
    hex_secret = decoded_secret[1:][:-1]
curve_secp256k1 = ecdsa.ellipticcurve.CurveFp(_p, _a, _b)
generator_secp256k1 = ecdsa.ellipticcurve.Point(curve_secp256k1, _Gx, _Gy, _r)
    # Bitcoin's secp256k1 generator point
generator = generator_secp256k1
point = int.from_bytes(hex_secret, byteorder='big') * generator
# Get the public key point.
return get_point_pubkey(point)
# :param str public_key: compressed public key in hex format
# :param str prefix: version byte in hex ('00' for mainnet P2PKH, '6f' for testnet)
# @todo does public_key need to be decompressed first? this varies between schemes
def make_bitcoin_address(public_key, prefix):
ripemd160 = hashlib.new('ripemd160')
ripemd160.update(hashlib.sha256(unhexlify(public_key)).digest())
hash160 = ripemd160.digest()
return base58.base58check_encode(unhexlify(prefix) + hash160)
```
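A rough end-to-end sketch of the helpers above: derive the compressed public key from a WIF private key, then hash it into an address. The WIF string is a placeholder that must be replaced with a real WIF-compressed key, the repository's `base58`, `constants` and `RPC` modules are assumed to be importable, and `'6f'` is the standard testnet P2PKH version byte (`'00'` for mainnet):
```python
from sign import make_public_key, make_bitcoin_address

wif_private_key = "replace-with-a-real-WIF-compressed-private-key"  # placeholder
public_key = make_public_key(wif_private_key)      # 33-byte compressed key as hex
address = make_bitcoin_address(public_key, "6f")   # testnet P2PKH address
print(public_key, address)
```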
#### File: john-shine/build-bitcoin-SBS/utxos.py
```python
def select_outputs(unspent, min_value):
    # An empty UTXO list means there is nothing to select from.
if not unspent:
return None
    # Split the UTXO set into two lists:
    # UTXOs smaller than min_value
lessers = [utxo for utxo in unspent if utxo.value < min_value]
key_func = lambda utxo: utxo.value
    # UTXOs greater than or equal to min_value
greaters = [utxo for utxo in unspent if utxo.value >= min_value]
if greaters:
        # At least one UTXO covers min_value on its own: spend the smallest such UTXO.
        min_greater = min(greaters, key=key_func)
change = min_greater.value - min_value
return [min_greater], change
    # No single UTXO is large enough, so combine several smaller ones.
    # Sort from largest to smallest so we use as few inputs as possible.
lessers.sort(key=key_func, reverse=True)
result = []
accum = 0
for utxo in lessers:
result.append(utxo)
accum += utxo.value
if accum >= min_value:
change = accum - min_value
return result, "Change: %d Satoshis" % change
# not found
return None, 0
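

# --- Illustrative usage (not in the original file) ----------------------------
# select_outputs() only relies on each UTXO exposing a numeric `value` attribute,
# so a namedtuple stands in for a real UTXO here; the amounts are made-up satoshis.
if __name__ == "__main__":
    from collections import namedtuple

    UTXO = namedtuple('UTXO', ['txid', 'vout', 'value'])
    unspent = [
        UTXO('aa' * 32, 0, 15000),
        UTXO('bb' * 32, 1, 40000),
        UTXO('cc' * 32, 0, 90000),
    ]
    # The single 90000-satoshi UTXO covers the 50000 target, leaving 40000 change.
    print(select_outputs(unspent, 50000))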
``` |
{
"source": "john-shine/python-cardano-chain-importer",
"score": 2
} |
#### File: john-shine/python-cardano-chain-importer/db.py
```python
from lib.logger import get_logger
from lib import utils
import json
from datetime import datetime
import psycopg2
from config import config
from operator import itemgetter
from psycopg2.extras import RealDictCursor, execute_values
from constants.transaction import TX_SUCCESS_STATUS, TX_PENDING_STATUS
class DB:
def __init__(self):
self._cursor = None
self._connect = None
self.logger = get_logger('DB')
@property
def conn(self):
if not self._cursor or self._cursor.closed:
self._cursor = self.connect.cursor(cursor_factory=RealDictCursor)
return self._cursor
@property
def connect(self):
if not self._connect:
self._connect = psycopg2.connect(
dbname=config['db']['database'],
user=config['db']['user'],
password=<PASSWORD>['<PASSWORD>'],
host=config['db']['host'],
port=config['db']['port'],
connect_timeout=config['db']['timeout']
)
self._connect.autocommit = True
return self._connect
# def auto_commit(self, is_auto=True):
# self.connect.autocommit = is_auto
def close(self):
if self._connect:
self._connect.close()
async def save_utxos(self, utxos: list):
sql = 'INSERT INTO utxos '\
'(utxo_id, tx_hash, tx_index, receiver, amount, block_num) values %s '\
'ON CONFLICT (utxo_id) DO UPDATE '\
'SET tx_hash=EXCLUDED.tx_hash, '\
' tx_index=EXCLUDED.tx_index, '\
' receiver=EXCLUDED.receiver, '\
' amount=EXCLUDED.amount, '\
' block_num=EXCLUDED.block_num'
self.logger.info('store %d utxos in db', len(utxos))
with self.conn as cursor:
execute_values(cursor, sql, utxos, "(%(utxo_id)s, %(tx_hash)s, %(tx_index)s, %(receiver)s, %(amount)s, %(block_num)s)")
return True
async def get_best_block_num(self):
sql = 'SELECT block_hash, block_height, epoch, slot FROM blocks ORDER BY block_height DESC LIMIT 1'
with self.conn as cursor:
cursor.execute(sql)
row = cursor.fetchone()
if not row:
return {'height': 0, 'epoch': 0, 'hash': None, 'slot': None}
return {
'hash': row['block_hash'],
'height': row['block_height'],
'epoch': row['epoch'],
'slot': row['slot']
}
async def update_best_block_num(self, best_block_num: int):
self.logger.info('update best block num in db to: %d', best_block_num)
with self.conn as cursor:
cursor.execute('UPDATE bestblock SET best_block_num=%s', (best_block_num, ))
return True
async def rollback_txs_from_height(self, block_height: int):
self.logger.info('rollback transactions from block height: %s', block_height)
sql = 'UPDATE txs '\
'SET tx_state=%s, block_num=%s, time=%s, last_update=%s '\
'WHERE block_num > %s'
with self.conn as cursor:
            data = TX_PENDING_STATUS, None, None, datetime.now(), block_height
cursor.execute(sql, data)
return True
async def delete_invalid_utxos_and_backup(self, block_height: int):
self.logger.info('delete invalid utxos from block height: %s', block_height)
sql = 'DELETE FROM utxos WHERE block_num > %s'
with self.conn as cursor:
cursor.execute(sql, (block_height, ))
sql = 'DELETE FROM utxos_backup WHERE block_num > %s'
with self.conn as cursor:
cursor.execute(sql, (block_height, ))
return True
async def rollback_utxos_backup(self, block_height: int):
self.logger.info('rollback utxo_backup to block height: %s', block_height)
await self.delete_invalid_utxos_and_backup(block_height)
sql = 'WITH moved_utxos AS ('\
' DELETE FROM utxos_backup '\
' WHERE block_num < %s AND delete_block_num > %s RETURNING *'\
') '\
'INSERT INTO utxos SELECT * FROM moved_utxos'
with self.conn as cursor:
cursor.execute(sql, (block_height, block_height))
return True
async def rollback_blocks_from_height(self, block_height: int):
self.logger.info('rollback block_history to block height: %s', block_height)
with self.conn as cursor:
cursor.execute('DELETE FROM blocks WHERE block_height > %s', (block_height, ))
return True
async def save_block(self, block):
if not block:
return False
sql = 'INSERT INTO blocks (block_hash, block_height, epoch, slot) VALUES '\
'(%(block_hash)s, %(block_height)s, %(epoch)s, %(slot)s)'
try:
with self.conn as cursor:
cursor.execute(sql, vars(block))
except Exception as e:
self.logger.exception('error on save block: %s', block)
return False
return True
async def save_blocks(self, blocks):
if not blocks:
return False
sql = 'INSERT INTO blocks (block_hash, block_height, epoch, slot) VALUES %s'
try:
with self.conn as cursor:
execute_values(
cursor,
sql,
blocks,
"(%(block_hash)s, %(block_height)s, %(epoch)s, %(slot)s)"
)
except Exception as e:
self.logger.exception('error on save %s blocks', len(blocks))
return False
return True
async def save_tx_addresses(self, tx_id: str, addresses: list):
db_fields = [{
'tx_hash': tx_id,
'address': utils.fix_long_address(address),
} for address in addresses]
query = 'INSERT INTO tx_addresses (tx_hash, address) VALUES %s '\
'ON CONFLICT (tx_hash, address) DO UPDATE '\
'SET tx_hash=EXCLUDED.tx_hash, address=EXCLUDED.address'
try:
with self.conn as cursor:
execute_values(cursor, query, db_fields, "(%(tx_hash)s, %(address)s)")
except Exception as e:
self.logger.exception('addresses for %s already stored', tx_id)
return False
return True
async def remove_and_backup_utxos(self, utxo_ids: list, deleted_block_num: int):
if not utxo_ids:
return False
        sql = 'WITH moved_utxos AS (DELETE FROM utxos WHERE utxo_id IN %s RETURNING *) '\
' INSERT INTO utxos_backup '\
' (utxo_id, tx_hash, tx_index, receiver, amount, block_num, deleted_block_num) '\
' (SELECT utxo_id, tx_hash, tx_index, receiver, amount, block_num, %s AS deleted_block_num FROM moved_utxos)'
str_ids = ', '.join(utxo_ids)
with self.conn as cursor:
            cursor.execute(sql, (tuple(utxo_ids), deleted_block_num))
self.logger.info('backup and remove utxos: %s', str_ids)
return True
async def get_utxos_by_ids(self, utxo_ids: list):
if not utxo_ids:
return []
sql = 'SELECT * FROM utxos WHERE utxo_id IN (\'{}\')'
with self.conn as cursor:
cursor.execute(sql.format('\', \''.join(utxo_ids)))
rows = cursor.fetchall()
return [{
'address': row['receiver'],
'amount': row['amount'],
'id': row['utxo_id'],
'index': row['tx_index'],
'txHash': row['tx_hash'],
} for row in rows]
async def get_txs_by_hashes(self, tx_hashes: list):
if not tx_hashes:
return {}
sql = 'SELECT * FROM txs where hash in (\'{}\')'
with self.conn as cursor:
cursor.execute(sql.format('\', \''.join(tx_hashes)))
rows = cursor.fetchall()
res = {}
for row in rows:
res[row['hash']] = (row['address'], row['amount'])
return res
async def is_genesis_loaded(self):
        # Check whether the utxos and blocks tables already contain rows (i.e. genesis data was loaded).
query = 'SELECT (SELECT count(*) FROM utxos) + (SELECT count(*) FROM blocks) as cnt'
with self.conn as cursor:
cursor.execute(query)
count = cursor.fetchone()
return count['cnt'] > 0
async def convert_txs(self, tx: dict, tx_utxos: dict):
inputs, outputs, tx_id, block_num, block_hash = tx['inputs'], tx['outputs'], tx['id'], tx['blockNum'], tx['block_hash']
self.logger.info('store tx: %s', tx_utxos)
if not tx_utxos:
input_utxo_ids = []
for inp in inputs:
input_utxo_ids.append(utils.get_utxo_id(inp))
input_utxos = await self.get_utxos_by_ids(input_utxo_ids)
else:
input_utxos = tx_utxos
input_addresses = [inp['address'] for inp in input_utxos]
output_addresses = [utils.fix_long_address(out['address']) for out in outputs]
input_ammounts = [int(item['amount']) for item in input_utxos]
output_ammounts = [int(item['value']) for item in outputs]
return {
'hash': tx_id,
'inputs': json.dumps(input_utxos),
'inputs_address': input_addresses,
'inputs_amount': input_ammounts,
'outputs_address': output_addresses,
'outputs_amount': output_ammounts,
'block_num': block_num,
'block_hash': block_hash,
'tx_state': tx['status'] if tx.get('status') else TX_SUCCESS_STATUS,
'tx_body': tx['txBody'],
'tx_ordinal': tx['txOrdinal'],
'time': tx['txTime'],
'last_update': datetime.now()
}
async def save_txs(self, tx: dict, tx_utxos: dict):
tx_db_fields = await self.convert_txs(tx, tx_utxos)
sql = 'INSERT INTO txs ({}) VALUES ({}) '\
'ON CONFLICT (hash) DO UPDATE '\
'SET block_num=EXCLUDED.block_num, '\
' block_hash=EXCLUDED.block_hash, '\
' time=EXCLUDED.time, '\
' tx_state=EXCLUDED.tx_state, '\
' last_update=EXCLUDED.last_update, '\
' tx_ordinal=EXCLUDED.tx_ordinal'
sql = sql.format(
', '.join(tx_db_fields.keys()),
', '.join(['%s'] * len(tx_db_fields))
)
self.logger.info('insert into txs: %s', sql)
with self.conn as cursor:
cursor.execute(sql, tuple(tx_db_fields.values()))
        addresses = list(set(tx_db_fields['inputs_address'] + tx_db_fields['outputs_address']))
        await self.save_tx_addresses(tx_db_fields['hash'], addresses)
```
#### File: python-cardano-chain-importer/lib/utils.py
```python
import base58
import base64
import binascii
from cbor import cbor
from operator import itemgetter
from hashlib import blake2b, sha3_256
def generate_utxo_hash(address):
data = base58.b58decode(address)
return blake2b(data, digest_size=32).hexdigest()
def get_utxo_id(input):
return f'{input["txId"]}{input["idx"]}'
def struct_utxo(receiver, amount, utxo_hash, tx_index=0, block_num=0):
return {
'utxo_id': f'{utxo_hash}{tx_index}',
'tx_hash': utxo_hash,
'tx_index': tx_index,
'receiver': receiver,
'amount': amount,
'block_num': block_num,
}
"""
* We need to use this function cuz there are some extra-long addresses
* existing on cardano mainnet. Some of them exceed 10K characters in length,
* and Postgres can't store it.
* We don't care about making these non-standard addresses spendable, so any address
* over 1K characters is just truncated.
"""
def fix_long_address(address: str):
if isinstance(address, bytes):
address = address.decode()
if address and len(address) > 1000:
        return f'{address[0:497]}...{address[-500:]}'
else:
return address
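# Illustration: a hypothetical 5000-character address comes back as the first
# 497 characters + "..." + the last 500 characters, i.e. exactly 1000 characters;
# anything at or under 1000 characters passes through unchanged.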
def get_txs_utxos(txs):
ret = {}
for tx in txs:
tx_id, outputs, block_num = itemgetter('id', 'outputs', 'blockNum')(tx)
for index, output in enumerate(outputs):
utxo = struct_utxo(
fix_long_address(output['address']),
output['value'],
tx_id,
index,
block_num
)
ret[f'{tx_id}{index}'] = utxo
return ret
def decoded_tx_to_base(decoded_tx):
if isinstance(decoded_tx, list):
if len(decoded_tx) == 2:
signed = decoded_tx
return signed[0]
elif len(decoded_tx) == 3:
base = decoded_tx
return base
raise Exception('invalid decoded tx structure: %s' % decoded_tx)
class CborIndefiniteLengthArray:
def __call__(self, elements):
ret = [bytes([0x9f])]
for e in elements:
ret.append(cbor.dumps(e))
ret.append(bytes([0xff]))
return ret
def pack_raw_txid_and_body(decoded_tx_body):
if not decoded_tx_body:
raise Exception('can not decode empty tx!')
try:
inputs, outputs, attributes = decoded_tx_to_base(decoded_tx_body)
cbor_indef_array = CborIndefiniteLengthArray()
enc = cbor.dumps([
cbor_indef_array(inputs),
cbor_indef_array(outputs),
attributes,
])
tx_id = blake2b(enc, digest_size=32).hexdigest()
tx_body = enc.hex()
return [tx_id, tx_body]
except Exception as e:
raise Exception(f'fail to convert raw tx to ID! {str(e)}')
def convert_raw_tx_to_obj(tx: list, extraData: dict):
tx_inputs, tx_outputs, tx_witnesses = tx[0][0], tx[0][1], tx[1]
tx_id, tx_body = pack_raw_txid_and_body(tx)
inputs, outputs, witnesses = [], [], []
for inp in tx_inputs:
types, tagged = inp
input_tx_id, idx = cbor.loads(tagged.value)
inputs.append({'type': types, 'txId': input_tx_id.hex(), 'idx': idx})
for out in tx_outputs:
address, value = out
outputs.append({'address': base58.b58encode(cbor.dumps(address)), 'value': value})
for wit in tx_witnesses:
types, tagged = wit
witnesses.append({'type': types, 'sign': cbor.loads(tagged.value)})
ret = {
'id': tx_id,
'inputs': inputs,
'outputs': outputs,
'witnesses': witnesses,
'txBody': tx_body
}
ret.update(extraData)
return ret
def header_to_id(header, tx_type: int):
header_data = cbor.dumps([tx_type, header])
return blake2b(header_data, digest_size=32).hexdigest()
def redeem_key_to_address(public_redeem_key):
pk = base64.urlsafe_b64decode(public_redeem_key)
addr = [2, [2, pk], {}]
addr_hash = blake2b(sha3_256(cbor.dumps(addr, sort_keys=True)).digest(), digest_size=28).digest()
tag = cbor.dumps([addr_hash, {}, 2], sort_keys=True)
address = cbor.dumps([
cbor.Tag(24, tag),
binascii.crc32(tag)
])
return base58.b58encode(address).decode()
```
#### File: python-cardano-chain-importer/models/http_bridge.py
```python
import json
from urllib.parse import urljoin
from models.network import Network
from models.parser import Parser
from lib.logger import get_logger
from tornado.httpclient import AsyncHTTPClient, HTTPClientError
class HttpBridge:
def __init__(self):
self.network_url = Network().network_url
self.parser = Parser()
self.client = AsyncHTTPClient()
self.logger = get_logger('http-bridge')
async def get(self, path: str, params={}):
endpoint_url = urljoin(self.network_url, path)
self.logger.info('GET %s params: %s', endpoint_url, params)
try:
resp = await self.client.fetch(endpoint_url, method='GET')
return resp
        except ConnectionRefusedError:
            raise Exception('cardano-http-bridge is not accessible (ECONNREFUSED)')
        except HTTPClientError:
            raise
async def post(self, path: str, data: str):
endpoint_url = urljoin(self.network_url, path)
self.logger.info('POST %s data: %s', endpoint_url, data)
try:
resp = await self.client.fetch(endpoint_url, method='POST', body=data)
return resp
        except ConnectionRefusedError:
            raise Exception('cardano-http-bridge is not accessible (ECONNREFUSED)')
        except HTTPClientError:
            raise
async def get_json(self, path: str):
resp = await self.get(path)
try:
resp = json.loads(resp.body)
return resp
except Exception as e:
raise Exception('invalid json resp: %s' % str(resp.body)[:100])
async def get_tip(self):
resp = await self.get_json('tip')
return resp
async def post_signed_tx(self, payload: str):
resp = await self.post('txs/signed', payload)
return resp
async def get_epoch(self, id: int):
resp = await self.get_json(f'epoch/{id}')
return resp
async def get_block(self, id: str):
resp = await self.get_json(f'block/{id}')
return resp
async def get_genesis(self, hash: str):
return await self.get_json(f'genesis/{hash}')
async def get_status(self):
resp = await self.get_json('status')
return resp
async def get_block_by_height(self, height: int):
resp = await self.get(f'height/{height}')
return self.parser.parse_block(resp.body)
async def get_parsed_epoch_by_id(self, epoch_id: int, is_omit_ebb=False):
resp = await self.get(f'epoch/{epoch_id}')
blocks_iterator = self.parser.parse_epoch(resp.body, {'omitEbb': is_omit_ebb})
return blocks_iterator
```
#### File: python-cardano-chain-importer/models/network.py
```python
from config import config
from urllib.parse import urljoin
class Network:
def __init__(self):
network = config.get('network')
self.network_url = urljoin(config['bridgeUrl'], network['name']) + '/'
self.genesis_hash = network['genesis']
self.start_time = network['startTime']
self.network_magic = network['networkMagic']
``` |
{
"source": "johnshiver/algo_study_helper",
"score": 4
} |
#### File: algo_study_helper/problems/find_rotation_point.py
```python
INSTRUCTIONS = """
I opened up a dictionary to a page in the middle and started flipping through, looking for words I didn't know. I put each word I didn't know at increasing indices in a huge list I created in memory. When I reached the end of the dictionary, I started from the beginning and did the same thing until I reached the page I started at.
Now I have a list of words that are mostly alphabetical, except they start somewhere in the middle of the alphabet, reach the end, and then start from the beginning of the alphabet. In other words, this is an alphabetically ordered list that has been "rotated." For example:
words = [
'ptolemaic',
'retrograde',
'supplant',
'undulate',
'xenoepist',
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
]
Write a function for finding the index of the "rotation point," which is where I started working from the beginning of the dictionary. This list is huge (there are lots of words I don't know) so we want to be efficient here.
"""
from utils.decorators import time_this
@time_this
def solution(inputs):
    words = inputs
    first_word = words[0]
    floor_index = 0
    ceiling_index = len(words) - 1
while floor_index < ceiling_index:
# Guess a point halfway between floor and ceiling
        guess_index = floor_index + ((ceiling_index - floor_index) // 2)
# If guess comes after first word or is the first word
if words[guess_index] >= first_word:
# Go right
floor_index = guess_index
else:
# Go left
ceiling_index = guess_index
# If floor and ceiling have converged
if floor_index + 1 == ceiling_index:
# Between floor and ceiling is where we flipped to the beginning
# so ceiling is alphabetically first
return ceiling_index
test_case_inputs = [
]
```
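For reference, a self-contained sketch of the same binary-search idea, stripped of the `@time_this` decorator so it can run on its own; the sample list is the one from the instructions above and rotates at index 5.
```python
def find_rotation_point(words):
    """Binary search for the index where the rotated list restarts."""
    first_word = words[0]
    floor_index, ceiling_index = 0, len(words) - 1
    while floor_index < ceiling_index:
        guess_index = floor_index + ((ceiling_index - floor_index) // 2)
        if words[guess_index] >= first_word:
            floor_index = guess_index      # rotation point is to the right
        else:
            ceiling_index = guess_index    # rotation point is at or left of the guess
        if floor_index + 1 == ceiling_index:
            return ceiling_index

words = ['ptolemaic', 'retrograde', 'supplant', 'undulate', 'xenoepist',
         'asymptote', 'babka', 'banoffee', 'engender', 'karpatka', 'othellolagkage']
assert find_rotation_point(words) == 5
```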
#### File: algo_study_helper/problems/number_of_1_bits.py
```python
INSTRUCTIONS = """
Thanks leetcode: https://leetcode.com/problems/number-of-1-bits/description/
Write a function that takes an unsigned integer and returns the number of ’1' bits it has (also known as the Hamming weight).
For example, the 32-bit integer ’11' has binary representation 00000000000000000000000000001011, so the function should return 3
"""
from utils.decorators import time_this
@time_this
def solution(inputs):
n = inputs
count = 0
while n:
# adds 1 to count if right most bit is 1
count += n & 1
# shift bits to right by 1
n >>= 1
return count
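# Example: solution(11) inspects 0b1011 bit by bit (1, 1, 0, 1) and returns 3.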
import random
test_case_inputs = [random.randrange(1, 10000000) for _ in range(10000)]
``` |
{
"source": "johnshiver/football_tools",
"score": 2
} |
#### File: management/commands/calculate_weekly_scores.py
```python
from django.core.management.base import BaseCommand
from ...models import WeeklyStats, Player
class Command(BaseCommand):
def handle(self, *args, **options):
weekly_stats = WeeklyStats.objects.all()
for stat in weekly_stats:
stat.total_score = stat.calc_total_score()
stat.save()
print stat
for player in Player.objects.all():
player.calculate_draft_bot_score()
```
#### File: management/commands/fill_fantasy_draft_stats.py
```python
import csv
from django.core.management.base import BaseCommand
from ...models import Player
class Command(BaseCommand):
def handle(self, *args, **options):
with open("fantasy_stat.csv") as stat_file:
fantasy_stats = csv.reader(stat_file)
name = 1
team = 3
avg_adp_ = 7
std_dev_ = 8
i = 0
for stat in fantasy_stats:
i += 1
if i > 5:
first_name, last_name = tuple(stat[name].split())[:2]
avg_adp = float(stat[avg_adp_])
std_dev = float(stat[std_dev_])
team_abrv = stat[team]
try:
player = Player.objects.filter(first_name=first_name,
last_name=last_name)
if len(player) > 1:
player = player.filter(team__abrv=team_abrv)
elif len(player) < 1:
print "{} {} not found".format(first_name,
last_name)
continue
except Exception as e:
print "There was an error {} fetching {} {}".format(e,
first_name,
last_name)
else:
player = player.get()
player.avg_adp = avg_adp
player.std_dev = std_dev
print player, player.avg_adp
player.save()
``` |
{
"source": "johnshiver/vote_location",
"score": 2
} |
#### File: management/commands/report_district_url_stats.py
```python
import requests
from django.core.management.base import BaseCommand, CommandError
from districts.models import DistrictDetail
class Command(BaseCommand):
def handle(self, *args, **kwargs):
all_districts = DistrictDetail.objects.all()
good, bad = 0, 0
for district in all_districts:
r1 = requests.get(district.district_ballotpedia_url)
r2 = requests.get(district.politician_ballotpedia_url)
if not 200 <= r1.status_code < 300:
print(district.district_ballotpedia_url)
bad += 1
else:
good += 1
if not 200 <= r2.status_code < 300:
print(district.politician_ballotpedia_url)
bad += 1
else:
good += 1
print("Total good urls: {}".format(good))
print("Total bad urls: {}".format(bad))
``` |
{
"source": "JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare-",
"score": 3
} |
#### File: generating-traning-data-for-healthcare-machine-learningcare-/bin/blood_pressure.py
```python
from patient import Patient
def BloodPressure(bp, prefix=""):
"""Generates BloodPressure Observation JSON object"""
if "id" not in bp:
raise BaseException("BloodPressure requires 'id'!")
if "pid" not in bp:
raise BaseException("BloodPressure requires 'pid'!")
if prefix:
prefix += "-"
patient = Patient.mpi[bp["pid"]]
out = {
"resourceType": "Observation",
"id": prefix + bp["id"],
"status": "final",
"effectiveDateTime": bp["date"],
"subject": {
"reference": "Patient/" + prefix + bp["pid"]
},
"text": {
"status": "generated",
"div": '<div xmlns="http://www.w3.org/1999/xhtml">' +
"%s: Blood pressure %s/%s mmHg</div>" %
(bp["date"], bp["systolic"], bp["diastolic"])
},
"category": [
{
"coding": [
{
"system" : "http://hl7.org/fhir/observation-category",
"code" : "vital-signs",
"display": "Vital Signs"
}
],
"text": "Vital Signs"
}
],
"code": {
"coding": [
{
"system" : "http://loinc.org",
"code" : "55284-4",
"display": "Blood pressure"
}
],
"text": "Blood pressure"
},
"performer": [
{
"reference": "Practitioner/" + prefix + "Practitioner-" + patient.gp
}
],
"component": [
{
"code": {
"coding": [
{
"system" : "http://loinc.org",
"code" : "8480-6",
"display": "Systolic blood pressure"
}
],
"text": "Systolic blood pressure"
},
"valueQuantity": {
"value": bp["systolic"],
"unit": "mmHg",
"system": "http://unitsofmeasure.org",
"code": "mm[Hg]"
}
},
{
"code": {
"coding": [
{
"system" : "http://loinc.org",
"code" : "8462-4",
"display": "Diastolic blood pressure"
}
],
"text": "Diastolic blood pressure"
},
"valueQuantity": {
"value": bp["diastolic"],
"unit": "mmHg",
"system": "http://unitsofmeasure.org",
"code": "mm[Hg]"
}
}
]
}
# Encounter
if "encounter_id" in bp:
out["context"] = {
"reference": "Encounter/%s"%bp["encounter_id"]
}
# bodySite
if "site_system" in bp and "site_code" in bp and "site" in bp:
out["bodySite"] = {
"coding": [
{
"system" : bp["site_system"],
"code" : bp["site_code"],
"display": bp["site"]
}
],
"text": bp["site"]
}
    # method
if "method_system" in bp and "method_code" in bp and "method" in bp:
out["method"] = {
"coding": [
{
"system" : bp["method_system"],
"code" : bp["method_code"],
"display": bp["method"]
}
],
"text": bp["method"]
}
# position
if "position" in bp and "position_system" in bp and "position_code" in bp:
out["extension"] = [
{
"url": "http://fhir-registry.smarthealthit.org/StructureDefinition/vital-signs#position",
"valueCodeableConcept" : {
"coding": [
{
"system": bp["position_system"],
"code" : bp["position_code"],
"display": bp["position"]
}
],
"text": bp["position"]
}
}
]
return out
```
#### File: generating-traning-data-for-healthcare-machine-learningcare-/bin/encounter.py
```python
def Encounter(data, prefix=""):
"""Generates an Encounter JSON"""
if prefix:
prefix += "-"
resource = {
"resourceType": "Encounter",
"id": data["id"],
"status": "finished",
"class": {
"code": data["encounter_type"]
},
"text": {
"status": "generated",
"div": '<div xmlns="http://www.w3.org/1999/xhtml">' +
data["start_date"] +": " + data["encounter_type"] +
" encounter</div>"
},
"type": [
{
"coding": [
{
"system": "http://snomed.info",
"code": "270427003",
"display": "Patient-initiated encounter"
}
],
"text": "Patient-initiated " + data["encounter_type"] + " encounter"
}
],
"subject": {
"reference": "Patient/" + prefix + data["pid"]
},
"period": {
"start": data["start_date"],
"end": data["end_date"]
}
}
return resource
```
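A rough usage sketch for the builder above; every value in `data` below is invented purely for illustration, and the import assumes the file is on the Python path as `encounter`.
```python
from encounter import Encounter

# Hypothetical values covering only the keys the builder reads.
data = {
    "id": "Encounter-67",
    "pid": "99912345",
    "encounter_type": "ambulatory",
    "start_date": "2016-01-15T10:00:00",
    "end_date": "2016-01-15T10:30:00",
}

resource = Encounter(data, prefix="smart")
assert resource["resourceType"] == "Encounter"
assert resource["subject"]["reference"] == "Patient/smart-99912345"
```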
#### File: generating-traning-data-for-healthcare-machine-learningcare-/bin/generate.py
```python
import argparse
# import sys
import os
from patient import Patient
from vitals import VitalSigns
from lab import Lab
from socialhistory import SocialHistory
from familyhistory import FamilyHistory
from immunization import Immunization
from procedure import Procedure
from condition import Condition
from med import Med
from refill import Refill
from document import Document
from allergy import Allergy
from clinicalnote import ClinicalNote
from practitioner import Practitioner
def initData():
"""Load data and mappings from Raw data files and mapping files"""
Patient.generate()
Patient.load()
VitalSigns.load()
Lab.load()
Procedure.load()
Immunization.load()
FamilyHistory.load()
SocialHistory.load()
Condition.load()
Med.load()
Refill.load()
Document.load()
Allergy.load()
ClinicalNote.load()
Practitioner.load()
def displayPatientSummary(pid):
"""writes a patient summary to stdout"""
if not pid in Patient.mpi: return
print (Patient.mpi[pid].asTabString())
print ("PROBLEMS: ")
# if not pid in Problem.problems: print "None",
# else:
# for prob in Problem.problems[pid]: print prob.name+"; ",
# print "\nMEDICATIONS: ",
# if not pid in Med.meds: print "None",
# else:
# for med in Med.meds[pid]:
# print med.name+"{%d}; "%len(Refill.refill_list(pid,med.rxn)),
# print "\nLABS: ",
# if not pid in Lab.results: print "None",
# else:
# print "%d results"%len(Lab.results[pid])
print (",\n")
if __name__ == '__main__':
# Create the parser
PARSER = argparse.ArgumentParser(description='SMART on FHIR Test Data Generator')
GROUP = PARSER.add_mutually_exclusive_group()
GROUP.add_argument(
'--summary',
metavar='pid',
nargs='?',
const="all",
help="displays patient summary (default is 'all')"
)
GROUP.add_argument(
'--write-fhir',
dest='writeFHIR',
metavar='dir',
nargs='?',
const='.',
help="writes patient XML files to an FHIR sample data directory dir (default='.')"
)
PARSER.add_argument(
'--id-prefix',
dest='prefix',
metavar='id_prefix',
nargs='?',
const='',
help="adds the given prefix to the FHIR resource IDs (default=none)"
)
PARSER.add_argument(
'--base-url',
dest='baseURL',
metavar='base_url',
nargs='?',
const='',
help="uses the supplied URL base to generate absolute resource references (default='')"
)
ARGS = PARSER.parse_args()
# print summary ------------------------------------------------------------
if ARGS.summary:
initData()
if ARGS.summary == "all": # Print a summary of all patients
for pid in Patient.mpi:
displayPatientSummary(pid)
else: # Just print a single patient's summary
displayPatientSummary(ARGS.summary)
PARSER.exit()
if ARGS.writeFHIR:
import fhir
print ("Writing files to %s:"%ARGS.writeFHIR)
initData()
path = ARGS.writeFHIR
baseURL = ARGS.baseURL or ""
if not os.path.exists(path):
PARSER.error("Invalid path: '%s'.Path must already exist."%path)
if ARGS.prefix:
prefix = ARGS.prefix
else:
prefix = None
for pid in Patient.mpi:
fhir.FHIRSamplePatient(pid, path, baseURL).writePatientData(prefix)
# Show progress with '.' characters
# print "%s %s - %s" % (baseURL, prefix, pid)
# sys.stdout.flush()
# PARSER.exit(0, "\nDone writing %d patient FHIR files!\n"%len(Patient.mpi))
PARSER.exit()
PARSER.error("No arguments given")
```
#### File: generating-traning-data-for-healthcare-machine-learningcare-/bin/socialhistory.py
```python
import csv
from testdata import SOCIALHISTORY_FILE
from testdata import rndDate
from patient import Patient
SMOKINGCODES = {
'428041000124106': 'Current some day smoker',
'266919005' : 'Never smoker',
'449868002' : 'Current every day smoker',
'266927001' : 'Unknown if ever smoked',
'8517006' : 'Former smoker'
}
class SocialHistory(object):
"""Create instances of SocialHistory; also maintains socialHistory by patient id"""
socialHistories = {} # Dictionary of socialHistory by patient ID
@classmethod
def load(cls):
"""Loads patient SocialHistory"""
# Loop through socialHistories and build patient socialHistory lists:
histories = csv.reader(open(SOCIALHISTORY_FILE, 'U'), dialect='excel-tab')
header = next(histories)
for history in histories:
cls(dict(zip(header, history))) # Create a socialHistory instance
def __init__(self, p):
self.pid = p['PID']
self.id = p['ID']
self.smokingStatusCode = p['SMOKINGSTATUSCODE']
self.smokingStatusText = SMOKINGCODES[self.smokingStatusCode]
# Append socialHistory to the patient's socialHistory list:
if self.pid in self.__class__.socialHistories:
raise "Found >1 socialHistory for a patient"
else:
self.__class__.socialHistories[self.pid] = self
def toJSON(self, prefix=""):
if prefix:
prefix += "-"
patient = Patient.mpi[self.pid]
return {
"request": {
"method": "PUT",
"url": "Observation/" + prefix + "smokingstatus-" + self.id
},
"resource": {
"id": prefix + "smokingstatus-" + self.id,
"resourceType": "Observation",
"status": "final",
"identifier": [
{
"use" : "official",
"system": "http://www.bmc.nl/zorgportal/identifiers/observations",
"value" : prefix + self.id
}
],
"text": {
"status": "generated",
"div": '<div xmlns="http://www.w3.org/1999/xhtml">' +
'Tobacco smoking status: %s</div>'%self.smokingStatusText
},
"performer": [
{
"reference": "Practitioner/" + prefix + "Practitioner-" + patient.gp
}
],
"effectiveDateTime": rndDate(2016).isoformat(),
"code": {
"coding": [
{
"system" : "http://loinc.org",
"code" : "72166-2",
"display": "Tobacco smoking status"
}
],
"text": "Tobacco smoking status"
},
"subject": {
"reference": "Patient/" + prefix + self.pid
},
"category": [
{
"coding": [
{
"system" : "http://hl7.org/fhir/observation-category",
"code" : "social-history",
"display": "Social History"
}
],
"text": "Social History"
}
],
"valueCodeableConcept": {
"coding": [
{
"system" : "http://snomed.info/sct",
"code" : self.smokingStatusCode,
"display": self.smokingStatusText
}
],
"text": self.smokingStatusText
}
}
}
``` |
{
"source": "johnshumon/fastapi-boilerplate",
"score": 2
} |
#### File: handlers/v1/home.py
```python
from typing import Any
from fastapi import APIRouter
router = APIRouter()
@router.get("")
async def read_root() -> Any:
"""Main handler for the root path"""
return {"message": "Hello FastAPI!"}
```
#### File: handlers/v1/token.py
```python
from typing import Any
from fastapi import APIRouter
from app.auth import generate_token
from app.schemas import CreateToken, CreateTokenResponse
router = APIRouter()
@router.post("", response_model=CreateTokenResponse, status_code=201)
async def create_token(body: CreateToken) -> Any:
"""
JWT handler.
Takes user details and uses user-email to create a
JWT as a access token.
"""
access_token = generate_token(body.email)
return {"access_token": access_token}
```
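A hedged usage sketch with FastAPI's `TestClient`; the `/api/v1/token` path is an assumption, since neither `settings.API_V1_STR` nor the router registration is shown here, and the `CreateToken` schema may require more fields than the email used below.
```python
from fastapi.testclient import TestClient

from app.server import app

client = TestClient(app)

# "/api/v1/token" is an assumed mount point; adjust to the real prefix.
resp = client.post("/api/v1/token", json={"email": "user@example.com"})
print(resp.status_code, resp.json())  # expected: 201 and {"access_token": "..."}
```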
#### File: handlers/v1/user.py
```python
from typing import Any
from fastapi import APIRouter, Depends, Response, status
from sqlalchemy.orm import Session
from app.auth import generate_token
from app.crud.user import user
from app.db.session import db_connection
from app.schemas import CreateUser, CreateUserResponse
router = APIRouter()
@router.post("", response_model=CreateUserResponse, status_code=201)
async def create_user(userdata: CreateUser, response: Response, db: Session = Depends(db_connection)) -> Any:
"""
User signup handler
Takes user details, and uses user-email to create a
JWT as a access token.
"""
try:
user.create(userdata, db)
access_token = generate_token(userdata.email)
return {
"status": True,
"message": "Successfully created the user!",
"access_token": access_token,
}
except Exception as err:
# TODO: replace print with logger
print("-----------------------------")
print("Error: ", err)
print("-----------------------------")
        response.status_code = status.HTTP_400_BAD_REQUEST
return {
"status": False,
"message": "Could not create the user.",
"access_token": "",
}
```
#### File: app/db/session.py
```python
from typing import Any, Generator
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app.core import settings
engine = create_engine(settings.DB_URL, pool_pre_ping=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def initialise() -> bool:
"""
Database connection initializer.
"""
try:
if engine.connect():
return True
except Exception as err:
print("-----------------------------")
print("Err: ", err)
print("-----------------------------")
raise err
def db_connection() -> Generator:
"""
Database connection generator.
Returns a generator object if engine connects successfully.
"""
try:
if initialise():
db = SessionLocal()
yield db
finally:
db.close()
```
#### File: fastapi-boilerplate/app/server.py
```python
import uvicorn
from fastapi import FastAPI
from app.api.handlers.v1 import api_router
from app.core import settings
log_config = uvicorn.config.LOGGING_CONFIG
app = FastAPI(title=settings.PROJECT_NAME)
app.include_router(api_router, prefix=settings.API_V1_STR)
def init_server() -> None:
"""
Configure logging and log format.
Spin up the server.
"""
# Log format
log_config["formatters"]["access"][
"fmt"
] = "[%(asctime)s.%(msecs)d] | %(levelname)s | [%(name)s:%(filename)s:%(lineno)d] - %(message)s"
log_config["formatters"]["default"][
"fmt"
] = "[%(asctime)s.%(msecs)d] | %(levelname)s | [%(name)s:%(filename)s:%(lineno)d] - %(message)s"
# Date format
date_fmt = "%Y-%m-%d:%H:%M:%S"
log_config["formatters"]["default"]["datefmt"] = date_fmt
log_config["formatters"]["access"]["datefmt"] = date_fmt
# Run the server
uvicorn.run(
"app.server:app", log_config=log_config, host="0.0.0.0", port=9000, reload=True
)
``` |
{
"source": "johnsimcall/openshift4-deploy",
"score": 2
} |
#### File: openshift4-deploy/app/bundle.py
```python
import logging
import os
import subprocess
from . import BASE_DIR, OpenShiftBase, slugify
logger = logging.getLogger(__name__)
class OpenShiftBundle(OpenShiftBase):
def __init__(self, vars_file, bundle_dir=None):
super().__init__(vars_file)
# Capture bundle directory structure
if bundle_dir:
self.bundle_dir = os.path.join(bundle_dir, self.openshift_version)
else:
self.bundle_dir = os.path.join(BASE_DIR, 'bundle', self.openshift_version)
self.bundle_dirs = {
'images': os.path.join(self.bundle_dir, 'images'),
'release': os.path.join(self.bundle_dir, 'release'),
'rhcos': os.path.join(self.bundle_dir, 'rhcos'),
}
self.container_volumes = [
(self.bundle_dir, '/mnt/bundle'),
]
self.required_images = [
'docker.io/library/registry:2',
self.container_image,
]
def _export_required_images(self):
"""
Export the required images.
"""
for image in self.required_images:
image_archive_filename = os.path.join(
self.bundle_dirs['images'],
'{}.tar'.format(slugify(image))
)
if not os.path.exists(image_archive_filename):
logger.info('Exporting {}'.format(image))
if image.startswith('localhost'):
subprocess.call([
'podman',
'save',
'--format', 'docker-archive',
'--output', image_archive_filename,
image
])
else:
subprocess.call([
'skopeo',
'copy',
'docker://{}'.format(image),
'docker-archive:{}'.format(image_archive_filename)
])
logging.info('Exported image to {}'.format(
image_archive_filename))
else:
logger.info('{} already exported to {}'.format(
image, image_archive_filename))
logger.debug('Exporting required images complete')
def _export_release(self, playbook_args=[]):
"""
Execute playbook to export release images.
"""
logger.info('Run playbook to export release images')
subprocess.call(
self._playbook_run_command(
'playbooks/export_release.yml',
playbook_args=playbook_args,
volumes=self.container_volumes,
)
)
logger.debug('Playbook to export release images complete')
def _export_rhcos(self, playbook_args=[]):
"""
Execute playbook to export RHCOS.
"""
logger.info('Run playbook to export RHCOS')
subprocess.call(
self._playbook_run_command(
'playbooks/export_rhcos.yml',
playbook_args=playbook_args,
volumes=self.container_volumes,
)
)
logger.debug('Playbook to export RHCOS complete')
def _export_create_tar(self):
"""
Create tar file of bundle
"""
logger.info('Create tar file of bundle content')
cmd = [
'tar',
'--directory', self.bundle_dir,
'--create',
'--verbose',
'--file', os.path.join(
self.bundle_dir,
'bundle.tar'
),
] + [os.path.basename(i) for i in self.bundle_dirs.values()]
subprocess.call(cmd)
logger.debug('Creating tar file of bundle content complete')
def export_bundle(self, playbook_args=[]):
"""
Export bundle content for an air-gapped cluster.
"""
self._build_container_if_needed()
# Create directory structure required for export
logger.debug('Creating bundle directory structure')
for i in self.bundle_dirs.values():
os.makedirs(i, mode=0o755, exist_ok=True)
self._export_required_images()
self._export_release(playbook_args)
self._export_rhcos(playbook_args)
self._export_create_tar()
print()
print('The bundle has completed exporting. You can find the bundle at:')
print()
print(os.path.join(self.bundle_dir, 'bundle.tar'))
print()
def import_bundle(self, bundle_file, playbook_args=[]):
"""
Import bundle content for an air-gapped cluster.
"""
pass
``` |
{
"source": "JohnSitarski/gestalt",
"score": 2
} |
#### File: amq/rpc/rpc-client.py
```python
import argparse
import asyncio
import logging
import random
from aio_pika.exceptions import DeliveryError
from gestalt.amq.requester import Requester
from gestalt.serialization import CONTENT_TYPE_JSON
from gestalt.runner import run
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="AMQP RPC Client Example")
parser.add_argument(
"--amqp-url", metavar="<url>", type=str, default=None, help="The AMQP URL"
)
parser.add_argument(
"--exchange-name",
metavar="<name>",
type=str,
default="",
help="The AMQP exchange name. Defaults to a empty string which is the default exchange.",
)
parser.add_argument(
"--service-name",
metavar="<pattern>",
type=str,
default="clock-service",
help="The service name. Defaults to 'clock-service'.",
)
parser.add_argument(
"--log-level",
type=str,
choices=["debug", "info", "error"],
default="error",
help="Logging level. Default is 'error'.",
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s.%(msecs)03.0f [%(levelname)s] [%(name)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=getattr(logging, args.log_level.upper()),
)
requester = Requester(
amqp_url=args.amqp_url,
exchange_name=args.exchange_name,
service_name=args.service_name,
serialization=CONTENT_TYPE_JSON,
)
async def message_requester(r: Requester) -> None:
""" Generate a new request message, in various formats, and publish it """
counter = 0
while True:
counter += 1
request_msg = dict(sequence_number=counter, utc=True)
# For demonstration purposes randomly choose to use an invalid
# service name to show that the message gets returned and raises
# a DeliveryError exception.
service_name = (
r.service_name if random.random() < 0.8 else "invalid_service_name"
)
try:
logger.info(f"Sending request {request_msg} to {service_name}")
response_msg = await r.request(
request_msg, expiration=2, service_name=service_name
)
logger.info(f"Received response: {response_msg}")
except asyncio.TimeoutError as exc:
logger.info(f"Request was timed-out: {exc}")
except asyncio.CancelledError as exc:
logger.info(f"Request was cancelled: {exc}")
except DeliveryError as exc:
logger.info(f"Request delivery error: {exc}")
# Wait some time before sending another request
await asyncio.sleep(3)
async def start_requesting(r):
await r.start()
await asyncio.sleep(1)
asyncio.get_event_loop().create_task(message_requester(r))
run(start_requesting(requester), finalize=requester.stop)
```
#### File: stream/linereceiver/delimited.py
```python
import logging
from gestalt.stream.endpoint import StreamClient
from gestalt.stream.endpoint import StreamServer
from gestalt.stream.protocols.base import BaseStreamProtocol
logger = logging.getLogger(__name__)
class LineDelimitedStreamProtocol(BaseStreamProtocol):
def __init__(
self,
on_message=None,
on_peer_available=None,
on_peer_unavailable=None,
delimiter=b"\n",
**kwargs,
):
super().__init__(
on_message=on_message,
on_peer_available=on_peer_available,
on_peer_unavailable=on_peer_unavailable,
)
self.delimiter = delimiter
self._buffer = bytearray()
def send(self, data: bytes, **kwargs):
""" Sends a message by writing it to the transport.
Messages with zero bytes are not sent as they are considered invalid.
:param data: a bytes object containing the message payload.
"""
if not isinstance(data, bytes):
logger.error(f"data must be bytes - can't send message. data={type(data)}")
return
if data:
logger.debug(f"Sending msg with {len(data)} bytes")
self.transport.write(data + self.delimiter)
def data_received(self, data):
""" Process some bytes received from the transport.
Upon receiving some bytes from the stream they are added to a buffer
and then an attempt is made to extract any messages in the buffer
by splitting on the delimiter.
This method should support the worst case scenario of receiving a
single byte at a time, however, a more likely scenario is receiving
one or more messages at once.
"""
self._buffer.extend(data)
msgs = self._buffer.split(self.delimiter)
# When the buffer ends with a delimiter the split produces an empty
# bytearray. Discard the trailing empty element and clear the buffer.
if msgs[-1] == bytearray():
self._buffer.clear()
msgs.pop(-1)
else:
# A partial msg was in buffer, return it to the buffer.
self._buffer = msgs.pop(-1)
for msg in msgs:
try:
if self._on_message_handler:
self._on_message_handler(self, self._identity, msg)
except Exception:
logger.exception("Error in on_message callback method")
class LineDelimitedStreamServer(StreamServer):
protocol_class = LineDelimitedStreamProtocol
class LineDelimitedStreamClient(StreamClient):
protocol_class = LineDelimitedStreamProtocol
```
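The split-and-retain buffering in `data_received` can be illustrated in isolation; this minimal sketch reproduces the same logic without any transport or protocol class.
```python
delimiter = b"\n"
buffer = bytearray()

def feed(data: bytes):
    """Return complete messages found so far; keep any partial tail buffered."""
    global buffer
    buffer.extend(data)
    msgs = buffer.split(delimiter)
    if msgs[-1] == bytearray():
        buffer = bytearray()   # buffer ended exactly on a delimiter
        msgs.pop(-1)
    else:
        buffer = msgs.pop(-1)  # partial message goes back into the buffer
    return msgs

assert feed(b"hello\nwor") == [bytearray(b"hello")]
assert feed(b"ld\n") == [bytearray(b"world")]
```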
#### File: JohnSitarski/gestalt/setup.py
```python
import pathlib
import re
import sys
from setuptools import setup, find_packages
assert sys.version_info >= (3, 6, 0), "gestalt requires Python 3.6+"
THIS_DIR = pathlib.Path(__file__).parent
def get_version() -> str:
init_file = THIS_DIR / "src" / "gestalt" / "__init__.py"
version_re = re.compile(r".*__version__\s=\s+[\'\"](?P<version>.*?)[\'\"]")
with open(init_file, "r", encoding="utf8") as init_fd:
match = version_re.search(init_fd.read())
if match:
version = match.group("version")
else:
raise RuntimeError(f"Cannot find __version__ in {init_file}")
return version
def get_long_description() -> str:
readme_file = THIS_DIR / "README.md"
with open(readme_file, encoding="utf8") as fd:
readme = fd.read()
changes_file = THIS_DIR / "CHANGELOG.md"
with open(changes_file, encoding="utf8") as fd:
changes = fd.read()
return "\n\n".join([readme, changes])
def get_requirements(requirements_file: str) -> list:
with open(requirements_file, encoding="utf8") as fd:
requirements = []
for line in fd.read().split("\n"):
line = line.strip()
if line and not line.startswith("#"):
requirements.append(line)
return requirements
if __name__ == "__main__":
setup(
name="gestalt",
description="gestalt is a Python application framework for building distributed systems",
long_description=get_long_description(),
long_description_content_type="text/markdown",
license="MIT license",
url="https://github.com/claws/gestalt",
version=get_version(),
author="<NAME>",
python_requires=">=3.6",
install_requires=get_requirements(THIS_DIR / "requirements.txt"),
package_dir={"": "src"},
packages=find_packages("src"),
extras_require={
"develop": get_requirements(THIS_DIR / "requirements.dev.txt"),
"amq": ["aio_pika"],
"protobuf": ["protobuf"],
"yaml": ["PyYAML"],
"avro": ["avro-python3"],
"msgpack": ["msgpack-python"],
"snappy": ["python-snappy"],
"brotli": ["brotli"],
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
],
keywords=["gestalt", "framework", "communications"],
)
```
#### File: src/gestalt/compression.py
```python
import abc
import zlib
from collections import namedtuple
from typing import Any, Callable, Dict, Optional, Tuple, Union
try:
import bz2
have_bz2 = True
except ImportError:
have_bz2 = False
try:
import lzma
have_lzma = True
except ImportError:
have_lzma = False
try:
import brotli
have_brotli = True
except ImportError:
have_brotli = False
try:
import snappy
have_snappy = True
except ImportError:
have_snappy = False
CodecType = Callable[[bytes], bytes]
COMPRESSION_NONE = "none"
COMPRESSION_GZIP = "applications/x-gzip"
COMPRESSION_BZ2 = "applications/x-bz2"
COMPRESSION_LZMA = "applications/x-lzma"
COMPRESSION_BROTLI = "applications/x-brotli"
COMPRESSION_SNAPPY = "application/x-snappy"
COMPRESSION_ZLIB = "application/zlib"
COMPRESSION_DEFLATE = "application/deflate"
codec = namedtuple("codec", ("content_type", "compressor"))
class ICompressor(abc.ABC):
"""
This class represents the base interface for a compressor.
"""
@abc.abstractmethod # pragma: no branch
def compress(self, data):
""" Returns compressed data """
@abc.abstractmethod # pragma: no branch
def decompress(self, data):
""" Returns decompressed data """
class CompressorRegistry:
""" This registry keeps track of compression strategies.
A convenience name or the specific content-type string can be used to
reference a specific compressor that is capable of encoding and decoding.
"""
def __init__(self) -> None:
self._compressors = {} # type: Dict[Optional[str], codec]
self._default_codec = None # type: Optional[str]
self.type_to_name = {} # type: Dict[Optional[str], Optional[str]]
self.name_to_type = {} # type: Dict[Optional[str], Optional[str]]
def register(
self,
name: Union[str, None],
compressor: ICompressor,
content_type: Optional[str],
) -> None:
""" Register a new encoder/decoder.
:param name (str): A convenience name for the compression method.
:param compressor: An object that implements the ICompressor interface
that can compress and decompress data back into the original object.
:param content_type (str): The mime-type describing the serialized
structure.
"""
if not isinstance(compressor, ICompressor):
raise Exception(
f"Invalid compressor '{name}'. Expected an instance of ICompressor"
)
self._compressors[name] = codec(content_type, compressor)
# map convenience name to mime-type and back again.
self.type_to_name[content_type] = name
self.name_to_type[name] = content_type
def set_default(self, name_or_type: Optional[str]) -> None:
""" Set the default compression method used by this library.
:param name_or_type: The convenience name or the mime-type for the
compression strategy.
Raises:
Exception: If the name_or_type requested is not available.
"""
name, _content_type = self._resolve(name_or_type)
self._default_codec = name
@property
def compressors(self):
""" Return a dict of the available compressors (codecs) """
return self._compressors
def get_compressor(self, name_or_type: str):
""" Return a specific compressor.
:param name_or_type: The convenience name or the mime-type for the
compression strategy. The value may be the alias name (e.g. zlib)
or the mime-type (e.g. application/zlib).
:returns: A compressor object that can encode and decode bytes
using the named strategy.
"""
name, _content_type = self._resolve(name_or_type)
return self._compressors[name].compressor
def get_codec(self, name_or_type: str):
""" Return codec attributes for a specific compressor.
:param name_or_type: The convenience name or the mime-type for the
compression strategy. The value may be the alias name (e.g. zlib)
or the mime-type (e.g. application/zlib).
:returns: A codec named tuple.
"""
name, _content_type = self._resolve(name_or_type)
return self._compressors[name]
def compress(
self, data: Any, name_or_type: Optional[str] = None
) -> Tuple[Optional[str], bytes]:
""" Compress some data.
Compress data into a bytes object suitable for sending as a message body.
:param data: The message data to send.
:param name_or_type: The convenience name or the mime-type for the
compression strategy. The value may be the alias name (e.g. zlib)
or the mime-type (e.g. application/zlib). Defaults to none.
:returns: A tuple containing a string specifying the compression mime-type
(e.g. `application/gzip`) and a bytes object representing the compressed data.
Raises:
Exception: If the compression method requested is not available.
"""
name, content_type = self._resolve(name_or_type)
payload = self._compressors[name].compressor.compress(data)
return content_type, payload
def decompress(
self, data: bytes, name_or_type: Optional[str] = None, **kwargs
) -> Tuple[Optional[str], bytes]:
""" Decompress some data.
Decompress a data blob that was compressed using `compress` based on
`compression`.
:param data (bytes, buffer, str): The message data to decompress.
:param name_or_type: The convenience name or the mime-type for the
compression strategy. The value may be the alias name (e.g. zlib)
or the mime-type (e.g. application/zlib). Defaults to none.
Raises:
Exception: If the decompression method requested is not available.
Returns:
Any: Decompressed data.
"""
name, content_type = self._resolve(name_or_type)
payload = self._compressors[name].compressor.decompress(data)
return content_type, payload
def _resolve(
self, name_or_type: Optional[str]
) -> Tuple[Optional[str], Optional[str]]:
""" Resolve the compression name and mime-type.
:param name_or_type: The convenience name or the mime-type for the
compression strategy. The value may be the alias name (e.g. zlib)
or the mime-type (e.g. application/zlib).
Raises:
Exception: If the compression method requested is not available.
"""
if name_or_type in self.name_to_type:
name = name_or_type
content_type = self.name_to_type[name_or_type]
elif name_or_type in self.type_to_name:
content_type = name_or_type
name = self.type_to_name[content_type]
else:
raise Exception(f"Invalid compressor '{name_or_type}'")
return name, content_type
def register_none(reg: CompressorRegistry):
""" The compression you have when you don't want compression. """
class NoneCompressor(ICompressor):
def compress(self, data):
"""
Return data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception(f"Can only compress bytes, got {type(data)}")
return data
def decompress(self, data):
return data
compressor = NoneCompressor()
reg.register(None, compressor, None)
def register_zlib(reg: CompressorRegistry):
""" Register a compressor/decompressor for zlib compression. """
class ZlibCompressor(ICompressor):
def compress(self, data):
""" Create a RFC 1950 data format (zlib) compressor and compress
some data.
After calling flush the decompressor can't be used again. Hence,
a new decompressor is created for each use.
:return: data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception("Can only compress bytes, got {}".format(type(data)))
compressor = zlib.compressobj(level=9, wbits=zlib.MAX_WBITS)
data = compressor.compress(data) + compressor.flush()
return data
def decompress(self, data):
""" Create a RFC 1950 data format (zlib) decompressor and
decompress some data.
After calling flush the decompressor can't be used again. Hence,
a new decompressor is created for each use.
:return: data as a bytes object.
"""
decompressor = zlib.decompressobj(zlib.MAX_WBITS)
data = decompressor.decompress(data) + decompressor.flush()
return data
compressor = ZlibCompressor()
reg.register("zlib", compressor, COMPRESSION_ZLIB)
def register_deflate(reg: CompressorRegistry):
""" Register a compressor/decompressor for deflate compression. """
class DeflateCompressor(ICompressor):
def compress(self, data):
""" Create a RFC 1951 data format (deflate) compressor and compress
some data.
After calling flush the decompressor can't be used again. Hence,
a new decompressor is created for each use.
:return: data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception("Can only compress bytes, got {}".format(type(data)))
compressor = zlib.compressobj(level=9, wbits=-zlib.MAX_WBITS)
data = compressor.compress(data) + compressor.flush()
return data
def decompress(self, data):
""" Create a RFC 1951 data format (deflate) decompressor and
decompress some data.
After calling flush the decompressor can't be used again. Hence,
a new decompressor is created for each use.
:return: data as a bytes object.
"""
decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
data = decompressor.decompress(data) + decompressor.flush()
return data
compressor = DeflateCompressor()
reg.register("deflate", compressor, COMPRESSION_DEFLATE)
def register_gzip(reg: CompressorRegistry):
""" Register a compressor/decompressor for gzip compression. """
class GzipCompressor(ICompressor):
def compress(self, data):
""" Create a RFC 1952 data format (gzip) compressor and compress
some data.
After calling flush the compressor can't be used again. Hence,
a new compressor is created for each use.
:return: data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception(f"Can only compress bytes, got {type(data)}")
compressor = zlib.compressobj(level=9, wbits=zlib.MAX_WBITS | 16)
data = compressor.compress(data) + compressor.flush()
return data
def decompress(self, data):
""" Create a RFC 1952 data format (gzip) decompressor and
decompress some data.
After calling flush the decompressor can't be used again. Hence,
a new decompressor is created for each use.
:return: data as a bytes object.
"""
decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)
data = decompressor.decompress(data) + decompressor.flush()
return data
compressor = GzipCompressor()
reg.register("gzip", compressor, COMPRESSION_GZIP)
def register_bz2(reg: CompressorRegistry):
""" Register a compressor/decompressor for bz2 compression. """
if have_bz2:
class Bz2Compressor(ICompressor):
def compress(self, data):
""" Create a bz2 compressor and compress some data.
After calling flush the compressor can't be used again. Hence,
a new compressor is created for each use.
:return: data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception(f"Can only compress bytes, got {type(data)}")
compressor = bz2.BZ2Compressor()
data = compressor.compress(data) + compressor.flush()
return data
def decompress(self, data):
""" Create a bz2 decompressor and decompress some data.
:return: data as a bytes object.
"""
decompressor = bz2.BZ2Decompressor()
data = decompressor.decompress(data)
return data
compressor = Bz2Compressor()
reg.register("bzip2", compressor, COMPRESSION_BZ2)
def register_lzma(reg: CompressorRegistry):
""" Register a compressor/decompressor for lzma compression. """
if have_lzma:
class LzmaCompressor(ICompressor):
def compress(self, data):
""" Create a lzma compressor and compress some data.
After calling flush the compressor can't be used again. Hence,
a new compressor is created for each use.
:return: data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception(f"Can only compress bytes, got {type(data)}")
compressor = lzma.LZMACompressor()
data = compressor.compress(data) + compressor.flush()
return data
def decompress(self, data):
""" Create a lzma decompressor and decompress some data.
:return: data as a bytes object.
"""
decompressor = lzma.LZMADecompressor()
data = decompressor.decompress(data)
return data
compressor = LzmaCompressor()
reg.register("lzma", compressor, COMPRESSION_LZMA)
def register_brotli(reg: CompressorRegistry):
""" Register a compressor/decompressor for brotli compression. """
if have_brotli:
class BrotliCompressor(ICompressor):
def compress(self, data):
""" Compress data using a brotli compressor.
:return: data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception(f"Can only compress bytes, got {type(data)}")
return brotli.compress(data)
def decompress(self, data):
""" Decompress data using a brotli decompressor.
:return: data as a bytes object.
"""
return brotli.decompress(data)
compressor = BrotliCompressor()
reg.register("brotli", compressor, COMPRESSION_BROTLI)
def register_snappy(reg: CompressorRegistry):
""" Register a compressor/decompressor for snappy compression. """
if have_snappy:
class SnappyCompressor(ICompressor):
def compress(self, data):
""" Compress data using a snappy compressor.
:return: data as a bytes object.
"""
if not isinstance(data, bytes):
raise Exception(f"Can only compress bytes, got {type(data)}")
return snappy.compress(data)
def decompress(self, data):
""" Decompress data using a snappy decompressor.
:return: data as a bytes object.
"""
return snappy.uncompress(data)
compressor = SnappyCompressor()
reg.register("snappy", compressor, COMPRESSION_SNAPPY)
def initialize(reg: CompressorRegistry):
""" Register compression methods and set a default """
register_none(reg)
register_zlib(reg)
register_deflate(reg)
register_gzip(reg)
register_bz2(reg)
register_lzma(reg)
register_brotli(reg)
register_snappy(reg)
reg.set_default(None)
registry = CompressorRegistry()
compress = registry.compress
decompress = registry.decompress
initialize(registry)
```
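A minimal usage sketch of the module-level helpers, assuming the `gestalt` package is importable; it sticks to the zlib codec, which needs no optional dependency.
```python
from gestalt.compression import compress, decompress

payload = b"hello " * 1000

content_type, blob = compress(payload, "zlib")
assert content_type == "application/zlib"
assert len(blob) < len(payload)

_, restored = decompress(blob, content_type)
assert restored == payload
```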
#### File: datagram/protocols/base.py
```python
import asyncio
import binascii
import logging
import os
from typing import Optional, Tuple
logger = logging.getLogger(__name__)
class BaseDatagramProtocol(asyncio.DatagramProtocol):
""" Datagram protocol for an endpoint. """
def __init__(
self,
on_message=None,
on_peer_available=None,
on_peer_unavailable=None,
**kwargs,
):
"""
:param on_message: A callback function that will be passed each message
that the protocol receives.
:param on_peer_available: A callback function that will be called when
the protocol is connected with a transport. In this state the protocol
can send and receive messages.
:param on_peer_unavailable: A callback function that will be called when
the protocol has lost the connection with its transport. In this state
the protocol can not send or receive messages.
"""
self._on_message_handler = on_message
self._on_peer_available_handler = on_peer_available
self._on_peer_unavailable_handler = on_peer_unavailable
self._identity = b""
self._remote_address = None # type: Optional[Tuple[str, int]]
self._local_address = None # type: Optional[Tuple[str, int]]
self.transport = None
@property
def identity(self):
""" Return the protocol's unique identifier """
return self._identity
@property
def raddr(self) -> Tuple[str, int]:
""" Return the remote address the protocol is connected with """
return self._remote_address
@property
def laddr(self) -> Tuple[str, int]:
""" Return the local address the protocol is using """
return self._local_address
def connection_made(self, transport):
self.transport = transport
self._identity = binascii.hexlify(os.urandom(5))
self._local_address = transport.get_extra_info("sockname")
self._remote_address = transport.get_extra_info("peername")
logger.debug(f"UDP protocol connection made. id={self._identity}")
try:
if self._on_peer_available_handler:
self._on_peer_available_handler(self, self._identity)
except Exception:
logger.exception("Error in on_peer_available callback method")
def connection_lost(self, exc):
"""
Called by the event loop when the protocol is disconnected from a transport.
"""
logger.debug(f"UDP protocol connection lost. id={self._identity}")
try:
if self._on_peer_unavailable_handler:
self._on_peer_unavailable_handler(self, self._identity)
except Exception:
logger.exception("Error in on_peer_unavailable callback method")
self.transport = None
self._identity = None
self._local_address = None
def close(self):
""" Close this connection """
logger.debug(f"Closing connection. id={self._identity}")
if self.transport:
self.transport.close()
def send(self, data, addr=None, **kwargs):
"""
Send a message to a remote UDP endpoint by writing it to the transport.
:param data: a bytes object containing the message payload.
:param addr: The address of the remote endpoint as a (host, port)
tuple. If remote_addr was specified when the endpoint was created then
the addr is optional.
"""
if not isinstance(data, bytes):
logger.error(f"data must be bytes - can't send message. data={type(data)}")
return
self.transport.sendto(data, addr=addr)
def datagram_received(self, data, addr):
"""
Process a datagram received from the transport.
When passing a message up to the endpoint, the datagram protocol
passes the senders address as an extra kwarg.
:param data: The datagram payload
:param addr: A (host, port) tuple defining the source address
"""
try:
if self._on_message_handler:
self._on_message_handler(self, self._identity, data, addr=addr)
except Exception:
logger.exception("Error in on_message callback method")
def error_received(self, exc):
"""
In many conditions undeliverable datagrams will be silently dropped.
In some rare conditions the transport can sometimes detect that the
datagram could not be delivered to the recipient.
:param exc: an OSError instance.
"""
logger.error(f"Datagram error: {exc}")
```
#### File: datagram/protocols/netstring.py
```python
import logging
import struct
from .base import BaseDatagramProtocol
logger = logging.getLogger(__name__)
NETSTRING_HEADER_FORMAT = "I"
NETSTRING_HEADER_SIZE = struct.calcsize(NETSTRING_HEADER_FORMAT)
class NetstringDatagramProtocol(BaseDatagramProtocol):
"""
The netstring protocol implements a message framing strategy that
    wraps each message with a frame header that consists of a single
    uint32 field containing the number of bytes in the payload.
.. code-block:: console
+-----------------+----------------------+
| header | payload |
+-----------------+----------------------+
| Message_Length | DATA .... |
| uint32 | |
+-----------------+----------------------+
Messages with a payload size of zero are invalid.
"""
def send(
self, data: bytes, addr=None, add_frame_header=True, **kwargs
): # pylint: disable=arguments-differ
"""
Send a message to a remote UDP endpoint by writing it to the transport.
:param data: a bytes object containing the message payload.
:param addr: The address of the remote endpoint as a (host, port)
tuple. If remote_addr was specified when the endpoint was created then
the addr is optional.
:param add_frame_header: A flag that informs the sending function
whether it needs to wrap the payload data with the frame header.
Defaults to True. This parameter should be set to False when sending
pre-formed messages - such as in a relay type application.
"""
if not isinstance(data, bytes):
logger.error(f"data must be bytes - can't send message. data={data}")
return
header = struct.pack(NETSTRING_HEADER_FORMAT, len(data))
msg = header + data
logger.debug(f"Sending msg with {len(msg)} bytes")
self.transport.sendto(msg, addr=addr)
def datagram_received(self, data, addr):
"""
Process a datagram received from the transport.
When passing a message up to the endpoint, the datagram protocol
        passes the sender's address as an extra kwarg.
:param data: The datagram payload
:param addr: A (host, port) tuple defining the source address
"""
# Remember that msg_len value represents the length of the payload,
# not the total message length which has the frame header too.
msg_len = struct.unpack(NETSTRING_HEADER_FORMAT, data[:NETSTRING_HEADER_SIZE])[
0
]
eom = NETSTRING_HEADER_SIZE + msg_len
msg = data[NETSTRING_HEADER_SIZE:eom]
try:
if self._on_message_handler:
self._on_message_handler(self, self._identity, msg, addr=addr)
except Exception:
logger.exception("Error in on_message callback method")
```
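The framing described in the `NetstringDatagramProtocol` docstring can be illustrated in a few lines. The sketch below is not part of the library; `frame` and `unframe` are hypothetical helpers that mirror what `send` and `datagram_received` do with the `struct`-packed uint32 header.
```python
import struct

HEADER_FORMAT = "I"  # same format string as NETSTRING_HEADER_FORMAT above
HEADER_SIZE = struct.calcsize(HEADER_FORMAT)

def frame(payload: bytes) -> bytes:
    """Prefix the payload with a uint32 header holding the payload length."""
    return struct.pack(HEADER_FORMAT, len(payload)) + payload

def unframe(datagram: bytes) -> bytes:
    """Strip the header and return exactly the advertised payload bytes."""
    (msg_len,) = struct.unpack(HEADER_FORMAT, datagram[:HEADER_SIZE])
    return datagram[HEADER_SIZE:HEADER_SIZE + msg_len]

assert unframe(frame(b"Hello World")) == b"Hello World"
```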
#### File: src/gestalt/serialization.py
```python
import abc
import io
import json
from collections import namedtuple
from typing import Any, Dict, Optional, Tuple
try:
from avro import io as avro_io
from avro import schema
have_avro = True
except ImportError:
have_avro = False
try:
import msgpack
have_msgpack = True
except ImportError:
have_msgpack = False
try:
import yaml
have_yaml = True
except ImportError:
have_yaml = False
try:
from google.protobuf.message import Message
from google.protobuf import symbol_database
have_protobuf = True
except ImportError:
have_protobuf = False
CONTENT_TYPE_AVRO = "application/x-avro"
CONTENT_TYPE_DATA = "application/data"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_MSGPACK = "application/msgpack"
CONTENT_TYPE_PROTOBUF = "application/vnd.google.protobuf"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_YAML = "application/yaml"
codec = namedtuple("codec", ("content_type", "content_encoding", "serializer"))
class ISerializer(abc.ABC):
"""
This class represents the base interface for a serializer.
"""
@abc.abstractmethod # pragma: no branch
def encode(self, data, **kwargs):
""" Returns serialized data as a bytes object. """
@abc.abstractmethod # pragma: no branch
def decode(self, data, **kwargs):
""" Returns deserialized data """
class SerializerRegistry:
""" This registry keeps track of serialization strategies.
A convenience name or the specific content-type string can be used to
reference a specific serializer that is capable of encoding and decoding.
"""
def __init__(self):
self._serializers = {}
self._default_codec = None
self.type_to_name = {}
self.name_to_type = {}
def register(
self,
name: Optional[str],
serializer: ISerializer,
content_type: str,
content_encoding: str = "utf-8",
):
""" Register a new serializer.
:param name: A convenience name for the serialization method.
:param serializer: An object that implements the ISerializer interface
that can encode objects and decode data back into the original object.
:param content_type: The mime-type describing the serialized structure.
:param content_encoding: The content encoding (character set) that
the decoder method will be returning. Will usually be `utf-8` or `binary`.
"""
if not isinstance(serializer, ISerializer):
raise Exception(
f"Invalid serializer '{name}'. Expected an instance of ISerializer"
)
self._serializers[name] = codec(content_type, content_encoding, serializer)
# map convenience name to mime-type and back again.
self.type_to_name[content_type] = name
self.name_to_type[name] = content_type
def set_default(self, name_or_type: str):
""" Set the default serialization method used by this library.
:param name_or_type: a string specifying the serialization strategy.
The string may be the alias name (e.g. json) or the mime-type
(e.g. application/json).
Raises:
Exception: If the serialization method requested is not available.
"""
self._default_codec = self._resolve(name_or_type)
@property
def serializers(self):
return self._serializers
def get_serializer(self, name_or_type: str):
""" Return a specific serializer.
:param name_or_type: a string specifying the serialization strategy
to apply to the data (e.g. ``json``). The string may be the
convenience name (e.g. json) or the mime-type (e.g. application/json).
:returns: A serializer object that can encode and decode bytes
using the named strategy.
"""
name = self._resolve(name_or_type)
return self._serializers[name].serializer
def get_codec(self, name_or_type: str):
""" Return codec attributes for a specific serializer.
:param name_or_type: a string specifying the serialization strategy
to apply to the data (e.g. ``json``). The string may be the
convenience name (e.g. json) or the mime-type (e.g. application/json).
:returns: A codec named tuple.
"""
name = self._resolve(name_or_type)
return self._serializers[name]
def dumps(
self, data: Any, name_or_type: str = None, **kwargs
) -> Tuple[Optional[str], str, bytes]:
""" Encode data.
Serialize a data structure into a bytes object suitable for sending
as a message body.
:param data: The message data to send.
:param name_or_type: A string representing the serialization strategy
        to apply to the data (e.g. ``json``). If not specified then a
        best-effort guess is made: if the data is a string it is sent as
        text, if it is bytes it is passed through as data, otherwise the
        default serializer (JSON) is used.
Keywords:
:param type_identifier: An integer that uniquely identifies a
registered message.
        :returns: A three-item tuple containing a string specifying the
        content type (e.g. `application/json`), a string specifying the
        content encoding (e.g. `utf-8`), and the serialized data as bytes.
Raises:
Exception: If the serialization method requested is not available.
"""
if name_or_type:
if name_or_type not in self._serializers:
raise Exception(f"Invalid serializer {name_or_type}, can't encode.")
content_type, content_encoding, serializer = self._serializers[name_or_type]
payload = serializer.encode(data, **kwargs)
else:
# Make a best guess based on data type
if isinstance(data, bytes):
content_type = CONTENT_TYPE_DATA
content_encoding = "binary"
payload = data
elif isinstance(data, str):
content_type = CONTENT_TYPE_TEXT
content_encoding = "utf-8"
payload = data.encode("utf-8")
else:
# Use the default encoder
content_type, content_encoding, serializer = self._serializers[
self._default_codec
]
payload = serializer.encode(data)
return content_type, content_encoding, payload
def loads(
self,
data: bytes,
content_type: Optional[str],
content_encoding: Optional[str],
**kwargs,
) -> Any:
""" Decode serialized data.
Deserialize a data blob that was serialized using `dumps` based on `content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data (e.g., application/json).
:param content_encoding: The content-encoding of the data. (e.g., utf-8,
binary). NOTE: This parameter is not currently used.
Keywords:
:param type_identifier: An integer that uniquely identifies a
registered message.
Raises:
Exception: If the serialization method requested is not available.
Returns:
The deserialized data.
"""
content_type = content_type if content_type else CONTENT_TYPE_DATA
# Currently the implementation only supports text data (text, json,
# yaml) as utf-8. If/when more is needed then the content_encoding
# parameter will need to be fed down into the serializers.
# content_encoding = (content_encoding or "utf-8").lower()
if data:
name = self._resolve(content_type)
_ct, _ce, serializer = self._serializers[name]
return serializer.decode(data, **kwargs)
return data
def _resolve(self, x: str) -> str:
""" Return a serializer alias string.
:param x: a string specifying the serialization strategy.
The string may be the alias name (e.g. json) or the mime-type
(e.g. application/json).
"""
if x in self.name_to_type: # pylint: disable=no-else-return
return x
elif x in self.type_to_name:
return self.type_to_name[x]
else:
raise Exception(f"Invalid serializer '{x}'")
def register_none(reg: SerializerRegistry):
""" The serialization you have when you don't want serialization. """
class NoneSerializer(ISerializer):
def encode(self, data, **kwargs):
""" Returns serialized data as a bytes object. """
if not isinstance(data, bytes):
raise Exception(f"Can only serialize bytes, got {type(data)}")
return data
def decode(self, data, **kwargs):
""" Returns deserialized data """
return data
serializer = NoneSerializer()
reg.register(
None, serializer, content_type=CONTENT_TYPE_DATA, content_encoding="binary"
)
def register_text(reg: SerializerRegistry) -> None:
""" Register an encoder/decoder for TEXT serialization. """
class TextSerializer(ISerializer):
def encode(self, data, **kwargs) -> bytes:
""" Encode a string and return a :class:`bytes` object.
:returns: a serialized message as a bytes object.
"""
return data.encode("utf-8")
def decode(self, data: bytes, **kwargs) -> str:
""" Decode *data* from :class:`bytes` to the original data structure.
:param data: a bytes object containing a serialized message.
:returns: A str object.
"""
return data.decode("utf-8")
serializer = TextSerializer()
reg.register(
"text", serializer, content_type=CONTENT_TYPE_TEXT, content_encoding="utf-8"
)
def register_json(reg: SerializerRegistry) -> None:
""" Register an encoder/decoder for JSON serialization. """
class JsonSerializer(ISerializer):
def encode(self, data: Any, **kwargs) -> bytes:
""" Encode an object into JSON and return a :class:`bytes` object.
:returns: a serialized message as a bytes object.
"""
return json.dumps(data).encode("utf-8")
        def decode(self, data: bytes, **kwargs) -> Any:
""" Decode *data* from :class:`bytes` to the original data structure.
:param data: a bytes object containing a serialized message.
:returns: A Python object.
"""
return json.loads(data if isinstance(data, str) else data.decode("utf-8"))
serializer = JsonSerializer()
reg.register(
"json", serializer, content_type=CONTENT_TYPE_JSON, content_encoding="utf-8"
)
def register_msgpack(reg: SerializerRegistry) -> None:
""" Register an encoder/decoder for MsgPack serialization. """
if have_msgpack:
class MsgpackSerializer(ISerializer):
"""
Must use the use_bin_type flag to ensure that str objects
are returned back as str objects. This avoids the well
known problem of msgpack 'raw' which returns str and bytes
objects as bytes.
"""
def encode(self, data: Any, **kwargs) -> bytes:
""" Encode an object into MsgPack and return a :class:`bytes` object.
:returns: a serialized message as a bytes object.
"""
return msgpack.packb(data, use_bin_type=True)
            def decode(self, data: bytes, **kwargs) -> Any:
""" Decode *data* from :class:`bytes` to the original data structure.
:param data: a bytes object containing a serialized message.
:returns: A Python object.
"""
return msgpack.unpackb(data, raw=False)
serializer = MsgpackSerializer()
reg.register(
"msgpack",
serializer,
content_type=CONTENT_TYPE_MSGPACK,
content_encoding="binary",
)
def register_yaml(reg: SerializerRegistry) -> None:
""" Register an encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types to be serialized.
Useful if you need to send data such as dates.
"""
if have_yaml:
class YamlSerializer(ISerializer):
def encode(self, data: Any, **kwargs) -> bytes:
""" Encode an object into YAML and return a :class:`bytes` object.
:returns: a serialized message as a bytes object.
"""
return yaml.safe_dump(data).encode("utf-8")
            def decode(self, data: bytes, **kwargs) -> Any:
""" Decode *data* from :class:`bytes` to the original data structure.
:param data: a bytes object containing a serialized message.
:returns: A Python object.
"""
return yaml.safe_load(data.decode("utf-8"))
serializer = YamlSerializer()
reg.register(
"yaml", serializer, content_type=CONTENT_TYPE_YAML, content_encoding="utf-8"
)
def register_avro(reg: SerializerRegistry, schema_registry=None):
""" Register an encoder/decoder for Apache Avro serialization. """
if have_avro:
class SchemaRegistry:
def __init__(self):
self.id2schema = {} # type: Dict[int, schema.Schema]
self._id = 0
def register_message(self, obj: dict, type_identifier: int = None) -> int:
"""
:param obj: A message object to register.
:param type_identifier: An optional message type identifier to
use for the object. If not specified then a number will be
automatically assigned.
"""
if isinstance(obj, dict):
avro_schema = schema.SchemaFromJSONData(obj, schema.Names())
else:
avro_schema = obj
if type_identifier is None:
self._id += 1
type_identifier = self._id
self.id2schema[type_identifier] = avro_schema
return type_identifier
            def get_schema_by_id(self, schema_identifier: int):
return self.id2schema[schema_identifier]
class AvroSerializer(ISerializer):
def __init__(self, schema_registry=None):
"""
:param schema_registry: A schema.Schema object populated with the
schemas that will be used.
"""
self.registry = schema_registry if schema_registry else SchemaRegistry()
def encode(
self, data, *, type_identifier: int = None, **kwargs
): # pylint: disable=arguments-differ
""" Encode an object into Avro and return a :class:`bytes` object.
:returns: a serialized message as a bytes object.
"""
avroSchema = self.registry.get_schema_by_id(type_identifier)
bytes_writer = io.BytesIO()
encoder = avro_io.BinaryEncoder(bytes_writer)
datum_writer = avro_io.DatumWriter(avroSchema)
datum_writer.write(data, encoder)
return bytes_writer.getvalue()
def decode(
self, data, *, type_identifier: int = None, **kwargs
): # pylint: disable=arguments-differ
""" Decode *data* from :class:`bytes` to the original data structure.
:param data: a bytes object containing a serialized message.
:param type_identifier: An integer specifying the identity of a
registered Avro schema. If specified the schema name is used to
lookup the schema in a schema registry.
:returns: A Python object.
"""
avroSchema = self.registry.get_schema_by_id(type_identifier)
bytes_reader = io.BytesIO(data)
decoder = avro_io.BinaryDecoder(bytes_reader)
datum_reader = avro_io.DatumReader(avroSchema)
return datum_reader.read(decoder)
serializer = AvroSerializer(schema_registry=schema_registry)
reg.register(
"avro",
serializer,
content_type=CONTENT_TYPE_AVRO,
content_encoding="binary",
)
def register_protobuf(reg: SerializerRegistry, object_registry=None) -> None:
""" Register an encoder/decoder for Google Protocol Buffers serialization. """
if have_protobuf:
class ObjectRegistry:
def __init__(self):
self.symDb = symbol_database.Default()
self.id2sym = {}
self.sym2id = {}
self._id = 0
def register_message(
self, obj: Message, type_identifier: int = None
) -> int:
"""
:param obj: A message object to register.
:param type_identifier: An optional message type identifier to
use for the object. If not specified then a number will be
automatically assigned.
"""
symbol_name = obj.DESCRIPTOR.name
if type_identifier is None:
self._id += 1
type_identifier = self._id
self.id2sym[type_identifier] = symbol_name
self.sym2id[symbol_name] = type_identifier
return type_identifier
            def get_object_by_id(self, type_identifier: int) -> Message:
symbol_name = self.id2sym.get(type_identifier)
messageClass = self.symDb.GetSymbol(symbol_name)
return messageClass()
def get_id_for_object(self, obj: Message) -> int:
return self.sym2id[obj.DESCRIPTOR.name]
class ProtobufSerializer(ISerializer):
""" Google Protocol Buffers serialization.
            When you parse a serialized protocol buffer message you have to know
            what message type you're expecting. However, a serialized protocol
buffer message does not provide this identifying information.
This becomes a problem when you want to be able to parse different
message types from a single stream or file.
One way to accomplish this is to put all the message types inside a
OneOf field in a top level wrapper message. This is probably the
simplest option if you have control of the system (the senders, the
receivers and the .proto definitions).
If you can't do this then you need to implement a mechanism that can
supply the type hint information about the original object type in
order to decode the data. The type hint must be transferred with the
serialized message data, typically in a message frame header.
            This serializer relies on an object registry to manage the association
between an identifier and an object. The identifier is used as a
lookup key to construct the message class object from the symbol
database and decode data into it.
When a pb2 file is imported it automatically adds the messages to the
default symbol database.
"""
def __init__(self, object_registry=None):
"""
:param object_registry: An object that is responsible for translating
a Protocol Buffers object to a type identifier and back.
"""
self.registry = object_registry if object_registry else ObjectRegistry()
def encode(self, obj, **kwargs): # pylint: disable=arguments-differ
""" Encode the given object and return a :class:`bytes` object.
:param obj: A Protobuf object to serialize into bytes.
:returns: a serialized message as a bytes object.
"""
assert isinstance(obj, Message)
return obj.SerializeToString()
def decode(self, data: bytes, **kwargs):
""" Decode *data* from :class:`bytes` to the original data structure.
:param data: a bytes object containing a serialized message.
                :keyword type_identifier: An integer that can be used to uniquely
identify the Protobuf message. The identifier is used to find the
matching class object and instantiate it. The data is then decoded
into the new message instance.
:returns: A Protobuf message object.
:raises: KeyError if matching symbol type is not found.
"""
type_identifier = kwargs.get("type_identifier")
try:
obj = self.registry.get_object_by_id(type_identifier)
except KeyError:
raise Exception(
f"Unable to load '{type_identifier}' from symbol database"
) from None
obj.ParseFromString(data)
return obj
serializer = ProtobufSerializer(object_registry=object_registry)
reg.register(
"protobuf",
serializer,
content_type=CONTENT_TYPE_PROTOBUF,
content_encoding="binary",
)
def initialize(reg: SerializerRegistry):
""" Register serialization methods and set a default """
register_none(reg)
register_text(reg)
register_json(reg)
register_msgpack(reg)
register_yaml(reg)
register_avro(reg)
register_protobuf(reg)
reg.set_default("json")
registry = SerializerRegistry()
dumps = registry.dumps
loads = registry.loads
initialize(registry)
```
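As a quick orientation, the sketch below (not from the project documentation) round-trips a dictionary through the module-level `registry` created at the bottom of `serialization.py`, using the `dumps`/`loads` aliases defined there; the import path is assumed from `src/gestalt/serialization.py`.
```python
from gestalt import serialization  # assumed import path

message = {"latitude": 130.0, "longitude": -30.0}

# Explicitly request JSON; omitting name_or_type would fall back to the
# default serializer (also JSON) for anything that is not str or bytes.
content_type, content_encoding, payload = serialization.dumps(message, "json")
assert content_type == serialization.CONTENT_TYPE_JSON
assert isinstance(payload, bytes)

# loads() resolves the serializer from the content type and decodes.
restored = serialization.loads(payload, content_type, content_encoding)
assert restored == message
```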
#### File: gestalt/tests/test_compression.py
```python
import unittest
from gestalt import compression
TEST_DATA = b"The Quick Brown Fox Jumps Over The Lazy Dog"
class CompressionTestCase(unittest.TestCase):
def test_expected_codecs_are_present(self):
codecs = compression.registry.compressors
        # Some codecs are always expected to be present, confirm they are
expected_codecs = (None, "zlib", "deflate", "gzip", "bzip2")
for codec_name in expected_codecs:
with self.subTest(f"Check that {codec_name} is present"):
self.assertIn(codec_name, codecs)
def test_expected_codec_attributes(self):
codecs = compression.registry.compressors
for name, settings in codecs.items():
with self.subTest(f"Check that {name} has expected attributes"):
for key in ("content_type", "compressor"):
self.assertTrue(hasattr(settings, key))
def test_fetch_compressor_by_name_or_type(self):
codecs = compression.registry.compressors
for name in codecs.keys():
with self.subTest(f"Check fetch using '{name}'"):
compressor = compression.registry.get_compressor(name)
for content_type, _compressor in codecs.values():
with self.subTest(f"Check fetch using '{content_type}'"):
compressor = compression.registry.get_compressor(content_type)
def test_fetch_codec_by_name_or_type(self):
codecs = compression.registry.compressors
for name in codecs.keys():
with self.subTest(f"Check fetch using '{name}'"):
compressor = compression.registry.get_codec(name)
for content_type, _compressor in codecs.values():
with self.subTest(f"Check fetch using '{content_type}'"):
compressor = compression.registry.get_codec(content_type)
def test_register_invalid_compressor(self):
class InvalidCompressor(object):
pass
compressor = InvalidCompressor()
with self.assertRaises(Exception) as cm:
compression.registry.register(
"invalid", compressor, content_type="application/invalid"
)
self.assertIn("Expected an instance of ICompressor", str(cm.exception))
def test_fetch_codec_with_invalid_name_or_type(self):
with self.assertRaises(Exception) as cm:
compression.registry.get_codec("invalid")
self.assertIn("Invalid compressor", str(cm.exception))
def test_fetch_compressor_with_invalid_name_or_type(self):
with self.assertRaises(Exception) as cm:
compression.registry.get_compressor("invalid")
self.assertIn("Invalid compressor", str(cm.exception))
def test_decompress_with_invalid_name_or_type(self):
with self.assertRaises(Exception) as cm:
compression.decompress(b"a", "invalid")
self.assertIn("Invalid compressor", str(cm.exception))
def test_compress_with_invalid_name_or_type(self):
with self.assertRaises(Exception) as cm:
compression.compress(b"", "invalid")
self.assertIn("Invalid compressor", str(cm.exception))
def test_compress_with_unspecified_name_or_type(self):
content_type, payload = compression.compress(b"")
self.assertEqual(content_type, None)
def test_compression_roundtrip(self):
codecs = compression.registry.compressors
for name, settings in codecs.items():
with self.subTest(f"Check {name} compression roundtrip"):
convenience_name = name
mime_type = compression.registry.name_to_type[name]
# The convenience name or the content_type can be used when
# specifying compression method. Check both.
for c_label in (convenience_name, mime_type):
content_type, payload = compression.compress(TEST_DATA, c_label)
self.assertEqual(content_type, mime_type)
if c_label is None:
self.assertEqual(TEST_DATA, payload)
else:
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
def test_none_compression(self):
codecs = compression.registry.compressors
self.assertIn(None, codecs)
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({})
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
content_type, payload = compression.compress(TEST_DATA)
self.assertIsNone(content_type)
self.assertEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload)
self.assertIsNone(content_type)
self.assertEqual(d, TEST_DATA)
def test_zlib_compression(self):
convenience_name = "zlib"
mime_type = compression.COMPRESSION_ZLIB
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({}, mime_type)
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
for c_name in (convenience_name, mime_type):
with self.subTest(f"Check zlib compression roundtrip using {c_name}"):
content_type, payload = compression.compress(TEST_DATA, c_name)
self.assertEqual(content_type, mime_type)
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
def test_deflate_compression(self):
convenience_name = "deflate"
mime_type = compression.COMPRESSION_DEFLATE
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({}, mime_type)
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
for c_name in (convenience_name, mime_type):
with self.subTest(f"Check deflate compression roundtrip using {c_name}"):
content_type, payload = compression.compress(TEST_DATA, c_name)
self.assertEqual(content_type, mime_type)
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
def test_gzip_compression(self):
convenience_name = "gzip"
mime_type = compression.COMPRESSION_GZIP
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({}, mime_type)
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
for c_name in (convenience_name, mime_type):
with self.subTest(f"Check gzip compression roundtrip using {c_name}"):
content_type, payload = compression.compress(TEST_DATA, c_name)
self.assertEqual(content_type, mime_type)
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
@unittest.skipUnless(compression.have_bz2, "requires bz2")
def test_bzip2_compression(self):
convenience_name = "bzip2"
mime_type = compression.COMPRESSION_BZ2
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({}, mime_type)
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
for c_name in (convenience_name, mime_type):
with self.subTest(f"Check bzip2 compression roundtrip using {c_name}"):
content_type, payload = compression.compress(TEST_DATA, c_name)
self.assertEqual(content_type, mime_type)
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
@unittest.skipUnless(compression.have_lzma, "requires lzma")
def test_lzma_compression(self):
convenience_name = "lzma"
mime_type = compression.COMPRESSION_LZMA
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({}, mime_type)
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
for c_name in (convenience_name, mime_type):
with self.subTest(f"Check lzma compression roundtrip using {c_name}"):
content_type, payload = compression.compress(TEST_DATA, c_name)
self.assertEqual(content_type, mime_type)
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
@unittest.skipUnless(compression.have_brotli, "requires brotli")
def test_brotli_compression(self):
convenience_name = "brotli"
mime_type = compression.COMPRESSION_BROTLI
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({}, mime_type)
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
for c_name in (convenience_name, mime_type):
with self.subTest(f"Check brotli compression roundtrip using {c_name}"):
content_type, payload = compression.compress(TEST_DATA, c_name)
self.assertEqual(content_type, mime_type)
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
@unittest.skipUnless(compression.have_snappy, "requires snappy")
def test_snappy_compression(self):
convenience_name = "snappy"
mime_type = compression.COMPRESSION_SNAPPY
# check exception is raised when bytes are not passed in
with self.assertRaises(Exception) as cm:
compression.compress({}, mime_type)
self.assertIn("Can only compress bytes", str(cm.exception))
# perform roundtrip check
for c_name in (convenience_name, mime_type):
with self.subTest(f"Check snappy compression roundtrip using {c_name}"):
content_type, payload = compression.compress(TEST_DATA, c_name)
self.assertEqual(content_type, mime_type)
self.assertNotEqual(TEST_DATA, payload)
content_type, d = compression.decompress(payload, content_type)
self.assertEqual(content_type, mime_type)
self.assertEqual(d, TEST_DATA)
```
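The compression API exercised by these tests follows the same convenience-name / mime-type pattern as the serialization registry. Below is a condensed sketch of the round trip the tests verify; the calls mirror those used in `CompressionTestCase`.
```python
from gestalt import compression

data = b"The Quick Brown Fox Jumps Over The Lazy Dog"

# Either the convenience name ("gzip") or the mime-type constant may be used.
content_type, payload = compression.compress(data, "gzip")
assert content_type == compression.COMPRESSION_GZIP
assert payload != data

content_type, restored = compression.decompress(payload, content_type)
assert restored == data
```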
#### File: gestalt/tests/test_datagram_mti.py
```python
import asyncio
import asynctest
import logging
import socket
import unittest.mock
from gestalt import serialization
from gestalt.datagram.mti import MtiDatagramEndpoint
class MtiDatagramEndpointTestCase(asynctest.TestCase):
async def test_start_receiver(self):
receiver_on_message_mock = unittest.mock.Mock()
receiver_on_started_mock = unittest.mock.Mock()
receiver_on_stopped_mock = unittest.mock.Mock()
receiver_on_peer_available_mock = unittest.mock.Mock()
receiver_on_peer_unavailable_mock = unittest.mock.Mock()
receiver_ep = MtiDatagramEndpoint(
on_message=receiver_on_message_mock,
on_started=receiver_on_started_mock,
on_stopped=receiver_on_stopped_mock,
on_peer_available=receiver_on_peer_available_mock,
on_peer_unavailable=receiver_on_peer_unavailable_mock,
)
# Expect an exception if local_addr or remote addr are not
# specified
with self.assertRaises(Exception) as cm:
await receiver_ep.start()
expected = "At least one of local_addr or remote addr must be defined"
self.assertIn(expected, str(cm.exception))
await receiver_ep.start(local_addr=("0.0.0.0", 0))
self.assertTrue(receiver_on_started_mock.called)
self.assertTrue(receiver_on_peer_available_mock.called)
address, port = receiver_ep.bindings[0]
# Check that starting a receiver that is already started does not
# have any consequences
await receiver_ep.start()
await receiver_ep.stop()
self.assertTrue(receiver_on_stopped_mock.called)
# Check that stopping a receiver that is already stopped does not
# have any consequences
await receiver_ep.stop()
async def test_start_receiver_on_unavailable_port(self):
""" check starting receiver on a used port raises an exception """
# Occupy a port by starting a UDP endpoint on it first.
first_ep = MtiDatagramEndpoint()
await first_ep.start(local_addr=("0.0.0.0", 0))
host, occupied_port = first_ep.bindings[0]
try:
receiver_on_message_mock = unittest.mock.Mock()
receiver_on_started_mock = unittest.mock.Mock()
receiver_on_stopped_mock = unittest.mock.Mock()
receiver_on_peer_available_mock = unittest.mock.Mock()
receiver_on_peer_unavailable_mock = unittest.mock.Mock()
receiver_ep = MtiDatagramEndpoint(
on_message=receiver_on_message_mock,
on_started=receiver_on_started_mock,
on_stopped=receiver_on_stopped_mock,
on_peer_available=receiver_on_peer_available_mock,
on_peer_unavailable=receiver_on_peer_unavailable_mock,
)
with self.assertLogs(
"gestalt.datagram.endpoint", level=logging.ERROR
) as log:
with self.assertRaises(Exception):
await receiver_ep.start(local_addr=(host, occupied_port))
address, port = receiver_ep.bindings[0]
self.assertFalse(receiver_on_started_mock.called)
await receiver_ep.stop()
# endpoint never actually started so it should not really need
# to be stopped.
self.assertFalse(receiver_on_stopped_mock.called)
finally:
await first_ep.stop()
async def test_sender_receiver_interaction(self):
""" check sender and receiver interactions """
receiver_on_message_mock = asynctest.CoroutineMock()
receiver_on_started_mock = asynctest.CoroutineMock()
receiver_on_stopped_mock = asynctest.CoroutineMock()
receiver_on_peer_available_mock = asynctest.CoroutineMock()
receiver_on_peer_unavailable_mock = asynctest.CoroutineMock()
receiver_ep = MtiDatagramEndpoint(
on_message=receiver_on_message_mock,
on_started=receiver_on_started_mock,
on_stopped=receiver_on_stopped_mock,
on_peer_available=receiver_on_peer_available_mock,
on_peer_unavailable=receiver_on_peer_unavailable_mock,
)
await receiver_ep.start(local_addr=("127.0.0.1", 0))
self.assertTrue(receiver_on_started_mock.called)
address, port = receiver_ep.bindings[0]
sender_on_message_mock = asynctest.CoroutineMock()
sender_on_started_mock = asynctest.CoroutineMock()
sender_on_stopped_mock = asynctest.CoroutineMock()
sender_on_peer_available_mock = asynctest.CoroutineMock()
sender_on_peer_unavailable_mock = asynctest.CoroutineMock()
sender_ep = MtiDatagramEndpoint(
on_message=sender_on_message_mock,
on_started=sender_on_started_mock,
on_stopped=sender_on_stopped_mock,
on_peer_available=sender_on_peer_available_mock,
on_peer_unavailable=sender_on_peer_unavailable_mock,
)
await sender_ep.start(remote_addr=(address, port))
await asyncio.sleep(0.3)
self.assertTrue(sender_on_started_mock.called)
self.assertTrue(sender_on_peer_available_mock.called)
self.assertTrue(receiver_on_peer_available_mock.called)
# Send a msg without identifier from sender to receiver
sent_msg = b"Hello World"
sender_ep.send(sent_msg)
await asyncio.sleep(0.1)
self.assertTrue(receiver_on_message_mock.called)
(args, kwargs) = receiver_on_message_mock.call_args_list[0]
ep, received_msg = args
self.assertIsInstance(ep, MtiDatagramEndpoint)
self.assertEqual(received_msg, sent_msg)
self.assertIn("addr", kwargs)
received_sender_id = kwargs["addr"]
self.assertIn("type_identifier", kwargs)
received_msg_id = kwargs["type_identifier"]
self.assertEqual(received_msg_id, 0)
await sender_ep.stop()
await asyncio.sleep(0.1)
self.assertTrue(sender_on_stopped_mock.called)
self.assertTrue(sender_on_peer_unavailable_mock.called)
await receiver_ep.stop()
self.assertTrue(receiver_on_stopped_mock.called)
self.assertTrue(receiver_on_peer_unavailable_mock.called)
async def test_json_sender_receiver_interactions(self):
""" check JSON sender and receiver interactions """
receiver_on_message_mock = asynctest.CoroutineMock()
receiver_on_started_mock = asynctest.CoroutineMock()
receiver_on_stopped_mock = asynctest.CoroutineMock()
receiver_on_peer_available_mock = asynctest.CoroutineMock()
receiver_on_peer_unavailable_mock = asynctest.CoroutineMock()
receiver_ep = MtiDatagramEndpoint(
on_message=receiver_on_message_mock,
on_started=receiver_on_started_mock,
on_stopped=receiver_on_stopped_mock,
on_peer_available=receiver_on_peer_available_mock,
on_peer_unavailable=receiver_on_peer_unavailable_mock,
content_type=serialization.CONTENT_TYPE_JSON,
)
await receiver_ep.start(local_addr=("127.0.0.1", 0))
self.assertTrue(receiver_on_started_mock.called)
address, port = receiver_ep.bindings[0]
sender_on_message_mock = asynctest.CoroutineMock()
sender_on_started_mock = asynctest.CoroutineMock()
sender_on_stopped_mock = asynctest.CoroutineMock()
sender_on_peer_available_mock = asynctest.CoroutineMock()
sender_on_peer_unavailable_mock = asynctest.CoroutineMock()
sender_ep = MtiDatagramEndpoint(
on_message=sender_on_message_mock,
on_started=sender_on_started_mock,
on_stopped=sender_on_stopped_mock,
on_peer_available=sender_on_peer_available_mock,
on_peer_unavailable=sender_on_peer_unavailable_mock,
content_type=serialization.CONTENT_TYPE_JSON,
)
await sender_ep.start(remote_addr=(address, port))
await asyncio.sleep(0.3)
self.assertTrue(sender_on_started_mock.called)
self.assertTrue(sender_on_peer_available_mock.called)
self.assertTrue(receiver_on_peer_available_mock.called)
# Send a msg without identifier from sender to receiver
test_msg_in = dict(latitude=130.0, longitude=-30.0, altitude=50.0)
sender_ep.send(test_msg_in)
await asyncio.sleep(0.1)
self.assertTrue(receiver_on_message_mock.called)
(args, kwargs) = receiver_on_message_mock.call_args_list[0]
ep, received_msg = args
received_sender_id = kwargs["addr"]
received_msg_id = kwargs["type_identifier"]
self.assertIsInstance(ep, MtiDatagramEndpoint)
self.assertEqual(received_msg, test_msg_in)
self.assertEqual(received_msg_id, 0)
# Send a msg with identifier from sender to receiver
receiver_on_message_mock.reset_mock()
type_identifier = 2
sender_ep.send(test_msg_in, type_identifier=type_identifier)
await asyncio.sleep(0.1)
self.assertTrue(receiver_on_message_mock.called)
(args, kwargs) = receiver_on_message_mock.call_args_list[0]
ep, received_msg = args
received_sender_id = kwargs["addr"]
received_msg_id = kwargs["type_identifier"]
self.assertIsInstance(ep, MtiDatagramEndpoint)
self.assertEqual(received_msg, test_msg_in)
self.assertEqual(received_msg_id, type_identifier)
# graceful shutdown
await sender_ep.stop()
await asyncio.sleep(0.1)
self.assertTrue(sender_on_stopped_mock.called)
self.assertTrue(sender_on_peer_unavailable_mock.called)
await receiver_ep.stop()
self.assertTrue(receiver_on_stopped_mock.called)
self.assertTrue(receiver_on_peer_unavailable_mock.called)
@unittest.skipUnless(serialization.have_msgpack, "requires msgpack")
async def test_msgpack_sender_receiver_interactions(self):
""" check msgpack sender and receiver interactions """
receiver_on_message_mock = asynctest.CoroutineMock()
receiver_on_started_mock = asynctest.CoroutineMock()
receiver_on_stopped_mock = asynctest.CoroutineMock()
receiver_on_peer_available_mock = asynctest.CoroutineMock()
receiver_on_peer_unavailable_mock = asynctest.CoroutineMock()
receiver_ep = MtiDatagramEndpoint(
on_message=receiver_on_message_mock,
on_started=receiver_on_started_mock,
on_stopped=receiver_on_stopped_mock,
on_peer_available=receiver_on_peer_available_mock,
on_peer_unavailable=receiver_on_peer_unavailable_mock,
content_type=serialization.CONTENT_TYPE_MSGPACK,
)
await receiver_ep.start(local_addr=("127.0.0.1", 0))
self.assertTrue(receiver_on_started_mock.called)
address, port = receiver_ep.bindings[0]
sender_on_message_mock = asynctest.CoroutineMock()
sender_on_started_mock = asynctest.CoroutineMock()
sender_on_stopped_mock = asynctest.CoroutineMock()
sender_on_peer_available_mock = asynctest.CoroutineMock()
sender_on_peer_unavailable_mock = asynctest.CoroutineMock()
sender_ep = MtiDatagramEndpoint(
on_message=sender_on_message_mock,
on_started=sender_on_started_mock,
on_stopped=sender_on_stopped_mock,
on_peer_available=sender_on_peer_available_mock,
on_peer_unavailable=sender_on_peer_unavailable_mock,
content_type=serialization.CONTENT_TYPE_MSGPACK,
)
await sender_ep.start(remote_addr=(address, port))
await asyncio.sleep(0.3)
self.assertTrue(sender_on_started_mock.called)
self.assertTrue(sender_on_peer_available_mock.called)
self.assertTrue(receiver_on_peer_available_mock.called)
# Send a msg without identifier from sender to receiver
test_msg_in = dict(latitude=130.0, longitude=-30.0, altitude=50.0)
sender_ep.send(test_msg_in)
await asyncio.sleep(0.1)
self.assertTrue(receiver_on_message_mock.called)
(args, kwargs) = receiver_on_message_mock.call_args_list[0]
ep, received_msg = args
received_sender_id = kwargs["addr"]
received_msg_id = kwargs["type_identifier"]
self.assertIsInstance(ep, MtiDatagramEndpoint)
self.assertEqual(received_msg, test_msg_in)
self.assertEqual(received_msg_id, 0)
# Send a msg with identifier from sender to receiver
receiver_on_message_mock.reset_mock()
type_identifier = 2
sender_ep.send(test_msg_in, type_identifier=type_identifier)
await asyncio.sleep(0.1)
self.assertTrue(receiver_on_message_mock.called)
(args, kwargs) = receiver_on_message_mock.call_args_list[0]
ep, received_msg = args
received_sender_id = kwargs["addr"]
received_msg_id = kwargs["type_identifier"]
self.assertIsInstance(ep, MtiDatagramEndpoint)
self.assertEqual(received_msg, test_msg_in)
self.assertEqual(received_msg_id, type_identifier)
# graceful shutdown
await sender_ep.stop()
await asyncio.sleep(0.1)
self.assertTrue(sender_on_stopped_mock.called)
self.assertTrue(sender_on_peer_unavailable_mock.called)
await receiver_ep.stop()
self.assertTrue(receiver_on_stopped_mock.called)
self.assertTrue(receiver_on_peer_unavailable_mock.called)
@unittest.skipUnless(serialization.have_protobuf, "requires google protobuf")
async def test_protobuf_sender_receiver_interactions(self):
""" check protobuf sender and receiver interactions """
from position_pb2 import Position
protobuf_data = Position(
latitude=130.0, longitude=-30.0, altitude=50.0, status=Position.SIMULATED
)
receiver_on_message_mock = asynctest.CoroutineMock()
receiver_on_started_mock = asynctest.CoroutineMock()
receiver_on_stopped_mock = asynctest.CoroutineMock()
receiver_on_peer_available_mock = asynctest.CoroutineMock()
receiver_on_peer_unavailable_mock = asynctest.CoroutineMock()
receiver_ep = MtiDatagramEndpoint(
on_message=receiver_on_message_mock,
on_started=receiver_on_started_mock,
on_stopped=receiver_on_stopped_mock,
on_peer_available=receiver_on_peer_available_mock,
on_peer_unavailable=receiver_on_peer_unavailable_mock,
content_type=serialization.CONTENT_TYPE_PROTOBUF,
)
await receiver_ep.start(local_addr=("127.0.0.1", 0))
self.assertTrue(receiver_on_started_mock.called)
address, port = receiver_ep.bindings[0]
sender_on_message_mock = asynctest.CoroutineMock()
sender_on_started_mock = asynctest.CoroutineMock()
sender_on_stopped_mock = asynctest.CoroutineMock()
sender_on_peer_available_mock = asynctest.CoroutineMock()
sender_on_peer_unavailable_mock = asynctest.CoroutineMock()
sender_ep = MtiDatagramEndpoint(
on_message=sender_on_message_mock,
on_started=sender_on_started_mock,
on_stopped=sender_on_stopped_mock,
on_peer_available=sender_on_peer_available_mock,
on_peer_unavailable=sender_on_peer_unavailable_mock,
content_type=serialization.CONTENT_TYPE_PROTOBUF,
)
await sender_ep.start(remote_addr=(address, port))
await asyncio.sleep(0.3)
self.assertTrue(sender_on_started_mock.called)
self.assertTrue(sender_on_peer_available_mock.called)
self.assertTrue(receiver_on_peer_available_mock.called)
type_identifier = 2
receiver_ep.register_message(type_identifier, Position)
# Send a msg with identifier from sender to receiver
test_msg_in = protobuf_data
sender_ep.send(test_msg_in, type_identifier=type_identifier)
await asyncio.sleep(0.1)
self.assertTrue(receiver_on_message_mock.called)
(args, kwargs) = receiver_on_message_mock.call_args_list[0]
ep, received_msg = args
received_sender_id = kwargs["addr"]
received_msg_id = kwargs["type_identifier"]
self.assertIsInstance(ep, MtiDatagramEndpoint)
self.assertEqual(received_msg, test_msg_in)
self.assertEqual(received_msg_id, type_identifier)
await sender_ep.stop()
await asyncio.sleep(0.1)
self.assertTrue(sender_on_stopped_mock.called)
self.assertTrue(sender_on_peer_unavailable_mock.called)
await receiver_ep.stop()
self.assertTrue(receiver_on_stopped_mock.called)
self.assertTrue(receiver_on_peer_unavailable_mock.called)
``` |
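For reference, the sender/receiver wiring these tests exercise can be condensed as below. The `MtiDatagramEndpoint` keyword arguments, the `start`/`stop`/`send` calls, and the `bindings` attribute are taken from the test code above; the callback body and the use of `asyncio.run` are illustrative assumptions.
```python
import asyncio
from gestalt import serialization
from gestalt.datagram.mti import MtiDatagramEndpoint

async def main():
    def on_message(endpoint, msg, addr=None, type_identifier=None):
        print(f"received {msg} (type_identifier={type_identifier}) from {addr}")

    receiver = MtiDatagramEndpoint(
        on_message=on_message, content_type=serialization.CONTENT_TYPE_JSON
    )
    await receiver.start(local_addr=("127.0.0.1", 0))
    host, port = receiver.bindings[0]

    sender = MtiDatagramEndpoint(content_type=serialization.CONTENT_TYPE_JSON)
    await sender.start(remote_addr=(host, port))

    # JSON-serialized dict framed with a type identifier, as in the tests.
    sender.send({"latitude": 130.0, "longitude": -30.0}, type_identifier=2)
    await asyncio.sleep(0.1)

    await sender.stop()
    await receiver.stop()

# asyncio.run(main())
```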
{
"source": "johnsjob/master-thesis",
"score": 2
} |
#### File: master-thesis/source/calibration_algorithm.py
```python
from __future__ import division
#----------------------------------------#
import numpy
import numpy as n
import utils
from pylab import xlim, ylim
from numpy.linalg import solve, lstsq, det, inv, cond, svd
from numpy import array as mat, log10, diag
#----------------------------------------
# custom imports
from helperfunctions_math import *
from helperfunctions_plot import *
import plane_relative as plane_tools
#----------------------------------------#
## --Refactoring information--
## geometry_info = {'plane':current_plane}
## geometry_info['local_tool_orientation'] : 3x3-dim numpy array
## geometry_info['local_delta_vector'] : 3-dim numpy array
## geometry_info['correct_solution_geometry'] : solution_tensor: 3x4-dim numpy array
##
##
## geometry_info['angles'] = list_of:
## {
## rot_val : float
## tilt_Val : float
## skew_val : float
## }
## # Xtcp (flange) orientation in global space, generated relative to the paper plane
## geometry_info['Xflange_orientation_relative_to_paper_plane']:
## list_of: hom
##
## #generate pen-tip position in Anoto2d in mm
## geometry_info['pentip_2d']:
## list_of: 2d tuple
##
## # generate global Xtcp position in mm
## geometry_info['Xtcp0']:
## list_of: 3-dim numpy array
##
## # generate relative-tool-orientation in world coordinates
## geometry_info['global_tool_orientation']:
## list_of: 3x3-dim of numpy array
##
## geometry_info['forward_kinematics']:
## list_of: hom
## from: global_tool_orientation, Xtcp0
# num_points = 12 absolute minimum, actually 12+1
num_points = 500
#========================================#
# placing paper origin
o = mat([1000*rand(), 1000*rand(), 1000*rand()])
### defining paper orientation
r,t,s = -rand_range(-180,180), rand_range(-180,180), rand_range(-180, 180)
# define the paper-orientation in global (robot-system) directions
# however, the paper uses "euclidean" orientation ('local' = euclidean)
plane = plane_tools.define_plane_from_angles(o, r, t, s, 'local')
#################################################
# define the units so less mistakes are made
chosen_unit = 'mm'
unit_descriptor = {
'm' : 1.0,
'mm': 1000.0
}
unit = unit_descriptor[chosen_unit]
metre = unit
millimetre = unit / 1000.0
#----------------------------------------
# define delta vector which we want to find (in tool-space / local space)
# in unit lengths
L = 233 * millimetre
local_delta_vector = mat([1,2,3])
local_delta_vector = (local_delta_vector / norm(local_delta_vector))*L #length L
# Orientation of the tool in tool (local) coordinate system
local_tool_orientation = rotation_matrix_rot_tilt_skew(-10, 20, 30)
#----------------------------------------
# define the anoto point spread in unit lengths
#plane_point_spread = 47 * millimetre
plane_point_spread = 200 * millimetre
# define max-tilt of pen
pen_max_tilt = 40
#pen_max_tilt = 10
#----------------------------------------
def merge_dicts(*list_of_dicts):
# init
ret = {}
keys = []
# get all unique keys
for d in list_of_dicts:
keys += d.keys()
keys = set().union(keys)
# for all keys ...
for k in keys:
# prepare a k:th-list if none exists
if not ret.has_key(k):
ret[k] = []
# for all dicts ...
for d in list_of_dicts:
# if dict has key ...
if d.has_key(k):
# check so that the key is not an empty list ...
empty = False
try:
empty = len(d[k]) == 0
except:
# not a list/array-type, equivalent to non-empty list
pass
# append item or non-empty list
if not empty:
ret[k].append( d[k] )
# for all keys ...
for k in keys:
        # if we only got one item for this key from all the dicts ...
if len(ret[k]) == 1:
# un-list it
ret[k] = ret[k][0]
        # remove empty lists if any manage to get here
elif len(ret[k]) == 0:
del ret[k]
else:
# turn remaining lists into numpy-arrays
ret[k] = mat(ret[k])
return ret
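# Illustrative note (added comment, not in the original source): merge_dicts
# stacks values that appear in several of the input dicts into numpy arrays and
# un-wraps values that appear only once, e.g.
#   merge_dicts({'a': 1}, {'a': 2, 'b': 3})  ->  {'a': array([1, 2]), 'b': 3}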
#----------------------------------------
def rad_to_ang(v):
return v*180/pi
#----------------------------------------
def vec_ang(v,w):
res = matmul(v,w) / (norm(v) * norm(w))
return numpy.arccos(res) * 180 / numpy.pi
#----------------------------------------
def vec_diff(v1, v2):
err = norm(v1 - v2)
norm_err = abs(norm(v1) - norm(v2))
angle_err = rad_to_ang(acos( (v1/norm(v1)).dot((v2/norm(v2))) ))
return err, norm_err, angle_err
#----------------------------------------
def problem_formulation(dx, dy, dR):
r11,r12,r13,r21,r22,r23,r31,r32,r33 = (-dR).reshape(9)
S1 = [dx, dy, r11, 0, 0, r12, 0, 0, r13]
S2 = [0, 0, r21, dx, dy, r22, 0, 0, r23]
S3 = [0, 0, r31, 0, 0, r32, dx, dy, r33]
row_value = 3
col_value = 9
return mat([S1, S2, S3]), row_value, col_value
#----------------------------------------
def generate_random_Anoto_Point(L):
px = L*rand()
py = L*rand()
return px, py
#----------------------------------------
def solve_tool0_tip_alt(array_forward_kinematics_T44,
array_anoto2D,
array_lhs_sys_eq = None):
try:
num_points,m,n = array_forward_kinematics_T44.shape
except Exception as e:
print 'solve_tool0_tip:\n\tWrong shape or type for input parameter: array_forward_kinematics_T44'
try:
m,n = array_anoto2D.shape
except Exception as e:
print 'solve_tool0_tip:\n\tWrong shape or type for input parameter: array_anoto2D'
l_xtcp = array_forward_kinematics_T44[:, 0:3, 3]
l_R = array_forward_kinematics_T44[:, 0:3, 0:3]
dxtcp = diff(l_xtcp, axis=0)
dR = diff(l_R, axis=0)
danoto2D = diff(array_anoto2D, axis=0)
lhs = []
rhs = []
l_cond = []
l_err = []
for i in xrange(0, num_points-1): #one less after forward-differences....
A, row_value, col_value = array_lhs_sys_eq(danoto2D[i,0], danoto2D[i,1], dR[i])
b = dxtcp[i]
lhs.append(A)
rhs.append(b)
lhs = mat(lhs).reshape(((num_points-1) * row_value, col_value))
#shape the rhs depending on shape-info from lhs
if row_value != 1:
rhs = mat(rhs).reshape((num_points-1) * row_value)
else:
rhs = mat(rhs)
L = lhs.T.dot(lhs)
R = lhs.T.dot(rhs)
X, Y, D = solve(L, R).reshape(3,3).T
#X, Y, D = lstsq(lhs,rhs)[0].reshape(3,3).T
X = X / numpy.linalg.norm(X)
Y = Y / numpy.linalg.norm(Y)
Z = cross(X, Y)
# Z = Z / numpy.linalg.norm(Z)
result = mat([X,Y,Z,D]).T
condition = cond(L)
return result, condition
#----------------------------------------
def generate_Xflange_orientation(plane,rot, tilt, skew):
"""
Generate Xtcp-orientation in world coordinates, using Anoto-paper
    orientation formulation.
    Planes are homogeneous matrices; if we want the orientation
    we need the 0:3,0:3 submatrix.
"""
return plane_tools.define_plane_relative_from_angles(plane, (0,0,0),
rot, tilt, skew,'global')[:3,:3]
#----------------------------------------
def setup_geometry(current_plane, point_spread, num_points, perturbations=None):
global local_delta_vector, local_tool_orientation,\
millimetre, pen_max_tilt
geometry_info = {'plane':current_plane}
geometry_info['local_tool_orientation'] = local_tool_orientation
geometry_info['local_delta_vector'] = local_delta_vector
geometry_info['correct_solution_geometry'] = mat(list(geometry_info['plane'][:3,:3].T.flatten()) +
list(geometry_info['local_delta_vector'])).reshape(4,3).T
#generating points and "forward-kinematics"
collected_data = []
for k in xrange(0,num_points):
info = {}
info['angles'] = \
{
'rot': rand_range(-180,180),
'tilt': rand_range(0, pen_max_tilt),
'skew': rand_range(-90, 180)
}
# Xtcp (flange) orientation in global space, generated relative to the paper plane
info['Xflange_orientation_relative_to_paper_plane'] = \
generate_Xflange_orientation(geometry_info['plane'],**info['angles'])
#generate pen-tip position in Anoto2d in mm
px,py = generate_random_Anoto_Point(point_spread)
info['pentip_2d'] = [px,py]
# generate global Xtcp position in mm
info['Xtcp0'] = (plane_tools.get_plane_point(geometry_info['plane'], px, py)[:3] - \
matmul( info['Xflange_orientation_relative_to_paper_plane'], local_delta_vector[:3]) )
# ^OK
# generate relative-tool-orientation in world coordinates
info['global_tool_orientation'] = matmul( info['Xflange_orientation_relative_to_paper_plane'],
local_tool_orientation )
# ^OK
info['forward_kinematics'] = homogenous_matrix( info['Xflange_orientation_relative_to_paper_plane'],
info['Xtcp0'] )
# perturbations
if type(perturbations) in [list, tuple]:
if 'tip' in perturbations:
tilt = info['angles']['tilt']
r = lambda: (0.6*rand()-0.3) * millimetre
if tilt >= 0:
r2 = lambda: (0.02*rand()-0.01)*(1-abs(info['angles']['tilt'])/numpy.max(abs(info['angles']['tilt']))) * millimetre
info['pentip_2d'] = [px + r() + r2(), py + r() + r2()]
else:
r2 = lambda: (0.02*rand()-0.01) * millimetre
info['pentip_2d'] = [px + r() + r2(), py + r() + r2()]
# ^OK
collected_data.append(info)
geometry_info['data'] = merge_dicts(*collected_data)
geometry_info['data']['angles'] = merge_dicts(*geometry_info['data']['angles'])
return geometry_info
#----------------------------------------
def find_solution_pen_tip(geometry_info, included_solutions_from_start = -1):
result, cond_num = solve_tool0_tip_alt(geometry_info['data']['forward_kinematics'][:included_solutions_from_start],
geometry_info['data']['pentip_2d'][:included_solutions_from_start],
problem_formulation)
return result, cond_num
##def find_solution_pen_ori(geometry_info, included_solutions_from_start = -1):
## # solve for orientation s which should be same as local_tool_orientation
## l,m,n = geometry_info['data']['Xflange_orientation_relative_to_paper_plane'].shape
## flange_orientation_reshaped = geometry_info['data']['Xflange_orientation_relative_to_paper_plane'].reshape(l*m,n)
##
## lhs = matmul( flange_orientation_reshaped.T,
## flange_orientation_reshaped)
##
## l,m,n = geometry_info['data']['global_tool_orientation'].shape
## rhs = matmul( flange_orientation_reshaped.T,
## geometry_info['data']['global_tool_orientation'].reshape(l*m,n))
##
## solved_tool_orientation = linalg.solve(lhs, rhs)
##
## #normalize result
## solved_tool_orientation[:,0] = solved_tool_orientation[:,0] / norm(solved_tool_orientation[:,0])
## solved_tool_orientation[:,1] = solved_tool_orientation[:,1] / norm(solved_tool_orientation[:,1])
## solved_tool_orientation[:,2] = solved_tool_orientation[:,2] / norm(solved_tool_orientation[:,2])
## return solved_tool_orientation, cond(lhs)
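# Note (added comment, not in the original source): _solve_orientation below is,
# in effect, an orthogonal-Procrustes / Kabsch-style fit performed on the
# flattened 3x3 orientations: both stacks are centred, a cross-covariance
# matrix H is accumulated from outer products, and the SVD factors of H give
# the solution tensor U.dot(V).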
def _solve_orientation(As, Bs):
Ac = As - n.mean(As, axis=0)
Bec = Bs - n.mean(Bs, axis=0)
h = lop(n.outer, Ac, Bec)
H = n.sum(h, axis=0)
U,S,V = svd(H)
# solve for solution tensor of order 9x9
# in this tensor the best solution resides in index 0,4,8 in the tensor form
# of 9x3x3 which corresponds to diagonal elements of the 3x3x3x3 solution tensor
solution_tensor = U.dot(V)
# best column-solutions resides in the diagonal of the 3x3x3x3 solution tensor
C1, C2, C3 = solution_tensor[0::3, 0::3],\
solution_tensor[1::3, 1::3],\
solution_tensor[2::3, 2::3]
D1, D2, D3 = solution_tensor[0:3,0:3],\
solution_tensor[3:6,3:6],\
solution_tensor[6:9,6:9]
solution1 = mat([C1.T, C2.T, C3.T])
solution2 = mat([D1, D2, D3])
return solution1, solution2, solution_tensor, [U,S,V]
def find_solution_pen_ori(geometry_info, included_solutions_from_start = -1):
# solve for orientation s which should be same as local_tool_orientation
#l,m,n = geometry_info['data']['Xflange_orientation_relative_to_paper_plane'].shape
flange_orientation = geometry_info['data']['forward_kinematics'][:,:3,:3]
pen_orientation = geometry_info['data']['global_tool_orientation'][:,:3,:3]
solved_tool_orientation = _solve_orientation(flange_orientation, pen_orientation)
## #normalize result
## solved_tool_orientation[:,0] = solved_tool_orientation[:,0] / norm(solved_tool_orientation[:,0])
## solved_tool_orientation[:,1] = solved_tool_orientation[:,1] / norm(solved_tool_orientation[:,1])
## solved_tool_orientation[:,2] = solved_tool_orientation[:,2] / norm(solved_tool_orientation[:,2])
return solved_tool_orientation, 0.0
#----------------------------------------
def perform_solution_run(geometry_info):
interval = range(3,num_points)
list_of_solving = []
for k in interval:
solve_info = {}
tip_wobj_res = find_solution_pen_tip(geometry_info, k)
## solve_info['point_spread_x'] = numpy.std(geometry_info['data']['pentip_2d'][:k], axis=0)[0]
## solve_info['point_spread_y'] = numpy.std(geometry_info['data']['pentip_2d'][:k], axis=0)[1]
solve_info['tipwobj-result'] = tip_wobj_res[0]
solve_info['tip-result'] = tip_wobj_res[0][:,3]
solve_info['wobj-result'] = tip_wobj_res[0][:,:3]
solve_info['tip-cond_num'] = tip_wobj_res[1]
solve_info['orientation-result'], solve_info['orientation-cond_num'] = find_solution_pen_ori(geometry_info, k)
sol1, sol2, tens, (_u,_s,_v) = solve_info['orientation-result']
solve_info['orientation-result'] = sol2
solve_info['err-tipwobj'] = abs(geometry_info['correct_solution_geometry'] - solve_info['tipwobj-result'])
solve_info['err-tip'] = numpy.linalg.norm(solve_info['err-tipwobj'][:,3])
solve_info['err-wobj'] = numpy.linalg.norm(solve_info['err-tipwobj'][:,:3])
solve_info['err-ori'] = numpy.linalg.norm(geometry_info['local_tool_orientation'] - sol2)
list_of_solving.append(solve_info)
solving_data = merge_dicts(*list_of_solving)
solving_data['interval'] = interval
print 'solution max error tip(1) = {}\n'.format( numpy.max( abs( solving_data['err-tip'][1:]) ))
print 'solution max error tip(20) = {}'.format( numpy.max( abs( solving_data['err-tip'][20:]) ))
print 'solution mean error tip(20) = {}\n'.format( numpy.mean( abs( solving_data['err-tip'][1:21]) ))
print 'solution max error tip(40) = {}'.format( numpy.max( abs( solving_data['err-tip'][40:]) ))
print 'solution mean error tip(40) = {}\n'.format( numpy.mean( abs( solving_data['err-tip'][1:41]) ))
print 'solution error ori = {}'.format( numpy.max( abs( solving_data['err-ori'][1:]) ))
return solving_data
#----------------------------------------
def make_plots(solving_data):
global chosen_unit
logcond = log10( solving_data['tip-cond_num'] )
plot(solving_data['interval'], logcond,
'b--',label='Condition number tip/wobj',
linewidth=2)
logerr = log10( solving_data['err-tip'] )
plot(solving_data['interval'], logerr,
         'b',label='Error tip (Frobenius norm)',
linewidth=2);
logerr = log10( solving_data['err-wobj'] )
plot(solving_data['interval'], logerr,
         'g',label='Error wobj (Frobenius norm)',
linewidth=2);
## logerr = log10( solving_data['err-ori'] )
## plot(solving_data['interval'], logerr,
## 'r',label='Error ori (frobenious norm)',
## linewidth=2);
if chosen_unit == 'mm':
tol = -1
hlines(tol, solving_data['interval'][0],
solving_data['interval'][-1],
label='Tolerance = 10^{}'.format(tol))
else:
tol = -4
        hlines(tol, solving_data['interval'][0],
               solving_data['interval'][-1])
xlim(solving_data['interval'][0], solving_data['interval'][-1])
xlabel('Number of points collected', fontsize=14)
    ylabel('log10 [{}]'.format(chosen_unit), fontsize=14)
index = 4-3
plt.annotate("number of points = 4",
xy=(solving_data['interval'][index]+0.01, logerr[index]+0.2), xycoords='data',
xytext=(solving_data['interval'][index]+0.8, logerr[index]+4.5), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
)
grid()
title('Calibration algorithm verification using simulated geometry')
legend()
show()
#----------------------------------------
if __name__ == '__main__':
res = []
ks = range(1)
for k in ks:
print 'Run {} % complete!'.format(100* k / len(ks))
with utils.timing.Timer() as timer:
try:
print "Sampling points..."
geometry_info = setup_geometry(plane, plane_point_spread,
num_points, perturbations=['tip'])
print 'Collecting solving information...'
solving_data = perform_solution_run(geometry_info)
res.append(solving_data)
print
print 'Preparing plots...'
except Exception as e:
print "Run #{} of {}: failed:".format(k+1, len(ks))
print str(e)
# raise
continue
make_plots(solving_data)
if chosen_unit == 'mm':
length_tol = -1 # 1/10th millimetre
else:
length_tol = -4 # 1/10th millimetre (in meters)
for key,tol, un in zip(['err-tip', 'err-wobj'],
[length_tol, -4],
[chosen_unit, '']):
val = mat( [x[key] for x in res] )
maxval = numpy.max(val, axis=0)
minval = numpy.min(val, axis=0)
meanval = numpy.mean(val, axis=0)
if un:
unit_str = '[{}]'.format(un)
else:
unit_str = ''
plot(log10(maxval),'b', label = 'max {} {}'.format(key, unit_str))
plot(log10(meanval),'g', label = 'mean {} {}'.format(key, unit_str))
plot(log10(minval),'r', label = 'min {} {}'.format(key, unit_str))
## sx = numpy.mean(mat( [x['point_spread_x'] for x in res] ), axis=0)
## sy = numpy.mean(mat( [x['point_spread_y'] for x in res] ), axis=0)
## plot(log10(sx),'k', label = 'sx')
## plot(log10(sy),'k', label = 'sy')
hlines(tol, solving_data['interval'][0],
solving_data['interval'][-1],
label='Tolerance = 10^{} {}'.format(tol, unit_str))
legend()
xlim(solving_data['interval'][0], solving_data['interval'][-1])
xlabel('Number of measured points', fontsize=14)
ylabel('log10', fontsize=14)
if 'tip' in key:
ylim(-4, 4)
elif 'wobj' in key:
ylim(-6, 1)
title('Calibration algorithm verification with repetition using simulated geometry')
grid()
show()
```
#### File: master-thesis/source/denavit_hartenberg140.py
```python
from __future__ import division
#--------------------------#
import sys
#--------------------------#
import numpy as n
#--------------------------#
import unittest
#--------------------------#
import numpy
from numpy import pi, arctan2 as atan2, arccos as acos,\
arcsin as asin, sqrt, arctan as atan
from numpy.linalg import norm, inv
from helperfunctions_math import rand_range, mat, homogenous_rotation_z
from denavit_hartenberg import inverse_kinematics_spherical_wrist as inv_wrist,\
forward_kinematics, calc_wcp, calc_j1, pack_elbow_and_wrists
import utils
#=====================================================#
rad = lambda x: x * pi / 180.0
deg = lambda x: x * 180.0 / pi
cos2 = lambda x: n.cos(rad(x))
sin2 = lambda x: n.sin(rad(x))
cos_sats = lambda a,b,th: a**2 + b**2 - 2*a*b*cos(rad(th)); #ok
ang_sats = lambda c,a,b: deg(acos((c**2 - a**2 - b**2)/(-2*a*b))); #ok
ang_sats2 = lambda c,a,b: deg(acos((c**2 - a**2 - b**2)/(2*a*b))); #ok
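# ang_sats and ang_sats2 above encode the law of cosines for a triangle with
# sides a, b and opposite side c (c**2 = a**2 + b**2 - 2*a*b*cos(C)):
# ang_sats returns the interior angle C in degrees, while ang_sats2 returns its
# supplement (180 - C), which is the form used below for the elbow angle th3
# between the upper arm (alpha) and the forearm (beta).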
round = lambda x: custom_round(x)
atan = lambda x: deg(n.arctan(x))
atan2 = lambda y,x: deg(n.arctan2(y,x))
up_to = lambda i: custom_round(matmul(*[debug[x] for x in range(i)]))
#----------------------------------------------------------------------------------------------------------#
DH_TABLE = { 'table':[-70, 90, 352, 180, 'R',
360, 0, 0, 90, 'R',
0, 90, 0, 180, 'R',
0, 90, 380, 180, 'R',
0, 90, 0, 180, 'R',
0, 0, 65, 0, 'R'],
'unit': 'mm',
'order': ['A','alpha','D','theta'],
'convention': 'standard',
'tool': None
}
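# Layout note (illustration only): the flat 'table' list holds six joints with
# five entries each -- the four DH parameters in the order given by 'order'
# (A, alpha, D, theta) followed by the joint type ('R' = revolute), e.g.
#
#   rows = [DH_TABLE['table'][i*5:(i+1)*5] for i in range(6)]
#   rows[0]   # -> [-70, 90, 352, 180, 'R'], the parameters of joint 1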
#----------------------------------------------------------------------------------------------------------#
def backward_facing_elbow_down(dh_table, T44):
    '''
    Calculates the backward-facing elbow-down solution.
    Note: this is in reality an elbow-down solution; the name merely
    reflects that it appears "flipped" upside-down because the base
    has turned 180 degrees.
    '''
    #Geometrical parameters
wcp = calc_wcp(T44, 0.065)
#First angle - j1, used to adjust a point-position
j1 = calc_j1(wcp, flipped=True)
p0 = mat([70e-3, 0, 352e-3])
p0 = homogenous_rotation_z(j1)[0:3,0:3].dot(p0)
x0 = norm(wcp[0:2] - p0[0:2])
h1 = p0[2]
h2 = wcp[2]
s = h2 - h1
x1 = norm(p0 - wcp)
beta = 380e-3
alpha = 360e-3
th3 = ang_sats2(x1, alpha, beta)
j3 = -90 + th3
th21 = atan2(s, x0)
th22 = atan2(beta*sin2(th3), alpha + beta*cos2(th3))
j2 = -90 + th21 - th22
#packing the solutions in a dynamic way
## j4, j5, j6,\
## j41,j51,j61, \
## j42,j52,j62 = inverse_kinematics_spherical_wrist(dh_table, j1, j2, j3, T44)
##
## return (j1, j2, j3, j4, j5, j6),\
## (j1, j2, j3, j41, j51, j61), \
## (j1, j2, j3, j42, j52, j62)
result = pack_elbow_and_wrists(dh_table, j1, j2, j3, T44)
return result
#----------------------------------------------------------------------------------------------------------#
def backward_facing_elbow_up(dh_table, T44):
    '''
    Calculates the backward-facing elbow-up solution.
    Note: this is in reality an elbow-up solution; the name merely
    reflects that it appears "flipped" upside-down because the base
    has turned 180 degrees.
    '''
    #Geometrical parameters
wcp = calc_wcp(T44, 0.065)
#First angle - j1, used to adjust a point-position
j1 = calc_j1(wcp, flipped=True)
p0 = mat([70e-3, 0, 352e-3])
p0 = homogenous_rotation_z(j1)[0:3,0:3].dot(p0)
x0 = norm(wcp[0:2] - p0[0:2])
h1 = p0[2]
h2 = wcp[2]
s = h2 - h1
x1 = norm(p0 - wcp)
beta = 380e-3
alpha = 360e-3
th3 = ang_sats2(x1, alpha, beta)
j3 = -90 - th3
th21 = atan2(s, x0)
th22 = atan2(beta*sin2(th3), alpha + beta*cos2(th3))
j2 = -90 + (th21 + th22)
## j4, j5, j6,\
## j41,j51,j61, \
## j42,j52,j62 = inverse_kinematics_spherical_wrist(dh_table, j1, j2, j3, T44)
##
## return (j1, j2, j3, j4, j5, j6),\
## (j1, j2, j3, j41, j51, j61), \
## (j1, j2, j3, j42, j52, j62)
result = pack_elbow_and_wrists(dh_table, j1, j2, j3, T44)
return result
#----------------------------------------------------------------------------------------------------------#
def elbow_up(dh_table, T44):
'''
Calculates forward-facing elbow-up.
'''
    #Geometrical parameters
wcp = calc_wcp(T44, 0.065)
#First angle - j1, used to adjust a point-position
j1 = calc_j1(wcp, flipped=False)
p0 = mat([70e-3, 0, 352e-3])
p0 = homogenous_rotation_z(j1)[0:3,0:3].dot(p0)
x0 = norm(wcp[0:2] - p0[0:2])
h1 = p0[2]
h2 = wcp[2]
s = h2 - h1
x1 = norm(p0 - wcp)
beta = 380e-3
alpha = 360e-3
th3 = ang_sats2(x1, alpha, beta)
j3 = th3 - 90
th21 = atan2(s, x0)
th22 = atan2(beta*sin2(th3), alpha + beta*cos2(th3))
j2 = 90 - (th21 + th22)
if norm(wcp[:2])-norm(p0[:2]) < 0:
j2 = -90 + (th21 - th22)
## j3 = -90-th3
## j4, j5, j6,\
## j41,j51,j61, \
## j42,j52,j62 = inverse_kinematics_spherical_wrist(dh_table, j1, j2, j3, T44)
#### import pdb; pdb.set_trace()
## return (j1, j2, j3, j4, j5, j6),\
## (j1, j2, j3, j41, j51, j61), \
## (j1, j2, j3, j42, j52, j62)
result = pack_elbow_and_wrists(dh_table, j1, j2, j3, T44)
return result
#----------------------------------------------------------------------------------------------------------#
def elbow_down(dh_table, T44):
'''
Calculates forward-facing elbow-down.
'''
    #Geometrical parameters
wcp = calc_wcp(T44, 0.065)
#First angle - j1, used to adjust a point-position
j1 = calc_j1(wcp, flipped=False)
p0 = mat([70e-3, 0, 352e-3])
p0 = homogenous_rotation_z(j1)[0:3,0:3].dot(p0)
x0 = norm(wcp[0:2] - p0[0:2])
h1 = p0[2]
h2 = wcp[2]
s = h2 - h1
x1 = norm(p0 - wcp)
beta = 380e-3
alpha = 360e-3
th3 = ang_sats2(x1, alpha, beta)
j3 = -th3 - 90
th21 = atan2(s, x0)
th22 = atan2(beta*sin2(th3), alpha + beta*cos2(th3))
j2 = 90 - (th21 - th22)
if norm(wcp[:2])-norm(p0[:2]) < 0:
j2 = -90 + (th21+th22)
## j3 = -90-th3
## j4, j5, j6,\
## j41,j51,j61, \
## j42,j52,j62 = inverse_kinematics_spherical_wrist(dh_table, j1, j2, j3, T44)
##
## return (j1, j2, j3, j4, j5, j6),\
## (j1, j2, j3, j41, j51, j61), \
## (j1, j2, j3, j42, j52, j62)
result = pack_elbow_and_wrists(dh_table, j1, j2, j3, T44)
return result
#----------------------------------------------------------------------------------------------------------#
def inverse_kinematics_elbow_up(dh_table, T44, flipped = False):
'''
Wrapper function for forward-facing elbow-up,
    and backward-facing elbow-up
'''
if not flipped:
return elbow_up(dh_table, T44)
else:
return backward_facing_elbow_up(dh_table, T44)
def inverse_kinematics_elbow_down(dh_table, T44, flipped = False):
'''
Wrapper function for forward-facing elbow-down,
and backward-facing elbow-down
'''
if not flipped:
return elbow_down(dh_table, T44)
else:
return backward_facing_elbow_down(dh_table, T44)
#----------------------------------------------------------------------------------------------------------#
# INVERSE KINEMATICS - WRAPPERS
#----------------------------------------------------------------------------------------------------------#
def inverse_kinematics_irb140(dh_table, T44):
'''
Wrapper function that calculates the 20 analytical solutions
to the IRB140 robot.
'''
if type(T44) is list:
T44 = mat(T44)
dim = T44.shape
if len(dim) != 2:
raise ArithmeticError('Forward-kinematics must be a 4x4 matrix!')
if dim[0] != dim[1]:
raise ArithmeticError('Forward-kinematics must be square!')
if dim[0] != 4:
raise ArithmeticError('Forward-kinematics must have dimension of 4!')
if not dh_table.has_key('tool'):
dh_table['tool'] = None
tool = dh_table['tool']
if not (tool is None):
T44 = T44.dot( inv(tool) )
# x5 for each elb_x
sol_elbup = inverse_kinematics_elbow_up(dh_table, T44)
sol_elbdown = inverse_kinematics_elbow_down(dh_table, T44)
sol_elbup_backward = inverse_kinematics_elbow_up(dh_table, T44, flipped = True)
sol_elbdown_backward = inverse_kinematics_elbow_down(dh_table, T44, flipped = True)
    #first column is the first solution and so forth
## ret = mat(zip(sol_elbup1, sol_elbdown1, sol_elbup1_fl, sol_elbdown1_fl,
## sol_elbup2, sol_elbdown2, sol_elbup2_fl, sol_elbdown2_fl,
## sol_elbup3, sol_elbdown3, sol_elbup3_fl, sol_elbdown3_fl))
    #first column is the first solution and so forth
ret = mat( zip( sol_elbup, sol_elbdown, sol_elbup_backward, sol_elbdown_backward ) )
k,m,n = ret.shape
ret = ret.reshape(k*m,n)
return ret.T
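# Usage sketch (illustrative joint values; mirrors the unit tests further down):
# each column of the returned 6x20 matrix is one joint-space candidate, and a
# reachable candidate reproduces the input pose under forward kinematics.
#
#   pose = forward_kinematics(10, 20, 30, 0, 45, 0, **DH_TABLE)['flange']
#   for s in inverse_kinematics_irb140(DH_TABLE, pose).T:
#       err = norm(forward_kinematics(*s, **DH_TABLE)['flange'] - pose)
#       # err is ~0 for reachable configurations, NaN when the configuration
#       # cannot reach the pose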
def calc_valid_raw_invkin_irb140(T44):
return __inverse_kinematics_pose(T44, filtering=True, raw_solutions=True)
def calc_valid_invkin_irb140(T44):
return __inverse_kinematics_pose(T44, filtering=True, raw_solutions=False)
def calc_invkin_irb140(T44, filtering=False, raw_solutions=False):
return __inverse_kinematics_pose(T44, filtering, raw_solutions)
def __inverse_kinematics_pose(T44, filtering=False, raw_solutions=False):
# perform inverse kinematics on a single frame
angle_solutions = inverse_kinematics_irb140(DH_TABLE, T44)
if not raw_solutions:
extra = [angle_solutions]
for index in xrange(6):
extra.append( generate_modulo_solutions(angle_solutions, index, 360.0))
extra.append( generate_modulo_solutions(angle_solutions, index, -360.0))
pass
angle_solutions = merge_solutions(*extra)
if filtering:
angle_solutions = filter_solutions(angle_solutions)
return mat(angle_solutions.T)
#----------------------------------------------------------------------------------------------------------#
# INVERSE KINEMATICS - SOLUTION HANDLING
#----------------------------------------------------------------------------------------------------------#
def check_solution(j1,j2,j3,j4,j5,j6, inclusive=True):
sol = check_range(j1, -180, 180, inclusive)
sol &= check_range(j2, -90, 110, inclusive)
sol &= check_range(j3, -230, 50, inclusive)
sol &= check_range(j4, -200, 200, inclusive)
sol &= check_range(j5, -115, 115, inclusive)
sol &= check_range(j6, -400, 400, inclusive)
return sol
def check_solution_elbow_up(j1,j2,j3,j4,j5,j6, inclusive=True):
    sol = check_solution(j1, j2, j3, j4, j5, j6, inclusive=inclusive)
sol &= (n.round(j2, decimals=3) >= 0.0)
sol &= check_range(j3, -90, 50, inclusive)
return sol
def filter_solutions(solutions, filter_function = check_solution):
result = []
for s in solutions.T:
if filter_function(*s) == True:
result.append( s )
# returns non-flipped, flipped
return mat(zip(*result))
def merge_solutions(*args):
result = []
for m in args:
result += zip(*m)
return mat(zip(*result))
def __modulo_solutions(solution_matrix, index, modulo=360.0):
for s in solution_matrix.T:
result = s.copy()
value = result[index]
result[index] = value + modulo
yield result
def generate_modulo_solutions(solution_matrix, index, modulo=360.0):
return mat(zip(*__modulo_solutions(solution_matrix, index, modulo)))
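# Sketch of how the two helpers above are combined (see __inverse_kinematics_pose):
# for every joint index the +/-360 degree aliases of each raw solution are
# generated and merged back into a single solution matrix before range filtering:
#
#   extra = [raw_solutions]
#   extra.append(generate_modulo_solutions(raw_solutions, 5, 360.0))   # j6 + 360
#   extra.append(generate_modulo_solutions(raw_solutions, 5, -360.0))  # j6 - 360
#   all_solutions = merge_solutions(*extra)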
#----------------------------------------------------------------------------------------------------------#
# INVERSE KINEMATICS - SOLUTION CURVE
#----------------------------------------------------------------------------------------------------------#
def inverse_kinematics_curve(trans_frames):
# perform inverse kinematics over a curve and collect all solutions
all_solutions = []
for point_frame in trans_frames:
all_solutions.append(__inverse_kinematics_pose(point_frame, filtering=True))
return mat(all_solutions)
def __find_solution_path(res, result, curr_point=0, curr_ind=0, tol=20.0):
p_curr = res[curr_point] #get the solutions for current point
solution = p_curr[curr_ind]
if curr_point+1 >= len(res):
result.append(solution)
return
else:
p_dest = res[curr_point+1]
sol_diff = map(norm, solution - p_dest)
z = zip(p_dest, sol_diff, range(len(p_dest)))
z_sorted = sorted(z, key=lambda x: x[1])
z_sorted, _, index = zip(*z_sorted)
z, _, _ = zip(*z)
sel_z = z_sorted[0]
sel_ind = index[0]
if sol_diff[sel_ind] > tol:
del result[:]
return
else:
result.append(solution)
print curr_point
__find_solution_path(res, result,
curr_point = curr_point+1,
curr_ind = sel_ind)
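# Note on __find_solution_path: it greedily walks the per-point solution sets,
# at each step picking the joint-space solution of the next point that is
# closest (Euclidean norm) to the current one, and it aborts the whole path
# (clearing 'result') if that nearest neighbour is more than 'tol' away,
# i.e. if continuing would require a configuration jump.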
def __find_path(ik_curve, index):
path = []
__find_solution_path(ik_curve, path,
curr_point=0, curr_ind=index)
return path
def find_paths(ik_curve):
result = ik_curve
total = []
with utils.timing.Timer() as t:
for index in xrange(len(result[0])):
path = __find_path(result, index)
if path:
print 'FOUND ONE'
total.append(list(path))
return mat(total)
def find_single_path(ik_curve):
result = ik_curve
total = []
with utils.timing.Timer() as t:
for index in xrange(len(result[0])):
path = __find_path(result, index)
if path:
return mat(path)
#----------------------------------------------------------------------------------------------------------#
# MISC
#----------------------------------------------------------------------------------------------------------#
def check_range(x, _min, _max, inclusive=True):
#swap if needed
if _max < _min:
_max, _min = _min, _max
if inclusive == True:
return _min <= x <= _max
else:
return _min < x < _max
def custom_round(v, prec = 1e-8):
coef = 1 / prec
return n.round(v * coef) / coef
#----------------------------------------------------------------------------------------------------------#
def clear():
for i in xrange(0,100):
print ''
#----------------------------------------------------------------------------------------------------------#
def iterdim(a, axis=0):
"""
Relevant Stackoverflow:
http://stackoverflow.com/questions/1589706/iterating-over-arbitrary-dimension-of-numpy-array
"""
a = numpy.asarray(a)
leading_indices = (slice(None),)*axis
for i in xrange(a.shape[axis]) :
yield a[leading_indices+(i,)]
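# Usage sketch: iterate over the columns of a 6xN solution matrix without
# transposing it first (see TestIRB140.test_forward_kinematics_general below):
#
#   for s in iterdim(sol, 1):
#       if check_solution(*s):
#           ...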
#----------------------------------------------------------------------------------------------------------#
# TEST-CASE IRB140
#----------------------------------------------------------------------------------------------------------#
class TestIRB140(unittest.TestCase):
def test_elbow_down(self):
print 'test_elbow_down'
for _ in xrange(100):
j1 = rand_range(-180, 180)
j2 = 40
j3 = -100
j4 = rand_range(-200, 200)
j5 = rand_range(-115, 115)
j6 = rand_range(-400, 400)
robot_info = forward_kinematics(j1,j2,j3,j4,j5,j6,**DH_TABLE)
A, debug = robot_info['flange'], robot_info['robot_geometry_local']
s = inverse_kinematics_elbow_down(DH_TABLE, A)
self.assertNotEqual(n.isnan(n.sum(s)), True)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
s = inverse_kinematics_elbow_up(DH_TABLE, A)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
# check if they are stored in the right order i.e.
            # elbow_up, elbow_down, elbow_up_fl, elbow_down_fl
sols = inverse_kinematics_irb140(DH_TABLE, A)
for i in xrange(1, len(sols.T), 4):
a,b,c = sols[:,i][:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
def test_elbow_up(self):
print '\ntest_elbow_up'
for _ in xrange(100):
j1 = rand_range(-180, 180)
j2 = 40
j3 = -30
j4 = rand_range(-200, 200)
j5 = rand_range(-115, 115)
j6 = rand_range(-400, 400)
robot_info = forward_kinematics(j1,j2,j3,j4,j5,j6,**DH_TABLE)
A, debug = robot_info['flange'], robot_info['robot_geometry_local']
s = inverse_kinematics_elbow_up(DH_TABLE, A)
self.assertNotEqual(n.isnan(n.sum(s)), True)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
s = inverse_kinematics_elbow_down(DH_TABLE, A)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
# check if they are stored in the right order i.e.
            # elbow_up, elbow_down, elbow_up_fl, elbow_down_fl
sols = inverse_kinematics_irb140(DH_TABLE, A)
for i in xrange(0, len(sols.T), 4):
a,b,c = sols[:,i][:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
def test_elbow_down_backward_facing(self):
print '\ntest_elbow_down_backward'
for _ in xrange(100):
j1 = rand_range(-180, 180)
j2 = -90
j3 = -30
j4 = rand_range(-200, 200)
j5 = rand_range(-115, 115)
j6 = rand_range(-400, 400)
robot_info = forward_kinematics(j1,j2,j3,j4,j5,j6,**DH_TABLE)
A, debug = robot_info['flange'], robot_info['robot_geometry_local']
s = inverse_kinematics_elbow_down(DH_TABLE, A, flipped = True)
self.assertNotEqual(n.isnan(n.sum(s)), True)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
s = inverse_kinematics_elbow_up(DH_TABLE, A, flipped = True)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
# check if they are stored in the right order i.e.
            # elbow_up, elbow_down, elbow_up_fl, elbow_down_fl
sols = inverse_kinematics_irb140(DH_TABLE, A)
for i in xrange(3, len(sols.T), 4):
a,b,c = sols[:,i][:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
def test_elbow_up_backward_facing(self):
print '\ntest_elbow_up_backward'
for _ in xrange(100):
j1 = rand_range(-180, 180)
j2 = -40
j3 = -100
j4 = rand_range(-200, 200)
j5 = rand_range(-115, 115)
j6 = rand_range(-400, 400)
robot_info = forward_kinematics(j1,j2,j3,j4,j5,j6,**DH_TABLE)
A, debug = robot_info['flange'], robot_info['robot_geometry_local']
sol = mat([j1,j2,j3,j4,j5,j6])
s = inverse_kinematics_elbow_up(DH_TABLE, A, flipped = True)
self.assertNotEqual(n.isnan(n.sum(s)), True)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
s = inverse_kinematics_elbow_down(DH_TABLE, A, flipped = True)
a,b,c = s[0][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
a,b,c = s[1][0:3]
self.assertAlmostEqual(a, j1)
self.assertNotAlmostEqual(b, j2)
self.assertNotAlmostEqual(c, j3)
# check if they are stored in the right order i.e.
            # elbow_up, elbow_down, elbow_up_backward, elbow_down_backward
sols = inverse_kinematics_irb140(DH_TABLE, A)
for i in xrange(2, len(sols.T), 4):
a,b,c = sols[:,i][:3]
self.assertAlmostEqual(a, j1)
self.assertAlmostEqual(b, j2)
self.assertAlmostEqual(c, j3)
def test_non_reach_config(self):
print '\ntest_non_reach_configs'
for _ in xrange(0,100):
j1 = rand_range(-180, 180)
j2 = 90
j3 = -89
j4 = rand_range(-200, 200)
j5 = rand_range(-115, 115)
j6 = rand_range(-400, 400)
s0 = j1,j2,j3,j4,j5,j6
robot_info = forward_kinematics(*s0, **DH_TABLE)
T44, debug1 = robot_info['flange'], robot_info['robot_geometry_local']
sol = mat( inverse_kinematics_irb140(DH_TABLE, T44) )
sol = sol.T
for i,s in enumerate(sol):
robot_info = forward_kinematics(*s, **DH_TABLE)
A, debug2 = robot_info['flange'], robot_info['robot_geometry_local']
if i in [l+m*8 for m,_ in enumerate(range(0, len(sol), 8)) for l in [0,1,4,5]]: #all non-flipped solutions only
self.assertAlmostEqual(norm(A-T44), 0)
else:
self.assertTrue(n.isnan(norm(A-T44)))
def test_just_barely_reach_flipped_configs(self):
print '\ntest_just_barely_reach_flipped_configs'
for _ in xrange(0,100):
j1 = rand_range(-180, 180)
j2 = -90
j3 = -89
j4 = rand_range(-200, 200)
j5 = rand_range(-115, 115)
j6 = rand_range(-400, 400)
s0 = j1,j2,j3,j4,j5,j6
robot_info = forward_kinematics(*s0, **DH_TABLE)
T44, debug1 = robot_info['flange'], robot_info['robot_geometry_local']
sol = mat( inverse_kinematics_irb140(DH_TABLE, T44) )
for s in sol.T:
robot_info = forward_kinematics(*s, **DH_TABLE)
A, debug2 = robot_info['flange'], robot_info['robot_geometry_local']
self.assertAlmostEqual(norm(A-T44), 0)
def test_forward_kinematics_general(self):
print '\ntest_forward_kinematics_general'
for counter in xrange(10000):
fcounter = (counter / 10000.0)*100
if fcounter % 1.0 == 0.0:
print fcounter
j1 = rand_range(-180, 180)
j2 = rand_range(-90, 110)
j3 = rand_range(-230, 50)
j4 = rand_range(-200, 200)
j5 = rand_range(-115, 115)
j6 = rand_range(-400, 400)
# makes sure we never end up at a singular point
while (abs(j3) - 90) < 1e-7:
j3 = rand_range(-230, 50)
s0 = j1,j2,j3,j4,j5,j6
robot_info = forward_kinematics(j1, j2, j3, j4, j5, j6, **DH_TABLE)
T44, debug1 = robot_info['flange'], robot_info['robot_geometry_local']
while norm(calc_wcp(T44,L=0.065)[:2]) < 1e-7:
j2 = rand_range(-90, 110)
                robot_info = forward_kinematics(j1, j2, j3, j4, j5, j6, **DH_TABLE)
                T44, debug1 = robot_info['flange'], robot_info['robot_geometry_local']
sol = mat( inverse_kinematics_irb140(DH_TABLE, T44) )
num_valid_solutions = 0
for s in sol.T:
robot_info = forward_kinematics(*s, **DH_TABLE)
A, debug2 = robot_info['flange'], robot_info['robot_geometry_local']
num_valid_solutions += check_solution(*s)
error = norm(A - T44)
if not n.isnan(error):
self.assertAlmostEqual(error, 0)
self.assertGreaterEqual(num_valid_solutions, 1)
self.assertEqual(num_valid_solutions, calc_valid_raw_invkin_irb140(T44).shape[0])
L = []
for s in iterdim(sol,1):
if check_solution(*s) == True:
L.append(s)
L = mat(L)
self.assertTrue(norm(calc_valid_raw_invkin_irb140(T44) - L) == 0.0)
#----------------------------------------------------------------------------------------------------------#
if __name__ == '__main__':
unittest.main()
```
#### File: master-thesis/source/papersearch_parse_newer_different optimization_20170916.py
```python
from __future__ import division
# imports - core
import os, sys
import logging
import cPickle as pickle
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
import numpy
import numpy as n
numpy.set_printoptions(precision=2)
numpy.set_printoptions(suppress=True)
from numpy import array as mat, pi, cos, sin, arctan2 as atan2,\
arccos as acos, log10, degrees as deg, radians as rad
from numpy.linalg import norm, svd, inv
from numpy.random import randint, seed
from copy import deepcopy
import itertools as it
import pylab
# imports - anoto
filePath, fileName=os.path.split(__file__) #you are here
sys.path.append(os.path.normpath(os.path.join(filePath, '../int')))
# imports - master thesis
sys.path.append(os.path.normpath(os.path.join(filePath, '../int/master_thesis_code/source')))
from denavit_hartenberg140 import forward_kinematics, DH_TABLE as dh_table
from helperfunctions_math import homogenous_matrix as hom, matmul,\
quat_to_rot, rot_to_quat, nmap, \
rot_tilt_skew,\
nzip
import calibration_algorithm_newer as calib
from orientation_verification import solve_ori as SVD
from standardplot import StPlot
# imports - newer thesis files
#from ed_probe import PenED, rx_ry_rz as rot_xyz
# imports - 3rd party
import yaml
def normalize(x):
return x / norm(x)
def tool_svd(A,B):
points = mat(range(30)).reshape(3,10)
a = A.dot(points)
b = B.dot(points)
ac = a.T - n.mean(a.T, axis=0)
bc = b.T - n.mean(b.T, axis=0)
h = mat(map(lambda x: n.outer(x[0],x[1]),zip(ac,bc)))
H = n.sum(h, axis=0)
U,S,V = svd(H)
return U.dot(V)
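# tool_svd (sketch): fits the rotation that best maps orientation A onto B by
# pushing a fixed 3x10 point set through both, centring the images and taking
# the SVD of their correlation -- the same Kabsch-style construction as the
# calibration module's orientation solver, though without the reflection
# (determinant) correction of the full Kabsch algorithm.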
class Calibration(object):
def __init__(self, data_parser, num_points=-1, random=False):
self.parser = data_parser
self.__calibrate(num_points, random)
def __calibrate(self, num_points=-1, random=False):
self.robot_infos = map(lambda x: forward_kinematics(*x, **dh_table), self.parser.robot_joints)
fk = self.flange
# anoto coords
pentip_2d = self.parser.pen_pos
# convert to mm, first point is origin
#pentip_2d = (pentip_2d - pentip_2d[0])*0.3
f_ad = 7.0 *(25.4/600.0)
pentip_2d = (pentip_2d) * f_ad
if not num_points == -1:
if random:
unique_rand = list(set(randint(0, len(fk), num_points)))
fk = fk[unique_rand]
pentip_2d = pentip_2d[unique_rand]
self.geometry_info = {
'data':
{
'forward_kinematics':fk,
'pentip_2d':pentip_2d
}
}
self._res, self.cond = calib.find_solution_pen_tip(self.geometry_info, num_points)
@property
def wobj_ori(self):
return self._res[:,:3]
@property
def wobj_ori_x(self):
return self.wobj_ori[:,0]
@property
def wobj_ori_y(self):
return self.wobj_ori[:,1]
@property
def wobj_ori_z(self):
return self.wobj_ori[:,2]
@property
def wobj_error(self):
return abs(90 - acos(self.wobj_ori[:,0].dot(self.wobj_ori[:,1]))*180.0/pi)
@property
def wobj_error_x(self):
if (self.parser.yaml_wobj is None):
return numpy.NaN
acos_val = acos(normalize(self.wobj_ori_x).dot(self.parser.yaml_wobj_ori_x))
return deg(acos_val)
@property
def wobj_error_y(self):
if (self.parser.yaml_wobj is None):
return numpy.NaN
return acos(normalize(self.wobj_ori_y).dot(self.parser.yaml_wobj_ori_y))*180.0/pi
@property
def wobj_error_z(self):
if (self.parser.yaml_wobj is None):
return numpy.NaN
return acos(normalize(self.wobj_ori_z).dot(self.parser.yaml_wobj_ori_z))*180.0/pi
@property
def tip(self):
return self._res[:,3]
@property
def tip_error(self):
"""
Used for optimizing the calibration result.
Calculates the tip error using a metric function.
"""
return self.global_tip_error
# if (self.parser.yaml_tip is None):
# return numpy.NaN
# return norm(self._res[:,3] - self.parser.yaml_tip)
def tip_error_max(self):
return self.global_tip_error
@property
def global_tip_error(self):
print self.parser.num_points
print len(self.parser.pen_pos_pattern_wobj_global)
print len(self.tcp)
k = min(len(self.tcp), self.parser.num_points)
print "k = {}".format(k)
print "..."
global_error = abs(self.tcp[:k] - self.parser.pen_pos_pattern_wobj_global[:k])[:,:,3]
max_xyz = norm(n.max(global_error,0)[:3])
mean_xyz = norm(n.mean(global_error,0)[:3])
min_xyz = norm(n.min(global_error,0)[:3])
err_metric = (max_xyz, mean_xyz, min_xyz)
return sum(err_metric)
@property
def tool(self):
return hom(0,0,0,self.tip)
@property
def tcp(self):
tcp = matmul(
self.flange,
self.tool
)
return tcp
@property
def flange(self):
flange = mat([x['flange'] for x in self.robot_infos])
flange[:,:3,3] = flange[:,:3,3]*1000 #convert to mm
return flange
@property
def flange_pos(self):
flange_pos = self.flange[:,:3,3]
return flange_pos
@property
def flange_pos_error(self):
return nmap(norm, self.flange_pos - self.parser.robot_flange)
@property
def wobj(self):
return hom(self._res[:,:3])
@property
def robot_frames_global(self):
frames = mat([x['robot_geometry_global'] for x in self.robot_infos])
frames[:,:,:3,3] = frames[:,:,:3,3] * 1000.0
return frames
class DataParser(object):
# constructor
def __init__(self, new_data, clean_data=False):
print "Initializing data parser..."
if type(new_data) == str:
with open(new_data) as fp:
self.data = pickle.load(fp)
elif type(new_data) == list or type(new_data) == tuple:
self.data = new_data
else:
self.data = None
if clean_data:
self.__clean()
self._yaml_data = None
self._yaml_file = None
self._subparse_len = 20
def __clean(self):
tmp = []
all_pos = self.pen_pos
for index, pos in enumerate(all_pos):
if not pos.dtype == numpy.object:
tmp.append(index)
self.data = [x[1] for x in enumerate(self.data) if (x[0] in tmp)]
def to_pickle(self, filepath):
with open(filepath,'wb+') as fp:
pickle.dump(self.data, fp)
def merge(self, other_parser):
self.data += other_parser.data
return self
def emptycopy(self):
self.subparser = 0
return self.subparser
def jumblecopy(self):
self.subparser = self.numpoints
return self.subparser
@property
def subparser(self):
tmp = deepcopy(self.data)
points = [self.data.pop(randint(0, self.num_points-1)) for _ in xrange(self._subparse_len)]
self.data = tmp
new_parser = DataParser(points)
new_parser.yaml_file = self.yaml_file
return new_parser
@subparser.setter
def subparser(self,value):
self._subparse_len = value
@property
def num_points(self):
return len(self.data)
@property
def robot_joints(self):
return mat([x['robot_joints'] for x in self.data])
@property
def robot_flange(self):
return mat([x['robot_flange'] for x in self.data])
@property
def robot_tcp(self):
return mat([x['robot_tcp'] for x in self.data])
@property
def pen_pos(self):
return mat([x['pen_pos'] for x in self.data])
def gen_robot_flanges(self):
robot_infos = map(lambda x: forward_kinematics(*x, **dh_table), self.robot_joints)
flange = mat([x['flange'] for x in robot_infos])
flange[:,:3,3] = flange[:,:3,3]*1000 #convert to mm
return flange
@property
def pen_pos_pattern_mm(self):
"""
Pen position relative to paper origin, which
is the UL position of the pattern with crosses
used in calibration. The coordinates are converted from ad to mm.
"""
ad_to_mm = 7.0 *(25.4 / 600.0)
paper_origin = mat([385911998.7,327159331.8])
return mat([x['pen_pos'] - paper_origin \
for x in self.data]) * ad_to_mm
@property
def pen_pos_pattern_mm_3d(self):
"""
Pen position relative to paper origin, which
is the UL position of the pattern with crosses
used in calibration. The coordinates are converted from ad to mm.
"""
return nmap(lambda x: x + [0], self.pen_pos_pattern_mm.tolist())
@property
def pen_pos_pattern_mm_hom(self):
return nmap(lambda x: hom(0,0,0,x), self.pen_pos_pattern_mm_3d)
@property
def pen_pos_pattern_wobj_global(self):
return nmap(lambda x: self.yaml_wobj.dot(x), self.pen_pos_pattern_mm_hom)
@property
def pen_ori(self):
return mat([x['pen_ori'] for x in self.data])
@property
def pen_angles(self):
return mat([rot_tilt_skew(x['pen_ori']) for x in self.data])
@property
def pen_fsr(self):
return mat([x['pen_fsr'] for x in self.data])
@property
def pen_fsr_adc(self):
return mat([x['pen_fsr_adc'] for x in self.data])
@property
def yaml_tip(self):
if not self.__yaml_data:
return None
return mat(self.__yaml_data['tool']['pos'])
@property
def yaml_tool_q(self):
if not self.__yaml_data:
return None
return self.__yaml_data['tool']['q']
@property
def yaml_wobj(self):
"""
The coordinate system of the paper pattern.
"""
if not self.__yaml_data:
return None
return hom(self.yaml_wobj_ori, self.yaml_wobj_pos)
@property
def yaml_wobj_ori(self):
"""
The coordinate system of the paper pattern.
        This function converts the orientation from the global target
        coordinate system to the global pattern coordinate system
by the transformation: [ 0 1 0
1 0 0
0 0 -1 ]
"""
if not self.__yaml_data:
return None
return matmul(quat_to_rot(self.__yaml_data['wobj']['q']),
mat([[0,1,0],
[1,0,0],
[0,0,-1]]))
@property
def yaml_wobj_q(self):
return self.__yaml_data['wobj']['q']
@property
def yaml_wobj_ori_x(self):
if not self.__yaml_data:
return None
return self.yaml_wobj_ori[:,0]
@property
def yaml_wobj_ori_y(self):
if not self.__yaml_data:
return None
return self.yaml_wobj_ori[:,1]
@property
def yaml_wobj_ori_z(self):
if not self.__yaml_data:
return None
return self.yaml_wobj_ori[:,2]
@property
def yaml_wobj_pos(self):
if not self.__yaml_data:
return None
return mat(self.__yaml_data['wobj']['pos'])
@property
def yaml_file(self):
if self.__yaml_data:
return self._yaml_file
else:
return None
@property
def yaml_dir(self):
if self.__yaml_data:
return os.path.join(os.path.dirname(self.yaml_file))
else:
return None
@yaml_file.setter
def yaml_file(self,filepath):
self.__yaml_data = filepath
if self.__yaml_data:
self._yaml_file = filepath
else:
self._yaml_file = None
@property
def __yaml_data(self):
return self._yaml_data
@__yaml_data.setter
def __yaml_data(self, filepath):
try:
with open(filepath) as fp:
self._yaml_data = yaml.load(fp)
except Exception as e:
self._yaml_data = None
log.warning(str(e))
# destructor
def __del__(self):
pass
def parser_merge(x,y):
x.merge(y)
return x
def create_parser(x):
return DataParser(x)
def calibration_optimization_rand(p, error):
curr_error = error
curr_calib = Calibration(p)
for k in range(p.num_points*50):
try:
c = Calibration(p.subparser)
except:
continue
if c.tip_error < curr_error:
curr_calib = deepcopy(c)
curr_error = deepcopy(c.tip_error)
print curr_error
return curr_calib.parser, error
def try_remove_one(parser):
c = Calibration(parser)
for k in range(parser.num_points):
tmp = parser.data.pop(k)
new_c = Calibration(parser)
if new_c.tip_error < c.tip_error:
return tmp
else:
parser.data.insert(k, tmp)
def try_remove_largest(parser):
c = Calibration(parser)
curr_k = None
curr_error = c.tip_error
for k in range(parser.num_points):
tmp = parser.data.pop(k)
new_c = Calibration(parser)
if new_c.tip_error <= curr_error:
curr_k = k
curr_error = new_c.tip_error
parser.data.insert(k, tmp)
if not (curr_k is None):
return parser.data.pop(curr_k)
def calibration_optimization(parser):
curr_parser = parser.emptycopy()
curr_parser.data = [parser.data.pop(0) for _ in xrange(6)]
curr_calib = Calibration(curr_parser)
counter = 0
limit = parser.num_points
print '{} | {}'.format(curr_calib.tip_error,parser.num_points)
while parser.num_points > 0:
curr_parser.data.append(parser.data.pop(0))
new_calib = Calibration(curr_parser)
if new_calib.tip_error < curr_calib.tip_error:
curr_calib = new_calib
counter = 0
print '{} | {}'.format(curr_calib.tip_error,parser.num_points)
else:
ret = try_remove_one(curr_parser)
#ret = try_remove_largest(curr_parser)
if not (ret is None) and (counter < limit):
parser.data.append(ret)
else:
print '{} | {}'.format(curr_calib.tip_error,parser.num_points)
counter=0
counter += 1
print curr_calib.tip_error
while try_remove_largest(curr_parser):
pass
return curr_parser
if __name__ == '__main__':
_debug = False
_optimize = False
if not len(sys.argv) > 1:
log.error('\n\t\tUsage: please supply file!')
#sys.argv.append( raw_input('Filepath: ') )
##sys.argv.append( 'C:\\Users\\***REMOVED***\\Dropbox\\exjobb\\results\\measurements\\measurements_automatic\\26_27052016\\4_measurement_27052016_0227.pickle' )
wobj_origin = mat([385911998.7,327159331.8])
o = wobj_origin
tmp_path = r"C:\Users\***REMOVED***\Desktop\final_meas_20160724_2016025\all\measurement_24072016_2251_2321_abort.pickle"
sys.argv.append(tmp_path)
_debug = True
print sys.argv
if len(sys.argv) == 2:
parser = DataParser(sys.argv[1])
else:
parsers = map(create_parser, sys.argv[1:])
parser = reduce(parser_merge, parsers)
yaml_filepath = os.path.join(os.path.dirname(os.path.abspath(sys.argv[1])),
'tool_wobj.yaml')
parser.yaml_file = yaml_filepath
if raw_input('Optimize results?: ').lower() == 'y':
_optimize = True
print '(be patient, this will take some time ...)'
org_parser = deepcopy(parser)
print parser.num_points
cal = Calibration(parser)
print cal.tip_error
curr_parser = calibration_optimization(parser)
print "===[ Calibration Result ]==="
print curr_parser.num_points
print "tip error metric: {}".format(Calibration(curr_parser).tip_error)
#pos = matmul(org_parser.gen_robot_flanges(), cal.tool)[:,:3,3]
#meas_pos = org_parser.pen_pos_pattern_wobj_global[:,:3,3]
#res = meas_pos - pos
parser = curr_parser
yaml_output = {
"tool": {
"pos":cal.tip.tolist(),
"q": org_parser.yaml_tool_q,
"name": "Optimized calibration tip"
},
"wobj": {
"pos": org_parser.yaml_wobj_pos.tolist(),
"q": org_parser.yaml_wobj_q
}
}
with open(os.path.join(parser.yaml_dir, "tool_wobj_opt.yaml"), 'w+') as fp:
yaml.dump(yaml_output, fp)
if not _debug:
parser.to_pickle(os.path.join(parser.yaml_dir,'optimized_measurements.pickle'))
# plotting - misc
pos = parser.pen_pos
pos = pos - pos[0]
pylab.grid()
pylab.plot(pos[:,0],
pos[:,1],'k.')
pylab.xlabel('x [mm]')
pylab.ylabel('y [mm]')
if not _debug:
filename = 'pen_coords'
if _optimize:
filename += '_opt'
pylab.savefig(os.path.join(parser.yaml_dir,'{}.png'.format(filename)))
pylab.clf()
else:
pylab.show()
calibration_run = []
x_val = range(2, parser.num_points)
for p in x_val:
cal = Calibration(parser, num_points=p)
calibration_run.append(cal)
calibration = Calibration(parser)
from pandas import DataFrame
df = DataFrame(data=zip(
[x.tip_error for x in calibration_run],
[x.wobj_error_x for x in calibration_run],
[x.wobj_error_y for x in calibration_run],
[x.wobj_error_z for x in calibration_run],
calibration.flange_pos_error,
),
columns=['tip_error','wobj_error_x',
'wobj_error_y',
'wobj_error_z',
'flange_error'],
index=x_val)
print df
if not _debug:
filename = 'calibration_errors'
if _optimize:
filename += '_opt'
df.to_csv(os.path.join(parser.yaml_dir,'{}.csv'.format(filename)))
df.to_latex(os.path.join(parser.yaml_dir,'{}.tex'.format(filename)))
tip_error = log10(calibration.tip_error)
pylab.plot(x_val, log10([x.tip_error for x in calibration_run]))
pylab.xlim([x_val[0], x_val[-1]])
pylab.axhline(tip_error, color='k', linestyle='--')
pylab.text(parser.num_points - 3,0.08+tip_error,
'${:0.4f}$ mm'.format(calibration.tip_error),
bbox={'facecolor':'white',
'edgecolor':'white'})
pylab.xlabel('number of measurements')
pylab.ylabel('log10 norm error [mm]')
pylab.title('Tool tip calibration results from automatic measurements')
pylab.grid()
if not _debug:
filename = 'tip_error'
if _optimize:
filename += '_opt'
pylab.savefig(os.path.join(parser.yaml_dir,'{}.png'.format(filename)))
pylab.clf()
else:
pylab.show()
pylab.plot(x_val, log10([x.wobj_error_x for x in calibration_run]))
pylab.plot(x_val, log10([x.wobj_error_y for x in calibration_run]))
pylab.plot(x_val, log10([x.wobj_error_z for x in calibration_run]))
pylab.xlim([x_val[0], x_val[-1]])
pylab.legend(['$\hat{x}_{an}$',
'$\hat{y}_{an}$',
'$\hat{z}_{an}$'])
pylab.xlabel('number of measurements')
pylab.ylabel('log10 angle error [deg]')
pylab.title('Work object calibration results from automatic measurements')
pylab.grid()
if not _debug:
filename = 'wobj_error'
if _optimize:
filename += '_opt'
pylab.savefig(os.path.join(parser.yaml_dir,'{}.png'.format(filename)))
pylab.clf()
else:
pylab.show()
## # plotting - result
## for k in range(1):
## index = k
## plotting = StPlot(length_unit='mm')
## plotting.draw_robot(calibration.robot_frames_global[index])
## plotting.draw_frame(calibration.wobj[0], size=0.4)
## try:
## plotting.draw_frame(parser.yaml_wobj, size=0.5)
## except:
## pass
## plotting.draw_tool(calibration.flange[index], calibration.tool)
## plotting.show()
```
#### File: source/robot-station-same-proof/robot_station2.py
```python
import random
from helperfunctions_plot import *
from plane_relative import *
from denavit_hartenberg140 import *
import itertools as it
def apply_along_axis(M, func=n.diff, axis=1):
return n.apply_along_axis(func, axis, arr=M)
def get_closest_solutions_pair(s0, s1):
compares = []
for i, s0i in enumerate( s0 ):
for j, s1j in enumerate( s1 ):
#print "%d, %d, %d" % (norm(s0i-s1j), i , j)
compares.append( [norm(s0i-s1j), i , j])
comp = mat( compares )
ret = []
try:
wh = n.argwhere(comp == comp.min(0)[0])
for k in comp[wh[:,0],:]:
            i, j = int(k[1]), int(k[2])
#norm_value = k[0]
pair = [s0[i], s1[j]]
ret.append( pair )
# import pdb; pdb.set_trace()
except:
pass
## if len(ret) > 1:
## import pdb; pdb.set_trace()
return ret
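# get_closest_solutions_pair compares every joint-space solution for one curve
# point against every solution for the next point and returns the pair(s) with
# the smallest Euclidean distance; the main loop below uses it to keep
# consecutive configurations close along the curve.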
def add_solutions(solutions, solution_value, index=5):
for s in solutions.T:
tmp1 = s.copy()
tmp2 = s.copy()
old_val = s[index]
tmp1[index] = old_val + solution_value
yield tmp1
tmp2[index] = old_val - solution_value
yield tmp2
def traverse_solutions(*args):
for solutions in args:
for s in solutions.T:
yield s
def make_array(list_of):
return mat(list_of).T
if __name__ == '__main__':
for count in n.linspace(-180,180,10):
ax, fig = init_plot()
fig.clear()
j1 = 180
j2 = 0#rand_range(-90, 110)
j3 = 0#rand_range(-230, 50)
j4 = 0#rand_range(-200, 200)
j5 = 0#rand_range(-115, 115)
j6 = 0#rand_range(-400, 400)
j1,j2,j3,j4,j5,j6 = (-140.0, -14.35476839088895, 20.6520766452779, 0, 0, 0)
joint_values = j1,j2,j3,j4,j5,j6
# get forward kinematics i.e. last global robot-frame
T44, debug = forward_kinematics(*joint_values, **DH_TABLE)
IK_angles = inverse_kinematics_irb140(DH_TABLE, T44)
# sanity check of forward kinematics
for angles in IK_angles.T:
            t44, _ = forward_kinematics(*angles, **DH_TABLE)
assert(norm(T44 - t44) < 1e-7)
# the base frame which defines the world-coordinate system
plane0 = define_plane_from_angles([0,0,0],0, 0, 0)
# list of global-robot-frames
global_robot_frames = matmul_series(*debug)
global_robot_frames.insert(0, plane0)
global_robot_frames = mat( global_robot_frames )
global_robot_points = global_robot_frames[:, :3, 3]
# generate a curve in the last global robot-frame
num_p = 50
point_matrix = generate_symmetric_curve(num_points=num_p)
point_matrix_tf = get_transformed_points(T44, point_matrix)
######
ax = fig.add_subplot(1,2,1, projection='3d')
for robot_frame in global_robot_frames:
plot_plane(ax,robot_frame, '--',scale_factor=0.1)
ax.scatter(point_matrix_tf[:,0],
point_matrix_tf[:,1],
point_matrix_tf[:,2])
ax.plot(global_robot_points[:,0],
global_robot_points[:,1],
global_robot_points[:,2], 'k',linewidth=2)
plot_equal_perspective(ax,
[-0.5,0.5],
[-0.5,0.5],
[0,1])
#show()
######
plane = global_robot_frames[-1]
global_plane_curve = point_matrix_tf
lost_p = 0
all_solutions = []
for point in global_plane_curve:
FK_p = homogenous_matrix(plane[:3,:3],
point[:3])
angle_solutions = inverse_kinematics_irb140(DH_TABLE, FK_p)
angle_solutions = filter_solutions( angle_solutions )
angle_solutions = angle_solutions.T
## if n.sum( angle_solutions.shape) == 0.0:
## lost_p += 1
## continue
print angle_solutions.shape
all_solutions.append(angle_solutions)
chosen_solutions = []
for k in xrange(1, len(all_solutions)):
if k == 1:
o = all_solutions[k-1]
else:
o = chosen_solutions[-1]
pairs = mat(get_closest_solutions_pair(o, all_solutions[k]))
if k==1:
chosen_solutions.append(pairs[0,0,:].reshape(1,6))
chosen_solutions.append(pairs[0,1,:].reshape(1,6))
else:
chosen_solutions.append(pairs[0,1,:].reshape(1,6))
chosen_solutions = mat(chosen_solutions).reshape(num_p - lost_p,6)
diff_solutions = apply_along_axis(chosen_solutions, func=n.diff, axis=0)
max_err_solutions = n.max(n.abs(diff_solutions), axis=1)
max_err_solutions = apply_along_axis(diff_solutions, func=n.linalg.norm, axis=1)
ax = fig.add_subplot(1,2,2)
plot(max_err_solutions)
show()
break
```
#### File: master-thesis/source/robot_station_verification2d.py
```python
import random
import os.path as path
from plotsettings import PlotSettings
from pylab import axhline, plot, show, \
axes, grid, xlabel, ylabel, title, xticks, yticks, savefig
from helperfunctions_math import mat, homogenous_matrix as hom, nzip,\
rotation_matrix_skew_tilt_rot as ori
from denavit_hartenberg140 import forward_kinematics,\
calc_valid_raw_invkin_irb140,\
calc_valid_invkin_irb140,\
calc_invkin_irb140,\
DH_TABLE as dh_table
def plot_robot_geometry(robot_info, color='k'):
global_robot_frames = mat(robot_info['robot_geometry_global'])
plot(global_robot_frames[:,0,3],
global_robot_frames[:,2,3], color ,linewidth=2)
tool_pos = nzip(robot_info['tcp'][:3, 3],
robot_info['flange'][:3, 3]).T
plot(tool_pos[:,0],
tool_pos[:,2], color='g' ,linewidth=3)
TICK_SIZE = PlotSettings.tick_size*0.8
LABEL_SIZE = PlotSettings.label_size*0.8
def main():
j1 = 0
j2 = 0
j3 = 0
j4 = 0
j5 = 0
j6 = 0
joint_values = [j1, j2, j3, j4, j5, j6]
tool = hom(0,0,0,[0.1,0,0])
dh_table['tool'] = tool
info = forward_kinematics(*joint_values, **dh_table)
# multiplication from left: rotation in base to base
# multiplication from right: rotation in tool to base
tcp = info['tcp'].dot(hom(ori(-10,30,40)))
pose = hom(tcp[:3,:3],[0.6, 0, 0.3])
s = calc_invkin_irb140(pose, raw_solutions=True)
ik_up = forward_kinematics(*s[0], **dh_table)
ik_down = forward_kinematics(*s[5], **dh_table)
ik_up_back = forward_kinematics(*s[10], **dh_table)
ik_down_back = forward_kinematics(*s[15], **dh_table)
#plot_robot_geometry(info)
plot_robot_geometry(ik_up,'b')
plot_robot_geometry(ik_up_back,'b--')
plot_robot_geometry(ik_down,'r')
plot_robot_geometry(ik_down_back,'r--')
xlabel('x [m]', fontsize=LABEL_SIZE)
ylabel('z [m]', fontsize=LABEL_SIZE)
xticks(fontsize=TICK_SIZE)
yticks(fontsize=TICK_SIZE)
grid()
axes().set_aspect('equal', 'datalim')
# save figure plot
figpath = r"C:\Users\***REMOVED***\Dropbox\exjobb\rapport\images"
savefig(path.join(
figpath, "invkin-elbowupdown2.png"), bbox_inches='tight')
if __name__ == '__main__':
main()
```
#### File: source/utils/timing.py
```python
from __future__ import division
#----------------------------------------#
import time
#----------------------------------------#
class Timer:
def __init__(self):
self.start_time = 0
self.stop_time = 0
self.duration = 0
    def __enter__(self):
        self.start_time = time.time()
        self.duration = 0
        return self
def __exit__(self, *kwargs):
self.stop_time = time.time()
self.duration = self.stop_time - self.start_time
print self.__str__()
def __str__(self):
return 'Timer duration: {} s.'.format(self.duration)
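# Usage sketch (this is how the thesis scripts use it):
#
#   with Timer() as t:
#       ...   # timed work
#   # 'Timer duration: <seconds> s.' is printed when the block exits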
``` |
{
"source": "johnsk95/pytorch-cifar",
"score": 2
} |
#### File: pytorch-cifar/models/autoencoder_cr.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.models as models
import torchvision
import torchvision.transforms as transforms
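# MergeAutoencoder (sketch of the idea): feature vectors from an externally
# supplied colorization ResNet and a rotation-prediction ResNet are
# concatenated (2560 dims in total here) and compressed by a fully connected
# encoder; the decoder is defined for reconstruction but is not used in
# forward(), which returns only the latent vector (note the encoder ends at
# 100 dims while the decoder expects 256, consistent with the decoder being
# unused).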
class MergeAutoencoder(nn.Module):
def __init__(self, colorization_resnet, rotation_resnet):
super(MergeAutoencoder, self).__init__()
# feature extractors
self.colorization_resnet = colorization_resnet
# self.jigsaw_resnet = jigsaw_resnet
self.rotation_resnet = rotation_resnet
# self.simsiam_resnet = simsiam_resnet
# encoder
self.encoder = nn.Sequential(
nn.Linear(2560, 2048),
nn.ReLU(True),
nn.Linear(2048, 1536),
nn.ReLU(True),
nn.Linear(1536, 1024),
nn.ReLU(True),
nn.Linear(1024, 512),
nn.ReLU(True),
nn.Linear(512, 256),
nn.Linear(256, 100))
# decoder
self.decoder = nn.Sequential(
nn.Linear(256, 512),
nn.ReLU(True),
nn.Linear(512, 1024),
nn.ReLU(True),
nn.Linear(1024, 1536),
nn.ReLU(True),
nn.Linear(1536, 2048),
nn.ReLU(True),
nn.Linear(2048, 2560),
nn.Tanh())
def forward(self, x):
rotation = self.rotation_resnet(x)
gray_x = transforms.Grayscale()(x)
color = self.colorization_resnet(gray_x)
color = color.view(rotation.shape[0], -1)
feature_map = torch.cat((color, rotation), dim=1)
latent_vec = self.encoder(feature_map)
# out = self.decoder(latent_vec)
return latent_vec
``` |
{
"source": "johnskopis/ClickHouse",
"score": 2
} |
#### File: integration/test_grant_and_revoke/test.py
```python
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
import re
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')
@pytest.fixture(scope="module", autouse=True)
def start_cluster():
try:
cluster.start()
instance.query("CREATE DATABASE test")
instance.query("CREATE TABLE test.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()")
instance.query("INSERT INTO test.table VALUES (1,5), (2,10)")
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def cleanup_after_test():
try:
yield
finally:
instance.query("DROP USER IF EXISTS A, B")
instance.query("DROP TABLE IF EXISTS test.view_1")
def test_smoke():
instance.query("CREATE USER A")
assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test.table", user='A')
instance.query('GRANT SELECT ON test.table TO A')
assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n"
instance.query('REVOKE SELECT ON test.table FROM A')
assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test.table", user='A')
def test_grant_option():
instance.query("CREATE USER A")
instance.query("CREATE USER B")
instance.query('GRANT SELECT ON test.table TO A')
assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n"
assert "Not enough privileges" in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A')
instance.query('GRANT SELECT ON test.table TO A WITH GRANT OPTION')
instance.query("GRANT SELECT ON test.table TO B", user='A')
assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n"
instance.query('REVOKE SELECT ON test.table FROM A, B')
def test_revoke_requires_grant_option():
instance.query("CREATE USER A")
instance.query("CREATE USER B")
instance.query("GRANT SELECT ON test.table TO B")
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
expected_error = "Not enough privileges"
assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("GRANT SELECT ON test.table TO A")
expected_error = "privileges have been granted, but without grant option"
assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION")
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("REVOKE SELECT ON test.table FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == ""
instance.query("GRANT SELECT ON test.table TO B")
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("REVOKE SELECT ON test.* FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == ""
instance.query("GRANT SELECT ON test.table TO B")
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("REVOKE ALL ON test.* FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == ""
instance.query("GRANT SELECT ON test.table TO B")
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("REVOKE ALL ON *.* FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == ""
instance.query("REVOKE GRANT OPTION FOR ALL ON *.* FROM A")
instance.query("GRANT SELECT ON test.table TO B")
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
expected_error = "privileges have been granted, but without grant option"
assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("GRANT SELECT ON test.* TO A WITH GRANT OPTION")
instance.query("GRANT SELECT ON test.table TO B")
assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n"
instance.query("REVOKE SELECT ON test.table FROM B", user='A')
assert instance.query("SHOW GRANTS FOR B") == ""
def test_implicit_show_grants():
instance.query("CREATE USER A")
assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "0\n"
assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "0\n"
assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", user="A") == "0\n"
instance.query("GRANT SELECT(x) ON test.table TO A")
assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT(x) ON test.table TO A\n"
assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n"
assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n"
assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", user="A") == "1\n"
instance.query("GRANT SELECT ON test.table TO A")
assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.table TO A\n"
assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n"
assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n"
assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", user="A") == "2\n"
instance.query("GRANT SELECT ON test.* TO A")
assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.* TO A\n"
assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n"
assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n"
assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", user="A") == "2\n"
instance.query("GRANT SELECT ON *.* TO A")
assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON *.* TO A\n"
assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n"
assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n"
assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", user="A") == "2\n"
instance.query("REVOKE ALL ON *.* FROM A")
assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "0\n"
assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "0\n"
assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", user="A") == "0\n"
def test_implicit_create_view_grant():
instance.query("CREATE USER A")
expected_error = "Not enough privileges"
assert expected_error in instance.query_and_get_error("CREATE VIEW test.view_1 AS SELECT 1", user="A")
instance.query("GRANT CREATE TABLE ON test.* TO A")
instance.query("CREATE VIEW test.view_1 AS SELECT 1", user="A")
assert instance.query("SELECT * FROM test.view_1") == "1\n"
instance.query("REVOKE CREATE TABLE ON test.* FROM A")
instance.query("DROP TABLE test.view_1")
assert expected_error in instance.query_and_get_error("CREATE VIEW test.view_1 AS SELECT 1", user="A")
def test_implicit_create_temporary_table_grant():
instance.query("CREATE USER A")
expected_error = "Not enough privileges"
assert expected_error in instance.query_and_get_error("CREATE TEMPORARY TABLE tmp(name String)", user="A")
instance.query("GRANT CREATE TABLE ON test.* TO A")
instance.query("CREATE TEMPORARY TABLE tmp(name String)", user="A")
instance.query("REVOKE CREATE TABLE ON *.* FROM A")
assert expected_error in instance.query_and_get_error("CREATE TEMPORARY TABLE tmp(name String)", user="A")
def test_introspection():
instance.query("CREATE USER A")
instance.query("CREATE USER B")
instance.query('GRANT SELECT ON test.table TO A')
instance.query('GRANT CREATE ON *.* TO B WITH GRANT OPTION')
assert instance.query("SHOW USERS") == TSV([ "A", "B", "default" ])
assert instance.query("SHOW CREATE USERS A") == TSV([ "CREATE USER A" ])
assert instance.query("SHOW CREATE USERS B") == TSV([ "CREATE USER B" ])
assert instance.query("SHOW CREATE USERS A,B") == TSV([ "CREATE USER A", "CREATE USER B" ])
assert instance.query("SHOW CREATE USERS") == TSV([ "CREATE USER A", "CREATE USER B", "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default" ])
assert instance.query("SHOW GRANTS FOR A") == TSV([ "GRANT SELECT ON test.table TO A" ])
assert instance.query("SHOW GRANTS FOR B") == TSV([ "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
assert instance.query("SHOW GRANTS FOR A,B") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
assert instance.query("SHOW GRANTS FOR B,A") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
assert instance.query("SHOW GRANTS FOR ALL") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT ALL ON *.* TO default WITH GRANT OPTION" ])
assert instance.query("SHOW GRANTS", user='A') == TSV([ "GRANT SELECT ON test.table TO A" ])
assert instance.query("SHOW GRANTS", user='B') == TSV([ "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
expected_access1 = "CREATE USER A\n"\
"CREATE USER B\n"\
"CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default"
expected_access2 = "GRANT SELECT ON test.table TO A\n"\
"GRANT CREATE ON *.* TO B WITH GRANT OPTION\n"\
"GRANT ALL ON *.* TO default WITH GRANT OPTION\n"
assert expected_access1 in instance.query("SHOW ACCESS")
assert expected_access2 in instance.query("SHOW ACCESS")
assert instance.query("SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except from system.users WHERE name IN ('A', 'B') ORDER BY name") ==\
TSV([[ "A", "local directory", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ],
[ "B", "local directory", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ]])
assert instance.query("SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") ==\
TSV([[ "A", "\N", "SELECT", "test", "table", "\N", 0, 0 ],
[ "B", "\N", "CREATE", "\N", "\N", "\N", 0, 1 ]])
def test_current_database():
instance.query("CREATE USER A")
instance.query("GRANT SELECT ON table TO A", database="test")
assert instance.query("SHOW GRANTS FOR A") == TSV([ "GRANT SELECT ON test.table TO A" ])
assert instance.query("SHOW GRANTS FOR A", database="test") == TSV([ "GRANT SELECT ON test.table TO A" ])
assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n"
assert instance.query("SELECT * FROM table", user='A', database='test') == "1\t5\n2\t10\n"
instance.query("CREATE TABLE default.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()")
assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM table", user='A')
``` |
{
"source": "johnskopis/salt",
"score": 3
} |
#### File: salt/modules/win_timezone.py
```python
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
from datetime import datetime
# Import Salt libs
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'timezone'
class TzMapper(object):
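    '''
    Two-way, case-insensitive mapping between Windows timezone names and
    Unix (Olson) timezone names. Despite the parameter name, the constructor
    below is fed a dict of Windows name -> Unix name pairs and builds both
    lookup directions from it.
    '''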
def __init__(self, unix_to_win):
self.win_to_unix = {k.lower(): v for k, v in unix_to_win.items()}
self.unix_to_win = {v.lower(): k for k, v in unix_to_win.items()}
def add(self, k, v):
self.unix_to_win[k.lower()] = v
self.win_to_unix[v.lower()] = k
def remove(self, k):
self.win_to_unix.pop(self.unix_to_win.pop(k.lower()).lower())
def get_win(self, key, default=None):
return self.unix_to_win.get(key.lower(), default)
def get_unix(self, key, default=None):
return self.win_to_unix.get(key.lower(), default)
def list_win(self):
return sorted(self.unix_to_win.values())
def list_unix(self):
return sorted(self.win_to_unix.values())
mapper = TzMapper({
'AUS Central Standard Time': 'Australia/Darwin',
'AUS Eastern Standard Time': 'Australia/Sydney',
'Afghanistan Standard Time': 'Asia/Kabul',
'Alaskan Standard Time': 'America/Anchorage',
'Aleutian Standard Time': 'America/Adak',
'Altai Standard Time': 'Asia/Barnaul',
'Arab Standard Time': 'Asia/Riyadh',
'Arabian Standard Time': 'Asia/Dubai',
'Arabic Standard Time': 'Asia/Baghdad',
'Argentina Standard Time': 'America/Buenos_Aires',
'Astrakhan Standard Time': 'Europe/Astrakhan',
'Atlantic Standard Time': 'America/Halifax',
'Aus Central W. Standard Time': 'Australia/Eucla',
'Azerbaijan Standard Time': 'Asia/Baku',
'Azores Standard Time': 'Atlantic/Azores',
'Bahia Standard Time': 'America/Bahia',
'Bangladesh Standard Time': 'Asia/Dhaka',
'Belarus Standard Time': 'Europe/Minsk',
'Bougainville Standard Time': 'Pacific/Bougainville',
'Canada Central Standard Time': 'America/Regina',
'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
'Caucasus Standard Time': 'Asia/Yerevan',
'Cen. Australia Standard Time': 'Australia/Adelaide',
'Central America Standard Time': 'America/Guatemala',
'Central Asia Standard Time': 'Asia/Almaty',
'Central Brazilian Standard Time': 'America/Cuiaba',
'Central Europe Standard Time': 'Europe/Budapest',
'Central European Standard Time': 'Europe/Warsaw',
'Central Pacific Standard Time': 'Pacific/Guadalcanal',
'Central Standard Time': 'America/Chicago',
'Central Standard Time (Mexico)': 'America/Mexico_City',
'Chatham Islands Standard Time': 'Pacific/Chatham',
'China Standard Time': 'Asia/Shanghai',
'Cuba Standard Time': 'America/Havana',
'Dateline Standard Time': 'Etc/GMT+12',
'E. Africa Standard Time': 'Africa/Nairobi',
'E. Australia Standard Time': 'Australia/Brisbane',
'E. Europe Standard Time': 'Europe/Chisinau',
'E. South America Standard Time': 'America/Sao_Paulo',
'Easter Island Standard Time': 'Pacific/Easter',
'Eastern Standard Time': 'America/New_York',
'Eastern Standard Time (Mexico)': 'America/Cancun',
'Egypt Standard Time': 'Africa/Cairo',
'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
'FLE Standard Time': 'Europe/Kiev',
'Fiji Standard Time': 'Pacific/Fiji',
'GMT Standard Time': 'Europe/London',
'GTB Standard Time': 'Europe/Bucharest',
'Georgian Standard Time': 'Asia/Tbilisi',
'Greenland Standard Time': 'America/Godthab',
'Greenwich Standard Time': 'Atlantic/Reykjavik',
'Haiti Standard Time': 'America/Port-au-Prince',
'Hawaiian Standard Time': 'Pacific/Honolulu',
'India Standard Time': 'Asia/Calcutta',
'Iran Standard Time': 'Asia/Tehran',
'Israel Standard Time': 'Asia/Jerusalem',
'Jordan Standard Time': 'Asia/Amman',
'Kaliningrad Standard Time': 'Europe/Kaliningrad',
'Korea Standard Time': 'Asia/Seoul',
'Libya Standard Time': 'Africa/Tripoli',
'Line Islands Standard Time': 'Pacific/Kiritimati',
'Lord Howe Standard Time': 'Australia/Lord_Howe',
'Magadan Standard Time': 'Asia/Magadan',
'Magallanes Standard Time': 'America/Punta_Arenas',
'Marquesas Standard Time': 'Pacific/Marquesas',
'Mauritius Standard Time': 'Indian/Mauritius',
'Middle East Standard Time': 'Asia/Beirut',
'Montevideo Standard Time': 'America/Montevideo',
'Morocco Standard Time': 'Africa/Casablanca',
'Mountain Standard Time': 'America/Denver',
'Mountain Standard Time (Mexico)': 'America/Chihuahua',
'Myanmar Standard Time': 'Asia/Rangoon',
'N. Central Asia Standard Time': 'Asia/Novosibirsk',
'Namibia Standard Time': 'Africa/Windhoek',
'Nepal Standard Time': 'Asia/Katmandu',
'New Zealand Standard Time': 'Pacific/Auckland',
'Newfoundland Standard Time': 'America/St_Johns',
'Norfolk Standard Time': 'Pacific/Norfolk',
'North Asia East Standard Time': 'Asia/Irkutsk',
'North Asia Standard Time': 'Asia/Krasnoyarsk',
'North Korea Standard Time': 'Asia/Pyongyang',
'Omsk Standard Time': 'Asia/Omsk',
'Pacific SA Standard Time': 'America/Santiago',
'Pacific Standard Time': 'America/Los_Angeles',
'Pacific Standard Time (Mexico)': 'America/Tijuana',
'Pakistan Standard Time': 'Asia/Karachi',
'Paraguay Standard Time': 'America/Asuncion',
'Romance Standard Time': 'Europe/Paris',
'Russia Time Zone 10': 'Asia/Srednekolymsk',
'Russia Time Zone 11': 'Asia/Kamchatka',
'Russia Time Zone 3': 'Europe/Samara',
'Russian Standard Time': 'Europe/Moscow',
'SA Eastern Standard Time': 'America/Cayenne',
'SA Pacific Standard Time': 'America/Bogota',
'SA Western Standard Time': 'America/La_Paz',
'SE Asia Standard Time': 'Asia/Bangkok',
'Saint Pierre Standard Time': 'America/Miquelon',
'Sakhalin Standard Time': 'Asia/Sakhalin',
'Samoa Standard Time': 'Pacific/Apia',
'Saratov Standard Time': 'Europe/Saratov',
'Singapore Standard Time': 'Asia/Singapore',
'South Africa Standard Time': 'Africa/Johannesburg',
'Sri Lanka Standard Time': 'Asia/Colombo',
'Syria Standard Time': 'Asia/Damascus',
'Taipei Standard Time': 'Asia/Taipei',
'Tasmania Standard Time': 'Australia/Hobart',
'Tocantins Standard Time': 'America/Araguaina',
'Tokyo Standard Time': 'Asia/Tokyo',
'Tomsk Standard Time': 'Asia/Tomsk',
'Tonga Standard Time': 'Pacific/Tongatapu',
'Transbaikal Standard Time': 'Asia/Chita',
'Turkey Standard Time': 'Europe/Istanbul',
'Turks And Caicos Standard Time': 'America/Grand_Turk',
'US Eastern Standard Time': 'America/Indianapolis',
'US Mountain Standard Time': 'America/Phoenix',
'UTC': 'Etc/GMT',
'UTC+12': 'Etc/GMT-12',
'UTC+13': 'Etc/GMT-13',
'UTC-02': 'Etc/GMT+2',
'UTC-08': 'Etc/GMT+8',
'UTC-09': 'Etc/GMT+9',
'UTC-11': 'Etc/GMT+11',
'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
'Venezuela Standard Time': 'America/Caracas',
'Vladivostok Standard Time': 'Asia/Vladivostok',
'W. Australia Standard Time': 'Australia/Perth',
'W. Central Africa Standard Time': 'Africa/Lagos',
'W. Europe Standard Time': 'Europe/Berlin',
'W. Mongolia Standard Time': 'Asia/Hovd',
'West Asia Standard Time': 'Asia/Tashkent',
'West Bank Standard Time': 'Asia/Hebron',
'West Pacific Standard Time': 'Pacific/Port_Moresby',
'Yakutsk Standard Time': 'Asia/Yakutsk'})
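# Illustrative lookups (kept as comments so import-time behavior is unchanged;
# values are taken from the mapping above):
#   mapper.get_win('America/Denver')              -> 'Mountain Standard Time'
#   mapper.get_unix('Mountain Standard Time')     -> 'America/Denver'
#   mapper.get_unix('Not A Real Zone', 'Unknown') -> 'Unknown'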
def __virtual__():
'''
Only load on windows
'''
if not __utils__['platform.is_windows']():
return False, "Module win_timezone: Not on Windows client"
if not HAS_PYTZ:
return False, "Module win_timezone: pytz not found"
if not __utils__['path.which']('tzutil'):
return False, "Module win_timezone: tzutil not found"
return __virtualname__
def get_zone():
'''
    Get current timezone (e.g. America/Denver)
Returns:
str: Timezone in unix format
Raises:
CommandExecutionError: If timezone could not be gathered
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
'''
cmd = ['tzutil', '/g']
res = __salt__['cmd.run_all'](cmd, python_shell=False)
if res['retcode'] or not res['stdout']:
raise CommandExecutionError('tzutil encountered an error getting '
'timezone',
info=res)
return mapper.get_unix(res['stdout'].lower(), 'Unknown')
def get_offset():
'''
    Get current numeric timezone offset from UTC (e.g. -0700)
Returns:
str: Offset from UTC
CLI Example:
.. code-block:: bash
salt '*' timezone.get_offset
'''
# http://craigglennie.com/programming/python/2013/07/21/working-with-timezones-using-Python-and-pytz-localize-vs-normalize/
tz_object = pytz.timezone(get_zone())
utc_time = pytz.utc.localize(datetime.utcnow())
loc_time = utc_time.astimezone(tz_object)
norm_time = tz_object.normalize(loc_time)
return norm_time.strftime('%z')
def get_zonecode():
'''
    Get current timezone (e.g. PST, MDT, etc.)
Returns:
str: An abbreviated timezone code
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zonecode
'''
tz_object = pytz.timezone(get_zone())
loc_time = tz_object.localize(datetime.utcnow())
return loc_time.tzname()
def set_zone(timezone):
'''
    Sets the timezone using tzutil.
Args:
timezone (str): A valid timezone
Returns:
bool: ``True`` if successful, otherwise ``False``
Raises:
CommandExecutionError: If invalid timezone is passed
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
'''
    # if it's one of the keys, just use it
if timezone.lower() in mapper.win_to_unix:
win_zone = timezone
elif timezone.lower() in mapper.unix_to_win:
# if it's one of the values, use the key
win_zone = mapper.get_win(timezone)
else:
# Raise error because it's neither key nor value
raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))
# Set the value
cmd = ['tzutil', '/s', win_zone]
res = __salt__['cmd.run_all'](cmd, python_shell=False)
if res['retcode']:
raise CommandExecutionError('tzutil encountered an error setting '
'timezone: {0}'.format(timezone),
info=res)
return zone_compare(timezone)
def zone_compare(timezone):
'''
Compares the given timezone with the machine timezone. Mostly useful for
running state checks.
Args:
timezone (str):
The timezone to compare. This can be in Windows or Unix format. Can
be any of the values returned by the ``timezone.list`` function
Returns:
bool: ``True`` if they match, otherwise ``False``
    CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver'
'''
    # if it's one of the keys, just use it
if timezone.lower() in mapper.win_to_unix:
check_zone = timezone
elif timezone.lower() in mapper.unix_to_win:
# if it's one of the values, use the key
check_zone = mapper.get_win(timezone)
else:
# Raise error because it's neither key nor value
raise CommandExecutionError('Invalid timezone passed: {0}'
''.format(timezone))
return get_zone() == mapper.get_unix(check_zone, 'Unknown')
def list(unix_style=True):
'''
Return a list of Timezones that this module supports. These can be in either
Unix or Windows format.
.. versionadded:: 2018.3.3
Args:
unix_style (bool):
``True`` returns Unix-style timezones. ``False`` returns
Windows-style timezones. Default is ``True``
Returns:
list: A list of supported timezones
CLI Example:
.. code-block:: bash
# Unix-style timezones
salt '*' timezone.list
# Windows-style timezones
salt '*' timezone.list unix_style=False
'''
if unix_style:
return mapper.list_unix()
else:
return mapper.list_win()
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
.. note::
The hardware clock is always local time on Windows so this will always
return "localtime"
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
# The hardware clock is always localtime on Windows
return 'localtime'
def set_hwclock(clock):
'''
Sets the hardware clock to be either UTC or localtime
.. note::
The hardware clock is always local time on Windows so this will always
return ``False``
CLI Example:
.. code-block:: bash
salt '*' timezone.set_hwclock UTC
'''
# The hardware clock is always localtime on Windows
return False
```
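The `get_offset` function above relies on pytz's localize/normalize pattern so DST transitions are applied before the offset is formatted. A minimal standalone sketch of the same idea (assuming pytz is installed; the zone name is just an example, not tied to any particular host):
```python
from datetime import datetime

import pytz

def utc_offset(zone_name):
    # Attach UTC to the naive current time, convert to the target zone, then
    # normalize so any DST shift is folded in before formatting the offset.
    tz = pytz.timezone(zone_name)
    utc_now = pytz.utc.localize(datetime.utcnow())
    local = tz.normalize(utc_now.astimezone(tz))
    return local.strftime('%z')

print(utc_offset('America/Denver'))  # '-0600' or '-0700' depending on DST
```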
#### File: cloud/clouds/test_ec2.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import os
import tempfile
# Import Salt Libs
from salt.cloud.clouds import ec2
from salt.exceptions import SaltCloudSystemExit
import salt.utils.files
# Import Salt Testing Libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, PropertyMock
from tests.unit.test_crypt import PRIVKEY_DATA
PASS_DATA = (
b'<KEY>'
b'<KEY>'
b'<KEY>'
    b'<KEY>'
    b'<KEY>'
)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EC2TestCase(TestCase, LoaderModuleMockMixin):
'''
Unit TestCase for salt.cloud.clouds.ec2 module.
'''
def setUp(self):
super(EC2TestCase, self).setUp()
with tempfile.NamedTemporaryFile(dir=RUNTIME_VARS.TMP, suffix='.pem', delete=True) as fp:
self.key_file = fp.name
def tearDown(self):
super(EC2TestCase, self).tearDown()
if os.path.exists(self.key_file):
os.remove(self.key_file)
def setup_loader_modules(self):
return {ec2: {'__opts__': {}}}
def test__validate_key_path_and_mode(self):
# Key file exists
with patch('os.path.exists', return_value=True):
with patch('os.stat') as patched_stat:
type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o644)
self.assertRaises(
SaltCloudSystemExit, ec2._validate_key_path_and_mode, 'key_file')
type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o600)
self.assertTrue(ec2._validate_key_path_and_mode('key_file'))
type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o400)
self.assertTrue(ec2._validate_key_path_and_mode('key_file'))
# Key file does not exist
with patch('os.path.exists', return_value=False):
self.assertRaises(
SaltCloudSystemExit, ec2._validate_key_path_and_mode, 'key_file')
@patch('salt.cloud.clouds.ec2._get_node')
@patch('salt.cloud.clouds.ec2.get_location')
@patch('salt.cloud.clouds.ec2.get_provider')
@patch('salt.utils.aws.query')
def test_get_password_data(self, query, get_provider, get_location, _get_node):
query.return_value = [
{
'passwordData': <PASSWORD>
}
]
_get_node.return_value = {'instanceId': 'i-abcdef'}
get_location.return_value = 'us-west2'
get_provider.return_value = 'ec2'
with salt.utils.files.fopen(self.key_file, 'w') as fp:
fp.write(PRIVKEY_DATA)
ret = ec2.get_password_data(
name='i-abcddef', kwargs={'key_file': self.key_file}, call='action'
)
assert ret['passwordData'] == PASS_DATA
assert ret['password'] == '<PASSWORD>!'
```
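The `os.stat` patching in `test__validate_key_path_and_mode` above works by replacing `st_mode` with a `PropertyMock`, so each assertion can see a different file mode. A minimal sketch of that pattern using only the standard library (the checked function here is a stand-in for illustration, not Salt's `_validate_key_path_and_mode`):
```python
import os
from unittest.mock import patch, PropertyMock

def is_private_mode(path):
    # Stand-in check: accept only owner read/write (0o600) or owner read (0o400).
    return os.stat(path).st_mode & 0o777 in (0o600, 0o400)

with patch('os.stat') as patched_stat:
    type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o644)
    assert not is_private_mode('some_key.pem')
    type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o600)
    assert is_private_mode('some_key.pem')
```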
#### File: unit/states/test_grains.py
```python
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import os
import contextlib
# Import Salt Testing libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Import salt libs
import salt.utils.files
import salt.utils.stringutils
import salt.utils.yaml
import salt.modules.grains as grainsmod
import salt.states.grains as grains
from salt.ext import six
@skipIf(NO_MOCK, NO_MOCK_REASON)
class GrainsTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
grains_test_dir = '__salt_test_state_grains'
if not os.path.exists(os.path.join(RUNTIME_VARS.TMP, grains_test_dir)):
os.makedirs(os.path.join(RUNTIME_VARS.TMP, grains_test_dir))
loader_globals = {
'__opts__': {
'test': False,
'conf_file': os.path.join(RUNTIME_VARS.TMP, grains_test_dir, 'minion'),
'cachedir': os.path.join(RUNTIME_VARS.TMP, grains_test_dir),
'local': True,
},
'__salt__': {
'cmd.run_all': MagicMock(return_value={
'pid': 5,
'retcode': 0,
'stderr': '',
'stdout': ''}),
'grains.get': grainsmod.get,
'grains.set': grainsmod.set,
'grains.setval': grainsmod.setval,
'grains.delval': grainsmod.delval,
'grains.append': grainsmod.append,
'grains.remove': grainsmod.remove,
'saltutil.sync_grains': MagicMock()
}
}
return {grains: loader_globals, grainsmod: loader_globals}
def assertGrainFileContent(self, grains_string):
if os.path.isdir(grains.__opts__['conf_file']):
grains_file = os.path.join(
grains.__opts__['conf_file'],
'grains')
else:
grains_file = os.path.join(
os.path.dirname(grains.__opts__['conf_file']),
'grains')
with salt.utils.files.fopen(grains_file, "r") as grf:
grains_data = salt.utils.stringutils.to_unicode(grf.read())
self.assertMultiLineEqual(grains_string, grains_data)
@contextlib.contextmanager
def setGrains(self, grains_data):
with patch.dict(grains.__grains__, grains_data):
with patch.dict(grainsmod.__grains__, grains_data):
if os.path.isdir(grains.__opts__['conf_file']):
grains_file = os.path.join(
grains.__opts__['conf_file'],
'grains')
else:
grains_file = os.path.join(
os.path.dirname(grains.__opts__['conf_file']), 'grains')
with salt.utils.files.fopen(grains_file, "w+") as grf:
salt.utils.yaml.safe_dump(grains_data, grf, default_flow_style=False)
yield
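    # setGrains patches __grains__ in both the state and execution modules and
    # writes the same data to the on-disk grains file, so each test starts from
    # a consistent in-memory and on-disk view.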
# 'exists' function tests: 2
def test_exists_missing(self):
with self.setGrains({'a': 'aval'}):
ret = grains.exists(name='foo')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'Grain does not exist')
self.assertEqual(ret['changes'], {})
def test_exists_found(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Grain already set
ret = grains.exists(name='foo')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain exists')
self.assertEqual(ret['changes'], {})
# 'present' function tests: 12
def test_present_add(self):
# Set a non existing grain
with self.setGrains({'a': 'aval'}):
ret = grains.present(name='foo', value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': 'bar'})
self.assertEqual(grains.__grains__, {'a': 'aval', 'foo': 'bar'})
self.assertGrainFileContent("a: aval\nfoo: bar\n")
# Set a non existing nested grain
with self.setGrains({'a': 'aval'}):
ret = grains.present(name='foo:is:nested', value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': {'is': {'nested': 'bar'}}})
self.assertEqual(grains.__grains__, {'a': 'aval', 'foo': {'is': {'nested': 'bar'}}})
self.assertGrainFileContent("a: aval\n"
"foo:\n"
" is:\n"
" nested: bar\n"
)
# Set a non existing nested dict grain
with self.setGrains({'a': 'aval'}):
ret = grains.present(
name='foo:is:nested',
value={'bar': 'is a dict'})
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': {'is': {'nested': {'bar': 'is a dict'}}}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': {'bar': 'is a dict'}}}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " is:\n"
+ " nested:\n"
+ " bar: is a dict\n"
)
def test_present_add_key_to_existing(self):
with self.setGrains({'a': 'aval', 'foo': {'k1': 'v1'}}):
            # Add a new key to an existing dict grain
ret = grains.present(
name='foo:k2',
value='v2')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Set grain foo:k2 to v2')
self.assertEqual(ret['changes'], {'foo': {'k2': 'v2', 'k1': 'v1'}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'k1': 'v1', 'k2': 'v2'}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " k1: v1\n"
+ " k2: v2\n"
)
def test_present_already_set(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Grain already set
ret = grains.present(
name='foo',
value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain is already set')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}}}):
# Nested grain already set
ret = grains.present(
name='foo:is:nested',
value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain is already set')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': 'bar'}}})
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}}}):
# Nested dict grain already set
ret = grains.present(
name='foo:is',
value={'nested': 'bar'})
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain is already set')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': 'bar'}}})
def test_present_overwrite(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Overwrite an existing grain
ret = grains.present(
name='foo',
value='newbar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': 'newbar'})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'newbar'})
self.assertGrainFileContent("a: aval\n"
+ "foo: newbar\n"
)
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Clear a grain (set to None)
ret = grains.present(
name='foo',
value=None)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': None})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': None})
self.assertGrainFileContent("a: aval\n"
+ "foo: null\n"
)
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}}}):
# Overwrite an existing nested grain
ret = grains.present(
name='foo:is:nested',
value='newbar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': {'is': {'nested': 'newbar'}}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': 'newbar'}}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " is:\n"
+ " nested: newbar\n"
)
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}}}):
# Clear a nested grain (set to None)
ret = grains.present(
name='foo:is:nested',
value=None)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': {'is': {'nested': None}}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': None}}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " is:\n"
+ " nested: null\n"
)
def test_present_fail_overwrite(self):
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'val'}}}):
# Overwrite an existing grain
ret = grains.present(
name='foo:is',
value='newbar')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['changes'], {})
self.assertEqual(ret['comment'], 'The key \'foo:is\' exists but is a dict or a list. Use \'force=True\' to overwrite.')
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': 'val'}}})
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'val'}}}):
# Clear a grain (set to None)
ret = grains.present(
name='foo:is',
value=None)
self.assertEqual(ret['result'], False)
self.assertEqual(ret['changes'], {})
self.assertEqual(ret['comment'], 'The key \'foo:is\' exists but is a dict or a list. Use \'force=True\' to overwrite.')
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': 'val'}}})
def test_present_fails_to_set_dict_or_list(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Fails to overwrite a grain to a list
ret = grains.present(
name='foo',
value=['l1', 'l2'])
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'The key \'foo\' exists and the '
+ 'given value is a dict or a list. '
+ 'Use \'force=True\' to overwrite.')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Fails setting a grain to a dict
ret = grains.present(
name='foo',
value={'k1': 'v1'})
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'The key \'foo\' exists and the given '
+ 'value is a dict or a list. Use '
+ '\'force=True\' to overwrite.')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}}}):
# Fails to overwrite a nested grain to a list
ret = grains.present(
name='foo,is,nested',
value=['l1', 'l2'],
delimiter=',')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['changes'], {})
self.assertEqual(ret['comment'], 'The key \'foo:is:nested\' exists and the '
+ 'given value is a dict or a list. '
+ 'Use \'force=True\' to overwrite.')
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': 'bar'}}})
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}}}):
# Fails setting a nested grain to a dict
ret = grains.present(
name='foo:is:nested',
value={'k1': 'v1'})
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'The key \'foo:is:nested\' exists and the '
+ 'given value is a dict or a list. '
+ 'Use \'force=True\' to overwrite.')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': 'bar'}}})
def test_present_fail_merge_dict(self):
with self.setGrains({'a': 'aval', 'foo': {'k1': 'v1'}}):
# Fails setting a grain to a dict
ret = grains.present(
name='foo',
value={'k2': 'v2'})
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'The key \'foo\' exists but '
+ 'is a dict or a list. '
+ 'Use \'force=True\' to overwrite.')
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'k1': 'v1'}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " k1: v1\n"
)
def test_present_force_to_set_dict_or_list(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Force to overwrite a grain to a list
ret = grains.present(
name='foo',
value=['l1', 'l2'],
force=True)
self.assertEqual(ret['result'], True)
self.assertEqual(
ret['comment'],
"Set grain foo to ['l1', 'l2']" if six.PY3
else "Set grain foo to [u'l1', u'l2']"
)
self.assertEqual(ret['changes'], {'foo': ['l1', 'l2']})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['l1', 'l2']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- l1\n"
+ "- l2\n"
)
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Force setting a grain to a dict
ret = grains.present(
name='foo',
value={'k1': 'v1'},
force=True)
self.assertEqual(ret['result'], True)
self.assertEqual(
ret['comment'],
"Set grain foo to {'k1': 'v1'}" if six.PY3
else "Set grain foo to {u'k1': u'v1'}"
)
self.assertEqual(ret['changes'], {'foo': {'k1': 'v1'}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'k1': 'v1'}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " k1: v1\n"
)
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}}}):
# Force to overwrite a nested grain to a list
ret = grains.present(
name='foo,is,nested',
value=['l1', 'l2'],
delimiter=',',
force=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['changes'], {'foo': {'is': {'nested': ['l1', 'l2']}}})
self.assertEqual(
ret['comment'],
"Set grain foo:is:nested to ['l1', 'l2']" if six.PY3
else "Set grain foo:is:nested to [u'l1', u'l2']"
)
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': ['l1', 'l2']}}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " is:\n"
+ " nested:\n"
+ " - l1\n"
+ " - l2\n"
)
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': 'bar'}, 'and': 'other'}}):
# Force setting a nested grain to a dict
ret = grains.present(
name='foo:is:nested',
value={'k1': 'v1'},
force=True)
self.assertEqual(ret['result'], True)
self.assertEqual(
ret['comment'],
"Set grain foo:is:nested to {'k1': 'v1'}" if six.PY3
else "Set grain foo:is:nested to {u'k1': u'v1'}"
)
self.assertEqual(ret['changes'], {'foo': {'is': {'nested': {'k1': 'v1'}}, 'and': 'other'}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': {'k1': 'v1'}}, 'and': 'other'}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " and: other\n"
+ " is:\n"
+ " nested:\n"
+ " k1: v1\n"
)
def test_present_fails_to_convert_value_to_key(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Fails converting a value to a nested grain key
ret = grains.present(
name='foo:is:nested',
value={'k1': 'v1'})
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'The key \'foo\' value is \'bar\', '
+ 'which is different from the provided '
+ 'key \'is\'. Use \'force=True\' to overwrite.')
self.assertEqual(ret['changes'], {})
def test_present_overwrite_test(self):
with patch.dict(grains.__opts__, {'test': True}):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Overwrite an existing grain
ret = grains.present(
name='foo',
value='newbar')
self.assertEqual(ret['result'], None)
self.assertEqual(ret['changes'], {'changed': {'foo': 'newbar'}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
self.assertGrainFileContent("a: aval\n"
+ "foo: bar\n"
)
def test_present_convert_value_to_key(self):
with self.setGrains({'a': 'aval', 'foo': 'is'}):
# Converts a value to a nested grain key
ret = grains.present(
name='foo:is:nested',
value={'k1': 'v1'})
self.assertEqual(ret['result'], True)
self.assertEqual(
ret['comment'],
"Set grain foo:is:nested to {'k1': 'v1'}" if six.PY3
else "Set grain foo:is:nested to {u'k1': u'v1'}"
)
self.assertEqual(ret['changes'], {'foo': {'is': {'nested': {'k1': 'v1'}}}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': {'k1': 'v1'}}}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " is:\n"
+ " nested:\n"
+ " k1: v1\n"
)
with self.setGrains({'a': 'aval', 'foo': ['one', 'is', 'correct']}):
# Converts a list element to a nested grain key
ret = grains.present(
name='foo:is:nested',
value={'k1': 'v1'})
self.assertEqual(ret['result'], True)
self.assertEqual(
ret['comment'],
"Set grain foo:is:nested to {'k1': 'v1'}" if six.PY3
else "Set grain foo:is:nested to {u'k1': u'v1'}"
)
self.assertEqual(ret['changes'], {'foo': ['one', {'is': {'nested': {'k1': 'v1'}}}, 'correct']})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['one', {'is': {'nested': {'k1': 'v1'}}}, 'correct']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- one\n"
+ "- is:\n"
+ " nested:\n"
+ " k1: v1\n"
+ "- correct\n"
)
def test_present_unknown_failure(self):
with patch('salt.modules.grains.setval') as mocked_setval:
mocked_setval.return_value = 'Failed to set grain foo'
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Unknown reason failure
ret = grains.present(
name='foo',
value='baz')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'Failed to set grain foo')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
self.assertGrainFileContent("a: aval\n"
+ "foo: bar\n"
)
# 'absent' function tests: 6
def test_absent_already(self):
# Unset a non existent grain
with self.setGrains({'a': 'aval'}):
ret = grains.absent(
name='foo')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo does not exist')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval'})
self.assertGrainFileContent("a: aval\n")
# Unset a non existent nested grain
with self.setGrains({'a': 'aval'}):
ret = grains.absent(
name='foo:is:nested')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo:is:nested does not exist')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval'})
self.assertGrainFileContent("a: aval\n")
def test_absent_unset(self):
# Unset a grain
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
ret = grains.absent(
name='foo')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value for grain foo was set to None')
self.assertEqual(ret['changes'], {'grain': 'foo', 'value': None})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': None})
self.assertGrainFileContent("a: aval\n"
+ "foo: null\n"
)
# Unset grain when its value is False
with self.setGrains({'a': 'aval', 'foo': False}):
ret = grains.absent(
name='foo')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value for grain foo was set to None')
self.assertEqual(ret['changes'], {'grain': 'foo', 'value': None})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': None})
self.assertGrainFileContent("a: aval\n"
+ "foo: null\n"
)
# Unset a nested grain
with self.setGrains({'a': 'aval', 'foo': ['order', {'is': {'nested': 'bar'}}, 'correct']}):
ret = grains.absent(
name='foo,is,nested',
delimiter=',')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value for grain foo:is:nested was set to None')
self.assertEqual(ret['changes'], {'grain': 'foo:is:nested', 'value': None})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['order', {'is': {'nested': None}}, 'correct']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- order\n"
+ "- is:\n"
+ " nested: null\n"
+ "- correct\n"
)
        # Unsetting a non-existent nested value doesn't change anything
with self.setGrains({'a': 'aval', 'foo': ['order', {'is': 'nested'}, 'correct']}):
ret = grains.absent(
name='foo:is:nested')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo:is:nested does not exist')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['order', {'is': 'nested'}, 'correct']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- order\n"
+ "- is: nested\n"
+ "- correct\n"
)
def test_absent_unset_test(self):
with patch.dict(grains.__opts__, {'test': True}):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
# Overwrite an existing grain
ret = grains.absent(name='foo')
self.assertEqual(ret['result'], None)
self.assertEqual(ret['changes'], {'grain': 'foo', 'value': None})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
self.assertGrainFileContent("a: aval\n"
+ "foo: bar\n"
)
def test_absent_fails_nested_complex_grain(self):
# Unset a nested complex grain
with self.setGrains({'a': 'aval', 'foo': ['order', {'is': {'nested': 'bar'}}, 'correct']}):
ret = grains.absent(
name='foo:is')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'The key \'foo:is\' exists but is a dict or a list. Use \'force=True\' to overwrite.')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['order', {'is': {'nested': 'bar'}}, 'correct']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- order\n"
+ "- is:\n"
+ " nested: bar\n"
+ "- correct\n"
)
def test_absent_force_nested_complex_grain(self):
# Unset a nested complex grain
with self.setGrains({'a': 'aval', 'foo': ['order', {'is': {'nested': 'bar'}}, 'correct']}):
ret = grains.absent(
name='foo:is',
force=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value for grain foo:is was set to None')
self.assertEqual(ret['changes'], {'grain': 'foo:is', 'value': None})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['order', {'is': None}, 'correct']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- order\n"
+ "- is: null\n"
+ "- correct\n"
)
def test_absent_delete(self):
# Delete a grain
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
ret = grains.absent(
name='foo',
destructive=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo was deleted')
self.assertEqual(ret['changes'], {'deleted': 'foo'})
self.assertEqual(
grains.__grains__,
{'a': 'aval'})
self.assertGrainFileContent("a: aval\n")
# Delete a previously unset grain
with self.setGrains({'a': 'aval', 'foo': None}):
ret = grains.absent(
name='foo',
destructive=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo was deleted')
self.assertEqual(ret['changes'], {'deleted': 'foo'})
self.assertEqual(
grains.__grains__,
{'a': 'aval'})
self.assertGrainFileContent("a: aval\n")
# Delete a nested grain
with self.setGrains({'a': 'aval', 'foo': ['order', {'is': {'nested': 'bar', 'other': 'value'}}, 'correct']}):
ret = grains.absent(
name='foo:is:nested',
destructive=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo:is:nested was deleted')
self.assertEqual(ret['changes'], {'deleted': 'foo:is:nested'})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['order', {'is': {'other': 'value'}}, 'correct']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- order\n"
+ "- is:\n"
+ " other: value\n"
+ "- correct\n"
)
# 'append' function tests: 6
def test_append(self):
# Append to an existing list
with self.setGrains({'a': 'aval', 'foo': ['bar']}):
ret = grains.append(
name='foo',
value='baz')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value baz was added to grain foo')
self.assertEqual(ret['changes'], {'added': 'baz'})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['bar', 'baz']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar\n"
+ "- baz\n"
)
def test_append_nested(self):
# Append to an existing nested list
with self.setGrains({'a': 'aval', 'foo': {'list': ['bar']}}):
ret = grains.append(
name='foo,list',
value='baz',
delimiter=',')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value baz was added to grain foo:list')
self.assertEqual(ret['changes'], {'added': 'baz'})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'list': ['bar', 'baz']}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " list:\n"
+ " - bar\n"
+ " - baz\n"
)
def test_append_already(self):
# Append to an existing list
with self.setGrains({'a': 'aval', 'foo': ['bar']}):
ret = grains.append(
name='foo',
value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value bar is already in the list '
+ 'for grain foo')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['bar']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar\n"
)
def test_append_fails_not_a_list(self):
# Fail to append to an existing grain, not a list
with self.setGrains({'a': 'aval', 'foo': {'bar': 'val'}}):
ret = grains.append(
name='foo',
value='baz')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'Grain foo is not a valid list')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'bar': 'val'}})
def test_append_convert_to_list(self):
# Append to an existing grain, converting to a list
with self.setGrains({'a': 'aval', 'foo': {'bar': 'val'}}):
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " bar: val\n"
)
ret = grains.append(
name='foo',
value='baz',
convert=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value baz was added to grain foo')
self.assertEqual(ret['changes'], {'added': 'baz'})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': [{'bar': 'val'}, 'baz']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar: val\n"
+ "- baz\n"
)
        # Append to an existing grain, converting a multi-value dict to a list
with self.setGrains({'a': 'aval', 'foo': {'bar': 'val', 'other': 'value'}}):
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " bar: val\n"
+ " other: value\n"
)
ret = grains.append(
name='foo',
value='baz',
convert=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value baz was added to grain foo')
self.assertEqual(ret['changes'], {'added': 'baz'})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': [{'bar': 'val', 'other': 'value'}, 'baz']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar: val\n"
+ " other: value\n"
+ "- baz\n"
)
def test_append_fails_inexistent(self):
# Append to a non existing grain
with self.setGrains({'a': 'aval'}):
ret = grains.append(
name='foo',
value='bar')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'Grain foo does not exist')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval'})
def test_append_convert_to_list_empty(self):
        # Append to a grain that is None, converting it to a list
with self.setGrains({'foo': None}):
ret = grains.append(name='foo',
value='baz',
convert=True)
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value baz was added to grain foo')
self.assertEqual(ret['changes'], {'added': 'baz'})
self.assertEqual(
grains.__grains__,
{'foo': ['baz']})
self.assertGrainFileContent("foo:\n"
+ "- baz\n")
# 'list_present' function tests: 7
def test_list_present(self):
with self.setGrains({'a': 'aval', 'foo': ['bar']}):
ret = grains.list_present(
name='foo',
value='baz')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Append value baz to grain foo')
self.assertEqual(ret['changes'], {'new': {'foo': ['bar', 'baz']}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['bar', 'baz']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar\n"
+ "- baz\n"
)
def test_list_present_nested(self):
with self.setGrains({'a': 'aval', 'foo': {'is': {'nested': ['bar']}}}):
ret = grains.list_present(
name='foo,is,nested',
value='baz',
delimiter=',')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Append value baz to grain foo:is:nested')
self.assertEqual(ret['changes'], {'new': {'foo': {'is': {'nested': ['bar', 'baz']}}}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': ['bar', 'baz']}}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " is:\n"
+ " nested:\n"
+ " - bar\n"
+ " - baz\n"
)
def test_list_present_inexistent(self):
with self.setGrains({'a': 'aval'}):
ret = grains.list_present(
name='foo',
value='baz')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Append value baz to grain foo')
self.assertEqual(ret['changes'], {'new': {'foo': ['baz']}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['baz']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- baz\n"
)
def test_list_present_inexistent_nested(self):
with self.setGrains({'a': 'aval'}):
ret = grains.list_present(
name='foo:is:nested',
value='baz')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Append value baz to grain foo:is:nested')
self.assertEqual(ret['changes'], {'new': {'foo': {'is': {'nested': ['baz']}}}})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'is': {'nested': ['baz']}}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " is:\n"
+ " nested:\n"
+ " - baz\n"
)
def test_list_present_not_a_list(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
ret = grains.list_present(
name='foo',
value='baz')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'Grain foo is not a valid list')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
self.assertGrainFileContent("a: aval\n"
+ "foo: bar\n"
)
def test_list_present_nested_already(self):
with self.setGrains({'a': 'aval', 'b': {'foo': ['bar']}}):
ret = grains.list_present(
name='b:foo',
value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value bar is already in grain b:foo')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'b': {'foo': ['bar']}})
self.assertGrainFileContent("a: aval\n"
+ "b:\n"
+ " foo:\n"
+ " - bar\n"
)
def test_list_present_already(self):
with self.setGrains({'a': 'aval', 'foo': ['bar']}):
ret = grains.list_present(
name='foo',
value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value bar is already in grain foo')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['bar']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar\n"
)
def test_list_present_unknown_failure(self):
with self.setGrains({'a': 'aval', 'foo': ['bar']}):
# Unknown reason failure
with patch.dict(grainsmod.__salt__, {'grains.append': MagicMock()}):
ret = grains.list_present(
name='foo',
value='baz')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'Failed append value baz to grain foo')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['bar']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar\n"
)
# 'list_absent' function tests: 6
def test_list_absent(self):
with self.setGrains({'a': 'aval', 'foo': ['bar']}):
ret = grains.list_absent(
name='foo',
value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value bar was deleted from grain foo')
self.assertEqual(ret['changes'], {'deleted': ['bar']})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': []})
self.assertGrainFileContent("a: aval\n"
+ "foo: []\n"
)
def test_list_absent_nested(self):
with self.setGrains({'a': 'aval', 'foo': {'list': ['bar']}}):
ret = grains.list_absent(
name='foo:list',
value='bar')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value bar was deleted from grain foo:list')
self.assertEqual(ret['changes'], {'deleted': ['bar']})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': {'list': []}})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ " list: []\n"
)
def test_list_absent_inexistent(self):
with self.setGrains({'a': 'aval'}):
ret = grains.list_absent(
name='foo',
value='baz')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo does not exist')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval'})
self.assertGrainFileContent("a: aval\n")
def test_list_absent_inexistent_nested(self):
with self.setGrains({'a': 'aval'}):
ret = grains.list_absent(
name='foo:list',
value='baz')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Grain foo:list does not exist')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval'})
self.assertGrainFileContent("a: aval\n")
def test_list_absent_not_a_list(self):
with self.setGrains({'a': 'aval', 'foo': 'bar'}):
ret = grains.list_absent(
name='foo',
value='bar')
self.assertEqual(ret['result'], False)
self.assertEqual(ret['comment'], 'Grain foo is not a valid list')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': 'bar'})
self.assertGrainFileContent("a: aval\n"
+ "foo: bar\n"
)
def test_list_absent_already(self):
with self.setGrains({'a': 'aval', 'foo': ['bar']}):
ret = grains.list_absent(
name='foo',
value='baz')
self.assertEqual(ret['result'], True)
self.assertEqual(ret['comment'], 'Value baz is absent from grain foo')
self.assertEqual(ret['changes'], {})
self.assertEqual(
grains.__grains__,
{'a': 'aval', 'foo': ['bar']})
self.assertGrainFileContent("a: aval\n"
+ "foo:\n"
+ "- bar\n"
)
```
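The grains-file contents asserted throughout these tests come from PyYAML's block-style dump (which `salt.utils.yaml` builds on). A small round-trip sketch with illustrative data shows why a list grain renders as `- item` lines and a cleared grain renders as `null`:
```python
import yaml  # PyYAML

data = {'a': 'aval', 'foo': ['bar', 'baz'], 'cleared': None}
text = yaml.safe_dump(data, default_flow_style=False)
print(text)
# a: aval
# cleared: null
# foo:
# - bar
# - baz
assert yaml.safe_load(text) == data  # None survives the round trip as null
```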
#### File: tests/unit/test_fileclient.py
```python
from __future__ import absolute_import
import errno
import logging
import os
import shutil
# Import Salt Testing libs
from tests.support.runtests import RUNTIME_VARS
from tests.integration import AdaptedConfigurationTestCaseMixin
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch, Mock, MagicMock, NO_MOCK, NO_MOCK_REASON
from tests.support.unit import TestCase, skipIf
# Import Salt libs
import salt.utils.files
from salt.ext.six.moves import range
from salt import fileclient
from salt.ext import six
log = logging.getLogger(__name__)
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
        If the cache directory already exists, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with fileclient.Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == os.sep + os.sep.join(['__test__', 'files', 'base', 'testfile'])
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
        If makedirs raises an errno other than EEXIST, the exception should be propagated.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EROFS)):
with self.assertRaises(OSError):
with fileclient.Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_extrn_path_with_long_filename(self):
safe_file_name = os.path.split(fileclient.Client(self.opts)._extrn_path('https://test.com/' + ('A' * 254), 'base'))[-1]
assert safe_file_name == 'A' * 254
oversized_file_name = os.path.split(fileclient.Client(self.opts)._extrn_path('https://test.com/' + ('A' * 255), 'base'))[-1]
assert len(oversized_file_name) < 256
assert oversized_file_name != 'A' * 255
oversized_file_with_query_params = os.path.split(fileclient.Client(self.opts)._extrn_path('https://test.com/file?' + ('A' * 255), 'base'))[-1]
assert len(oversized_file_with_query_params) < 256
SALTENVS = ('base', 'dev')
SUBDIR = 'subdir'
SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')
def _get_file_roots(fs_root):
return dict(
[(x, [os.path.join(fs_root, x)]) for x in SALTENVS]
)
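# For example, _get_file_roots('/tmp/fs_root') yields
# {'base': ['/tmp/fs_root/base'], 'dev': ['/tmp/fs_root/dev']},
# which is the shape the 'file_roots' option expects.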
@skipIf(NO_MOCK, NO_MOCK_REASON)
class FileClientTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMixin):
def setup_loader_modules(self):
FS_ROOT = os.path.join(RUNTIME_VARS.TMP, 'fileclient_fs_root')
CACHE_ROOT = os.path.join(RUNTIME_VARS.TMP, 'fileclient_cache_root')
MOCKED_OPTS = {
'file_roots': _get_file_roots(FS_ROOT),
'fileserver_backend': ['roots'],
'cachedir': CACHE_ROOT,
'file_client': 'local',
}
self.addCleanup(shutil.rmtree, FS_ROOT, ignore_errors=True)
self.addCleanup(shutil.rmtree, CACHE_ROOT, ignore_errors=True)
return {fileclient: {'__opts__': MOCKED_OPTS}}
def setUp(self):
self.file_client = fileclient.Client(self.master_opts)
def tearDown(self):
del self.file_client
def test_file_list_emptydirs(self):
'''
Ensure that the fileclient class won't allow a direct call to file_list_emptydirs()
'''
with self.assertRaises(NotImplementedError):
self.file_client.file_list_emptydirs()
def test_get_file(self):
'''
Ensure that the fileclient class won't allow a direct call to get_file()
'''
with self.assertRaises(NotImplementedError):
self.file_client.get_file(None)
def test_get_file_client(self):
minion_opts = self.get_temp_config('minion')
minion_opts['file_client'] = 'remote'
with patch('salt.fileclient.RemoteClient', MagicMock(return_value='remote_client')):
ret = fileclient.get_file_client(minion_opts)
self.assertEqual('remote_client', ret)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class FileclientCacheTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMixin):
'''
Tests for the fileclient caching. The LocalClient is the only thing we can
test as it is the only way we can mock the fileclient (the tests run from
the minion process, so the master cannot be mocked from test code).
'''
def setup_loader_modules(self):
self.FS_ROOT = os.path.join(RUNTIME_VARS.TMP, 'fileclient_fs_root')
self.CACHE_ROOT = os.path.join(RUNTIME_VARS.TMP, 'fileclient_cache_root')
self.MOCKED_OPTS = {
'file_roots': _get_file_roots(self.FS_ROOT),
'fileserver_backend': ['roots'],
'cachedir': self.CACHE_ROOT,
'file_client': 'local',
}
self.addCleanup(shutil.rmtree, self.FS_ROOT, ignore_errors=True)
self.addCleanup(shutil.rmtree, self.CACHE_ROOT, ignore_errors=True)
return {fileclient: {'__opts__': self.MOCKED_OPTS}}
def setUp(self):
'''
        No need to add a dummy foo.txt to muddy up the github repo; just make
our own fileserver root on-the-fly.
'''
def _new_dir(path):
'''
Add a new dir at ``path`` using os.makedirs. If the directory
already exists, remove it recursively and then try to create it
again.
'''
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# Just in case a previous test was interrupted, remove the
# directory and try adding it again.
shutil.rmtree(path)
os.makedirs(path)
else:
raise
        # Create the FS_ROOT
for saltenv in SALTENVS:
saltenv_root = os.path.join(self.FS_ROOT, saltenv)
# Make sure we have a fresh root dir for this saltenv
_new_dir(saltenv_root)
path = os.path.join(saltenv_root, 'foo.txt')
with salt.utils.files.fopen(path, 'w') as fp_:
fp_.write(
'This is a test file in the \'{0}\' saltenv.\n'
.format(saltenv)
)
subdir_abspath = os.path.join(saltenv_root, SUBDIR)
os.makedirs(subdir_abspath)
for subdir_file in SUBDIR_FILES:
path = os.path.join(subdir_abspath, subdir_file)
with salt.utils.files.fopen(path, 'w') as fp_:
fp_.write(
                        'This is file \'{0}\' in subdir \'{1}\' from saltenv '
                        '\'{2}\''.format(subdir_file, SUBDIR, saltenv)
)
# Create the CACHE_ROOT
_new_dir(self.CACHE_ROOT)
def test_cache_dir(self):
'''
Ensure entire directory is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(self.MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=None
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(self.MOCKED_OPTS)
alt_cachedir = os.path.join(RUNTIME_VARS.TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(self.MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_file(self):
'''
Ensure file is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(self.MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt', saltenv, cachedir=None)
)
cache_loc = os.path.join(
fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(self.MOCKED_OPTS)
alt_cachedir = os.path.join(RUNTIME_VARS.TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_relative_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(self.MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
```
#### File: tests/unit/test_payload.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import time
import errno
import threading
import datetime
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON
# Import Salt libs
from salt.utils import immutabletypes
from salt.utils.odict import OrderedDict
import salt.exceptions
import salt.payload
# Import 3rd-party libs
import zmq
from salt.ext import six
import logging
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PayloadTestCase(TestCase):
def assertNoOrderedDict(self, data):
if isinstance(data, OrderedDict):
raise AssertionError(
'Found an ordered dictionary'
)
if isinstance(data, dict):
for value in six.itervalues(data):
self.assertNoOrderedDict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
self.assertNoOrderedDict(chunk)
def test_list_nested_odicts(self):
payload = salt.payload.Serial('msgpack')
idata = {'pillar': [OrderedDict(environment='dev')]}
odata = payload.loads(payload.dumps(idata.copy()))
self.assertNoOrderedDict(odata)
self.assertEqual(idata, odata)
def test_datetime_dump_load(self):
'''
Check the custom datetime handler can understand itself
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: dtvalue}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata)
self.assertEqual(
sdata,
b'\x81\xc7\x18N20010203T04:05:06.000007\xc7\x18N20010203T04:05:06.000007')
self.assertEqual(idata, odata)
def test_verylong_dump_load(self):
'''
Test verylong encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'jid': 20180227140750302662}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata)
idata['jid'] = '{0}'.format(idata['jid'])
self.assertEqual(idata, odata)
def test_immutable_dict_dump_load(self):
'''
Test immutable dict encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'dict': {'key': 'value'}}
sdata = payload.dumps({'dict': immutabletypes.ImmutableDict(idata['dict'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_immutable_list_dump_load(self):
'''
Test immutable list encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'list': [1, 2, 3]}
sdata = payload.dumps({'list': immutabletypes.ImmutableList(idata['list'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_immutable_set_dump_load(self):
'''
Test immutable set encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'set': ['red', 'green', 'blue']}
sdata = payload.dumps({'set': immutabletypes.ImmutableSet(idata['set'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_odict_dump_load(self):
'''
        Test that odict just works; this wasn't possible until msgpack 0.2.0
'''
payload = salt.payload.Serial('msgpack')
data = OrderedDict()
data['a'] = 'b'
data['y'] = 'z'
data['j'] = 'k'
data['w'] = 'x'
sdata = payload.dumps({'set': data})
odata = payload.loads(sdata)
self.assertEqual({'set': dict(data)}, odata)
def test_mixed_dump_load(self):
'''
        Test that all of the special-cased types above can be handled at once
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
od = OrderedDict()
od['a'] = 'b'
od['y'] = 'z'
od['j'] = 'k'
od['w'] = 'x'
idata = {dtvalue: dtvalue, # datetime
'jid': 20180227140750302662, # long int
'dict': immutabletypes.ImmutableDict({'key': 'value'}), # immutable dict
'list': immutabletypes.ImmutableList([1, 2, 3]), # immutable list
'set': immutabletypes.ImmutableSet(('red', 'green', 'blue')), # immutable set
'odict': od, # odict
}
edata = {dtvalue: dtvalue, # datetime, == input
'jid': '20180227140750302662', # string repr of long int
'dict': {'key': 'value'}, # builtin dict
'list': [1, 2, 3], # builtin list
'set': ['red', 'green', 'blue'], # builtin set
'odict': dict(od), # builtin dict
}
sdata = payload.dumps(idata)
odata = payload.loads(sdata)
self.assertEqual(edata, odata)
def test_recursive_dump_load(self):
'''
Test recursive payloads are (mostly) serialized
'''
payload = salt.payload.Serial('msgpack')
data = {'name': 'roscivs'}
data['data'] = data # Data all the things!
sdata = payload.dumps(data)
odata = payload.loads(sdata)
self.assertTrue('recursion' in odata['data'].lower())
class SREQTestCase(TestCase):
port = 8845 # TODO: dynamically assign a port?
@classmethod
def setUpClass(cls):
'''
Class to set up zmq echo socket
'''
def echo_server():
'''
            A server that echoes the message sent to it over zmq
Optional "sleep" can be sent to delay response
'''
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:{0}".format(SREQTestCase.port))
payload = salt.payload.Serial('msgpack')
while SREQTestCase.thread_running.is_set():
try:
# Wait for next request from client
message = socket.recv(zmq.NOBLOCK)
msg_deserialized = payload.loads(message)
log.info('Echo server received message: %s', msg_deserialized)
if isinstance(msg_deserialized['load'], dict) and msg_deserialized['load'].get('sleep'):
log.info('Test echo server sleeping for %s seconds',
msg_deserialized['load']['sleep'])
time.sleep(msg_deserialized['load']['sleep'])
socket.send(message)
except zmq.ZMQError as exc:
if exc.errno == errno.EAGAIN:
continue
raise
SREQTestCase.thread_running = threading.Event()
SREQTestCase.thread_running.set()
SREQTestCase.echo_server = threading.Thread(target=echo_server)
SREQTestCase.echo_server.start()
@classmethod
def tearDownClass(cls):
'''
Remove echo server
'''
# kill the thread
SREQTestCase.thread_running.clear()
SREQTestCase.echo_server.join()
def get_sreq(self):
return salt.payload.SREQ('tcp://127.0.0.1:{0}'.format(SREQTestCase.port))
def test_send_auto(self):
'''
        Test creation, send/recv
'''
sreq = self.get_sreq()
# check default of empty load and enc clear
assert sreq.send_auto({}) == {'enc': 'clear', 'load': {}}
# check that the load always gets passed
assert sreq.send_auto({'load': 'foo'}) == {'load': 'foo', 'enc': 'clear'}
def test_send(self):
sreq = self.get_sreq()
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
@skipIf(True, 'Disabled until we can figure out how to make this more reliable.')
def test_timeout(self):
'''
Test SREQ Timeouts
'''
sreq = self.get_sreq()
# client-side timeout
start = time.time()
# This is a try/except instead of an assertRaises because of a possible
        # subtle bug in zmq wherein a timeout=0 actually executes a single poll
# before the timeout is reached.
log.info('Sending tries=0, timeout=0')
try:
sreq.send('clear', 'foo', tries=0, timeout=0)
except salt.exceptions.SaltReqTimeoutError:
pass
assert time.time() - start < 1 # ensure we didn't wait
# server-side timeout
log.info('Sending tries=1, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=1, timeout=1)
assert time.time() - start >= 1 # ensure we actually tried once (1s)
# server-side timeout with retries
log.info('Sending tries=2, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=2, timeout=1)
assert time.time() - start >= 2 # ensure we actually tried twice (2s)
        # test a regular send afterwards (to make sure sockets aren't in a twist)
log.info('Sending regular send')
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
def test_destroy(self):
'''
Test the __del__ capabilities
'''
sreq = self.get_sreq()
# ensure no exceptions when we go to destroy the sreq, since __del__
# swallows exceptions, we have to call destroy directly
sreq.destroy()
``` |
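The round-trip behaviour exercised above can be summarised in a short, self-contained sketch. It assumes a working Salt installation and uses only the `Serial('msgpack')` `dumps()`/`loads()` calls that the tests themselves demonstrate.

```python
# Minimal sketch of the msgpack round-trip the tests above exercise.
# Assumes Salt is importable; only dumps()/loads() are used.
import datetime
import salt.payload

payload = salt.payload.Serial('msgpack')
data = {
    'jid': 20180227140750302662,                         # very long int
    'when': datetime.datetime(2001, 2, 3, 4, 5, 6, 7),   # custom datetime handler
}
wire = payload.dumps(data)       # bytes ready for the wire
restored = payload.loads(wire)
# As in test_verylong_dump_load, the long jid comes back as a string.
print(restored['jid'], restored['when'])
```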
{
"source": "JohnSkubic/SkubyBrewCalculator",
"score": 2
} |
#### File: SkubyBrewCalc/SkubyBrewCalc/LegalOptionsUtil.py
```python
from LegalOptions import legal_hops,HOP_AA_IDX, HOP_ID_IDX
from LegalOptions import legal_fermentables,FERMENTABLES_TYPE_IDX, FERMENTABLES_SUBTYPE_IDX
from LegalOptions import FERMENTABLES_COLOR_IDX, FERMENTABLES_PPG_IDX, FERMENTABLES_ID_IDX
from LegalOptions import legal_yeasts, YEAST_LAB_IDX, YEAST_CODE_IDX, YEAST_TYPE_IDX
from LegalOptions import YEAST_FLOCCULATION_IDX, YEAST_ATTENUATION_IDX
from LegalOptions import YEAST_MIN_TEMP_IDX, YEAST_MAX_TEMP_IDX, YEAST_ID_IDX
# Common functions
def _get_name_by_id (ingr_id, option_dict, id_idx):
for key in option_dict.keys():
if option_dict[key][id_idx] == ingr_id:
return key
return None
def _get_unique_options (option_dict, idx):
unique_options = []
for key in option_dict.keys():
if not option_dict[key][idx] in unique_options:
unique_options.append(option_dict[key][idx])
return unique_options
def _get_unique_options_with_filter (option_dict, idx, filter_val, filter_idx):
unique_options = []
for key in option_dict.keys():
if option_dict[key][filter_idx] == filter_val and not option_dict[key][idx] in unique_options:
unique_options.append(option_dict[key][idx])
return unique_options
# Hop Access Functions
def get_hop_by_id (hop_id):
return _get_name_by_id(hop_id, legal_hops, HOP_ID_IDX)
def get_hop_list ():
return legal_hops.keys()
# Fermentable Access Functions
def get_fermentable_by_id (fermentable_id):
return _get_name_by_id(fermentable_id, legal_fermentables, FERMENTABLES_ID_IDX)
def get_fermentable_list ():
return legal_fermentables.keys()
def get_fermentable_types ():
return _get_unique_options(legal_fermentables, FERMENTABLES_TYPE_IDX)
def get_fermentable_subtypes ():
return _get_unique_options(legal_fermentables, FERMENTABLES_SUBTYPE_IDX)
def get_fermentable_subtypes_by_type(fermentable_type):
return _get_unique_options_with_filter(legal_fermentables, FERMENTABLES_SUBTYPE_IDX, fermentable_type, FERMENTABLES_TYPE_IDX)
# Yeast Access Functions
def get_yeast_by_id (yeast_id):
return _get_name_by_id(yeast_id, legal_yeasts, YEAST_ID_IDX)
def get_yeast_list ():
return legal_yeasts.keys()
def get_yeast_labs ():
return _get_unique_options(legal_yeasts, YEAST_LAB_IDX)
def get_yeast_types ():
return _get_unique_options(legal_yeasts, YEAST_TYPE_IDX)
``` |
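A brief usage sketch of the accessors defined above, assuming the module is importable. The concrete hop/fermentable/yeast names and IDs live in `LegalOptions` (not shown here), so the values in the comments and the `'Grain'` type passed to the filter are illustrative placeholders.

```python
# Hypothetical usage of the accessors above; actual names and IDs depend
# on the legal_hops/legal_fermentables/legal_yeasts tables in LegalOptions.
from LegalOptionsUtil import (
    get_hop_list, get_hop_by_id,
    get_fermentable_types, get_fermentable_subtypes_by_type,
    get_yeast_labs,
)

print(get_hop_list())                             # every legal hop name
print(get_hop_by_id(1))                           # name for hop ID 1, or None if unknown
print(get_fermentable_types())                    # unique fermentable types
print(get_fermentable_subtypes_by_type('Grain'))  # 'Grain' is a placeholder type
print(get_yeast_labs())                           # unique yeast labs
```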
{
"source": "johnskyap/detangle2021",
"score": 2
} |
#### File: detangle2021/defs/repo.bzl
```python
def _tar_dep(dep):
return dep + "//:tar"
def _tar_deps(deps):
return [_tar_dep(d) for d in deps]
def _quote_string(i):
return "\"" + i.replace("\"", "\\\"") + "\""
def _join_list(l):
return ",".join([_quote_string(i) for i in l])
def web_repo(name, srcs, path, urls, sha256, strip_prefix, deps = [], licenses = ""):
native.new_http_archive(
name = name,
urls = urls,
sha256 = sha256,
strip_prefix = strip_prefix,
build_file_content = """
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_closure//closure:defs.bzl", "web_library")
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
web_library(
name = "%s",
srcs = [%s],
path = %s,
deps = [%s],
)
pkg_tar(
name = "src_tar",
srcs = [%s],
package_dir = %s,
)
pkg_tar(
name = "tar",
deps = [":src_tar",%s],
package_dir = "/",
)
""" % (name, _join_list(srcs), _quote_string(path), _join_list(deps), _join_list(srcs), _quote_string(path), _join_list(_tar_deps(deps))),
)
``` |
{
"source": "johnsliao/cs779-mbta-db",
"score": 3
} |
#### File: johnsliao/cs779-mbta-db/load.py
```python
import cx_Oracle
import os
import csv
import datetime
from dateutil import parser
password = os.environ.get('ORACLE_PASSWORD')
dsn_tns = cx_Oracle.makedsn('localhost', '1521', service_name='orclpdb1')
conn = cx_Oracle.connect(user='system', password=password, dsn=dsn_tns)
c = conn.cursor()
def load_directions_ids():
reader = csv.reader(open('vehicles-direction_ids.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT direction FROM directions WHERE direction='%s'" % (l[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO DIRECTIONS (direction) VALUES ('%s')" % l[0]
c.execute(sql)
conn.commit()
def load_stops():
reader = csv.reader(open('stops.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT stop_id FROM stops WHERE stop_id='%s'" % (l[0])
c.execute(sql)
exists = c.fetchone()
print(l)
if not exists:
sql = "SELECT municipality_id FROM municipalities WHERE municipality='%s'" % (l[6])
c.execute(sql)
municipality_id = c.fetchone()[0]
sql = "SELECT street_id FROM streets WHERE street='%s'" % (l[2].replace("'", "''"))
c.execute(sql)
at_street = c.fetchone()
sql = "SELECT street_id FROM streets WHERE street='%s'" % (l[8].replace("'", "''"))
c.execute(sql)
on_street = c.fetchone()
if not at_street and on_street:
sql = """INSERT INTO STOPS (STOP_ID, ADDRESS, DESCRIPTION, LATITUDE, LONGITUDE, MUNICIPALITY_ID, NAME, ON_STREET) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')""" % (
l[0], l[1].replace("'", "''"), l[3].replace("'", "''"), l[4], l[5], municipality_id,
l[7].replace("'", "''"), on_street[0])
c.execute(sql)
elif not on_street and at_street:
sql = """INSERT INTO STOPS (STOP_ID, ADDRESS, AT_STREET, DESCRIPTION, LATITUDE, LONGITUDE, MUNICIPALITY_ID, NAME) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')""" % (
                    l[0], l[1].replace("'", "''"), at_street[0], l[3].replace("'", "''"), l[4], l[5], municipality_id,
l[7].replace("'", "''"))
c.execute(sql)
else:
sql = """INSERT INTO STOPS (STOP_ID, ADDRESS, DESCRIPTION, LATITUDE, LONGITUDE, MUNICIPALITY_ID, NAME) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s')""" % (
l[0], l[1].replace("'", "''"), l[3].replace("'", "''"), l[4], l[5], municipality_id,
l[7].replace("'", "''"))
c.execute(sql)
conn.commit()
def load_routes_direction_names():
reader = csv.reader(open('routes-direction_names.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT direction_name FROM direction_names WHERE direction_name='%s'" % (l[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO direction_names (direction_name) VALUES ('%s')" % (l[0])
c.execute(sql)
conn.commit()
def load_destinations():
reader = csv.reader(open('routes-direction_destinations.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT destination FROM destinations WHERE destination='%s'" % (l[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO DESTINATIONS (destination) VALUES ('%s')" % (l[0])
c.execute(sql)
conn.commit()
def load_routes():
reader = csv.reader(open('routes.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT route_id FROM routes WHERE route_id='%s'" % (l[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "SELECT color_id FROM colors WHERE color='%s'" % (l[1])
c.execute(sql)
color = c.fetchone()[0]
sql = "SELECT color_id FROM colors WHERE color='%s'" % (l[6])
c.execute(sql)
text_color = c.fetchone()[0]
sql = "SELECT line_id FROM lines WHERE line_id='%s'" % (l[9])
c.execute(sql)
line_id = c.fetchone()
if line_id:
sql = "INSERT INTO routes (route_id, color, description, fare_class, long_name, short_name, text_color, line_id) " \
"VALUES ('%s','%s','%s','%s','%s','%s','%s','%s')" % (
l[0], color, l[2], l[3], l[4], l[5], text_color, line_id[0])
c.execute(sql)
else:
sql = "INSERT INTO routes (route_id, color, description, fare_class, long_name, short_name, text_color) " \
"VALUES ('%s','%s','%s','%s','%s','%s','%s')" % (
l[0], color, l[2], l[3], l[4], l[5], text_color)
c.execute(sql)
conn.commit()
def load_lines():
reader = csv.reader(open('lines.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT line_id FROM lines WHERE line_id='%s'" % (l[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "SELECT color_id FROM colors WHERE color='%s'" % (l[1])
c.execute(sql)
color = c.fetchone()[0]
sql = "SELECT color_id FROM colors WHERE color='%s'" % (l[4])
c.execute(sql)
text_color = c.fetchone()[0]
sql = "INSERT INTO LINES (line_id, color, long_name, short_name, text_color) " \
"VALUES ('%s', '%s', '%s', '%s', '%s')" % (l[0], color, l[2], l[3], text_color)
c.execute(sql)
reader = csv.reader(open('routes.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT line_id FROM lines WHERE line_id='%s'" % (l[9])
c.execute(sql)
exists = c.fetchone()
if not exists and l[9] != '':
sql = "SELECT color_id FROM colors WHERE color='%s'" % (l[1])
c.execute(sql)
color = c.fetchone()[0]
sql = "SELECT color_id FROM colors WHERE color='%s'" % (l[6])
c.execute(sql)
text_color = c.fetchone()[0]
sql = "INSERT INTO LINES (line_id, color, long_name, short_name, text_color) " \
"VALUES ('%s', '%s', '%s', '%s', '%s')" % (l[9], color, l[4], l[5], text_color)
print(sql)
c.execute(sql)
conn.commit()
def load_colors():
reader = csv.reader(open('lines.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT color FROM colors WHERE color='%s'" % (l[1])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO COLORS (color) VALUES ('%s')" % (l[1])
c.execute(sql)
sql = "SELECT color FROM colors WHERE color='%s'" % (l[4])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO COLORS (color) VALUES ('%s')" % (l[4])
c.execute(sql)
reader = csv.reader(open('routes.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT color FROM colors WHERE color='%s'" % (l[1])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO COLORS (color) VALUES ('%s')" % (l[1])
c.execute(sql)
sql = "SELECT color FROM colors WHERE color='%s'" % (l[6])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO COLORS (color) VALUES ('%s')" % (l[6])
c.execute(sql)
conn.commit()
def load_direction_names_routes_bridge():
sql = "SELECT direction_name_id FROM direction_names"
c.execute(sql)
direction_name_ids = c.fetchall()
sql = "SELECT route_id FROM routes"
c.execute(sql)
route_ids = c.fetchall()
for route_id in route_ids:
for direction_name_id in direction_name_ids:
sql = "SELECT route_id, direction_name_id FROM direction_names_routes_bridge " \
"WHERE route_id='%s' AND direction_name_id='%s'" % (route_id[0], direction_name_id[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO direction_names_routes_bridge (route_id, direction_name_id) " \
"VALUES ('%s', '%s')" % (route_id[0], direction_name_id[0])
c.execute(sql)
conn.commit()
def load_destinations_routes_bridge():
sql = "SELECT destination_id FROM destinations"
c.execute(sql)
destination_ids = c.fetchall()
sql = "SELECT route_id FROM routes"
c.execute(sql)
route_ids = c.fetchall()
for route_id in route_ids:
for destination_id in destination_ids:
sql = "SELECT route_id, destination_id FROM destinations_routes_bridge " \
"WHERE route_id='%s' AND destination_id='%s'" % (route_id[0], destination_id[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO destinations_routes_bridge (route_id, destination_id) " \
"VALUES ('%s', '%s')" % (route_id[0], destination_id[0])
c.execute(sql)
conn.commit()
def load_municipalities():
reader = csv.reader(open('stops.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT municipality_id FROM municipalities WHERE municipality='%s'" % (l[6])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO MUNICIPALITIES (municipality) VALUES ('%s')" % l[6]
c.execute(sql)
conn.commit()
def load_streets():
reader = csv.reader(open('stops.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
# At Street
        street = l[2].replace("'", "''")
sql = "SELECT street FROM streets WHERE street='%s'" % street
c.execute(sql)
exists = c.fetchone()
if not exists and l[2] != '':
sql = "INSERT INTO streets (street) VALUES ('%s')" % street
c.execute(sql)
# On Street
        street = l[8].replace("'", "''")
sql = "SELECT street FROM streets WHERE street='%s'" % street
c.execute(sql)
exists = c.fetchone()
if not exists and l[8] != '':
sql = "INSERT INTO streets (street) VALUES ('%s')" % street
c.execute(sql)
conn.commit()
def load_statuses():
reader = csv.reader(open('vehicles-current_statuses.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
sql = "SELECT status FROM statuses WHERE status='%s'" % (l[0])
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "INSERT INTO statuses (status) VALUES ('%s')" % l[0]
c.execute(sql)
conn.commit()
def load_vehicles_data():
reader = csv.reader(open('vehicles.csv'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
next(reader) # Skip header
for l in reader:
utc_delta = datetime.datetime.utcnow() - datetime.datetime.now()
updated_at = parser.parse(l[8]) + utc_delta
updated_at = updated_at.strftime('%Y-%m-%d %H:%M:%S')
sql = "SELECT vehicle_id, updated_at FROM vehicles_data WHERE vehicle_id='%s' and updated_at=TO_DATE('%s','yyyy-mm-DD HH24:MI:SS')" % (
l[0], updated_at)
c.execute(sql)
exists = c.fetchone()
if not exists:
sql = "SELECT direction_id FROM directions WHERE direction='%s'" % (l[4])
c.execute(sql)
direction_id = c.fetchone()[0]
try:
sql = "SELECT route_id FROM routes WHERE route_id='%s'" % (l[10])
c.execute(sql)
route_id = c.fetchone()[0]
except TypeError:
                print('ROUTE_ID "%s" does not exist in ROUTES table - ignoring' % l[10])
continue
sql = "SELECT stop_id FROM stops WHERE stop_id='%s'" % (l[11])
c.execute(sql)
stop_id = c.fetchone()
sql = "SELECT status_id FROM statuses WHERE status='%s'" % (l[9])
c.execute(sql)
status_id = c.fetchone()[0]
speed = l[7]
if speed == 'None' or not speed:
speed = ''
current_stop_sequence = l[3]
if current_stop_sequence == 'None' or not current_stop_sequence:
current_stop_sequence = ''
bearing = l[2]
if bearing == 'None' or not bearing:
bearing = ''
if not stop_id:
sql = "INSERT INTO vehicles_data (vehicle_id, label, bearing, current_stop_sequence, longitude, latitude, speed, updated_at, direction_id, route_id, current_status) " \
"VALUES ('%s','%s','%s','%s','%s','%s','%s',TO_DATE('%s','yyyy-mm-DD HH24:MI:SS'),'%s','%s','%s')" % (
l[0], l[1], bearing, current_stop_sequence, l[5], l[6], speed, updated_at, direction_id,
route_id, status_id)
c.execute(sql)
else:
sql = "INSERT INTO vehicles_data (vehicle_id, label, bearing, current_stop_sequence, longitude, latitude, speed, updated_at, direction_id, route_id, current_status, stop_id) " \
"VALUES ('%s','%s','%s','%s','%s','%s','%s', TO_DATE('%s','yyyy-mm-DD HH24:MI:SS'),'%s','%s','%s','%s')" % (
l[0], l[1], bearing, current_stop_sequence, l[5], l[6], speed, updated_at, direction_id,
route_id, status_id, stop_id[0])
c.execute(sql)
conn.commit()
if __name__ == '__main__':
load_directions_ids()
load_destinations()
load_colors()
load_lines()
load_routes_direction_names()
load_routes()
load_direction_names_routes_bridge()
load_destinations_routes_bridge()
load_municipalities()
load_streets()
load_stops()
load_statuses()
load_vehicles_data()
``` |
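The loaders above interpolate CSV values directly into the SQL text, which is why every string needs manual quote doubling. As a design note, cx_Oracle bind variables remove that escaping entirely; the sketch below shows the same exists-then-insert pattern for the `streets` table. It is an illustration under that assumption, not a drop-in replacement for the script above.

```python
# Sketch: exists-then-insert using bind variables instead of string
# formatting, so values like "O'Brien St" need no manual escaping.
def insert_street(cursor, street):
    cursor.execute(
        "SELECT street_id FROM streets WHERE street = :street",
        {'street': street},
    )
    if cursor.fetchone() is None:
        cursor.execute(
            "INSERT INTO streets (street) VALUES (:street)",
            {'street': street},
        )

# e.g. insert_street(c, "O'Brien St"); conn.commit()
```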
{
"source": "johnsliao/pymbta",
"score": 3
} |
#### File: pymbta/pymbta/pymbta.py
```python
import requests
import os
import json
"""
pymbta is a library that takes the pain out of querying the MBTA api to find bus/train locations.
For example code and documentation, go to:
https://github.com/johnsliao/pymbta
The MBTA publishes its data via GTFS-realtime feeds, a standard developed by Google for delivering realtime data.
MBTA GTFS files available here:
http://www.mbta.com/uploadedfiles/MBTA_GTFS.zip
Read more about using the MBTA-realtime API v2 here:
http://www.mbta.com/rider_tools/
If you encounter a bug, file an issue or email me:
<EMAIL>
"""
__author__ = '<NAME> <<EMAIL>>'
__version__ = '1.1'
class MBTA(object):
QUERY_TYPES = ['vehiclesbyroute', 'vehiclesbytrip']
DIRECTIONS = []
FORMATS = ['json', 'xml']
LOCATIONS = []
ROUTES = {}
def __init__(self, format='json', direction=None, query_type=None, route=None):
self.base_url = 'http://realtime.mbta.com/developer/api/v2/'
self.key = os.environ.get('MBTA_API_KEY')
self.format = format
self.direction = direction
self.query_type = query_type
self.route = route
self.url = ''
self.response = ''
def check_parameters(self):
if self.format not in self.FORMATS:
raise ValueError('[%s] is not a valid query format. Valid formats include %s' %
(self.format, self.FORMATS))
        if self.direction.lower() not in self.DIRECTIONS:
raise ValueError('[%s] is not a valid direction. Valid directions include %s' %
(self.direction, self.DIRECTIONS))
if self.query_type not in self.QUERY_TYPES:
raise ValueError('[%s] is not a valid MBTA query type. Valid query types include %s' %
(self.query_type, self.QUERY_TYPES))
if not any(r for r in self.ROUTES if self.route.lower() == r.lower()):
raise ValueError('[%s] is not a valid MBTA route. Please read user guide for valid route' %
(self.route))
def check_api_response(self):
if 'error' in self.response:
raise ValueError(
'No MBTA available data for %s route' % self.route)
def return_route_id(self, route):
return self.ROUTES[route]
def generate_url(self):
self.check_parameters()
self.route_id = self.return_route_id(self.route)
self.url = '%s%s?api_key=%s&route=%s&format=%s' % (
self.base_url,
self.query_type,
self.key,
self.route_id,
self.format)
def make_api_request(self):
result = requests.get(self.url)
#print result.text
self.response = json.loads(result.text)
def return_locations(self):
self.LOCATIONS = []
self.generate_url()
self.make_api_request()
self.check_api_response()
directions = self.response['direction']
for direction in directions:
if direction['direction_name'].lower() == self.direction.lower():
trips = direction['trip']
for trip in trips:
lat = trip['vehicle']['vehicle_lat']
lon = trip['vehicle']['vehicle_lon']
self.LOCATIONS.append([lat, lon])
        if not self.LOCATIONS:
return None
return self.LOCATIONS
class PurpleLine(MBTA):
ROUTES = {"Fairmount": "CR-Fairmount",
"Fitchburg": "CR-Fitchburg",
"Framingham/Worcester": "CR-Worcester",
"Franklin": "CR-Franklin",
"Greenbush": "CR-Greenbush",
"Haverhill": "CR-Haverhill",
"Lowell": "CR-Lowell",
"Needham": "CR-Needham",
"Newburyport/Rockport": "CR-Newburyport",
"Providence/Stoughton": "CR-Providence",
"Kingston/Plymouth": "CR-Kingston",
"Middleborough/Lakeville": "CR-Middleborough",
}
DIRECTIONS = ['inbound', 'outbound']
def __init__(self, query_type=None, direction=None, format='json', route=None):
MBTA.__init__(
self, query_type=query_type, direction=direction, format=format, route=route)
class RedLine(MBTA):
ROUTES = {"Red Line": "Red"}
DIRECTIONS = ['southbound', 'northbound']
def __init__(self, query_type=None, direction=None, format='json', route=None):
MBTA.__init__(
self, query_type=query_type, direction=direction, format=format, route=route)
class GreenLine(MBTA):
ROUTES = {"Green Line B": "Green-B",
"Green Line C": "Green-C",
"Green Line D": "Green-D",
"Green Line E": "Green-E",
}
DIRECTIONS = ['westbound', 'eastbound']
def __init__(self, query_type=None, direction=None, format='json', route=None):
MBTA.__init__(
self, query_type=query_type, direction=direction, format=format, route=route)
class OrangeLine(MBTA):
ROUTES = {"Orange Line": "Orange", }
DIRECTIONS = ['southbound', 'northbound']
def __init__(self, query_type=None, direction=None, format='json', route=None):
MBTA.__init__(
self, query_type=query_type, direction=direction, format=format, route=route)
class BlueLine(MBTA):
ROUTES = {"Blue Line": "Blue", }
DIRECTIONS = ['westbound', 'eastbound']
def __init__(self, query_type=None, direction=None, format='json', route=None):
MBTA.__init__(
self, query_type=query_type, direction=direction, format=format, route=route)
class Bus(MBTA):
ROUTES = {"SL1": "741",
"SL2": "742",
"SL4": "751",
"SL5": "749",
"CT1": "701",
"CT2": "747",
"CT3": "708",
"1": "1",
"4": "4",
"5": "5",
"7": "7",
"8": "8",
"9": "9",
"10": "10",
"11": "11",
"14": "14",
"15": "15",
"16": "16",
"17": "17",
"18": "18",
"19": "19",
"21": "21",
"22": "22",
"23": "23",
"24": "24",
"24/27": "2427",
"26": "26",
"27": "27",
"28": "28",
"29": "29",
"30": "30",
"31": "31",
"32": "32",
"32/33": "3233",
"33": "33",
"34": "34",
"34E": "34E",
"35": "35",
"36": "36",
"37": "37",
"37/38": "3738",
"38": "38",
"39": "39",
"40": "40",
"40/50": "4050",
"41": "41",
"42": "42",
"43": "43",
"44": "44",
"45": "45",
"47": "47",
"50": "50",
"51": "51",
"52": "52",
"55": "55",
"57": "57",
"57A": "57A",
"59": "59",
"60": "60",
"62": "62",
"62/76": "627",
"64": "64",
"65": "65",
"66": "66",
"67": "67",
"68": "68",
"69": "69",
"70": "70",
"70A": "70A",
"71": "71",
"72": "72",
"72/75": "725",
"73": "73",
"74": "74",
"75": "75",
"76": "76",
"77": "77",
"78": "78",
"79": "79",
"80": "80",
"83": "83",
"84": "84",
"85": "85",
"86": "86",
"87": "87",
"88": "88",
"89": "89",
"89/93": "8993",
"90": "90",
"91": "91",
"92": "92",
"93": "93",
"94": "94",
"95": "95",
"96": "96",
"97": "97",
"99": "99",
"100": "100",
"101": "101",
"104": "104",
"105": "105",
"106": "106",
"108": "108",
"109": "109",
"110": "110",
"111": "111",
"112": "112",
"114": "114",
"116": "116",
"116/117": "116117",
"117": "117",
"119": "119",
"120": "120",
"121": "121",
"131": "131",
"132": "132",
"134": "134",
"136": "136",
"137": "137",
"170": "170",
"171": "171",
"195": "195",
"201": "201",
"202": "202",
"210": "210",
"211": "211",
"212": "212",
"214": "214",
"214/216": "214216",
"215": "215",
"216": "216",
"217": "217",
"220": "220",
"221": "221",
"222": "222",
"225": "225",
"230": "230",
"236": "236",
"238": "238",
"240": "240",
"245": "245",
"325": "325",
"326": "326",
"350": "350",
"351": "351",
"352": "352",
"354": "354",
"411": "411",
"424": "424",
"426": "426",
"428": "428",
"429": "429",
"430": "430",
"431": "431",
"434": "434",
"435": "435",
"436": "436",
"439": "439",
"441": "441",
"441/442": "441442",
"442": "442",
"448": "448",
"449": "449",
"450": "450",
"451": "451",
"455": "455",
"456": "456",
"459": "459",
"465": "465",
"501": "501",
"502": "502",
"503": "503",
"504": "504",
"505": "505",
"553": "553",
"554": "554",
"556": "556",
"558": "558",
"608": "608",
"710": "710",
"712": "712",
"713": "713",
"714": "714",
"716": "716",
"9701": "9701",
"9702": "9702",
"9703": "9703",
"Shuttle": "Logan-22",
"Shuttle": "Logan-33",
"Shuttle": "Logan-55",
"Shuttle": "Logan-44",
"Shuttle": "Logan-66", }
DIRECTIONS = ['outbound', 'inbound']
def __init__(self, query_type=None, direction=None, format='json', route=None):
MBTA.__init__(
self, query_type=query_type, direction=direction, format=format, route=route)
``` |
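A minimal usage sketch for the classes above. It assumes the `MBTA_API_KEY` environment variable is set (the constructor reads it) and that the MBTA-realtime v2 API is reachable; `return_locations()` yields a list of `[lat, lon]` pairs or `None` when no vehicles are reported.

```python
# Minimal usage sketch; assumes MBTA_API_KEY is set in the environment.
from pymbta import RedLine, Bus

red = RedLine(query_type='vehiclesbyroute', direction='southbound',
              route='Red Line')
print(red.return_locations())   # [[lat, lon], ...] or None

bus = Bus(query_type='vehiclesbyroute', direction='inbound', route='1')
print(bus.return_locations())
```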
{
"source": "johnsliao/Python-Algorithms",
"score": 4
} |
#### File: Python-Algorithms/Algorithms/InsertionSort.py
```python
unsorted_list = [5,1,4,2,3]
def insertionSort(list):
# Go through each element in the list, index x
# keep index i = x
# WHILE list[i] < list[i-1] and i>0
# swap
list_length = len(list)
for x in range(list_length):
i = x
while i > 0 and list[i] < list[i-1]:
temp = list[i-1]
list[i-1] = list[i]
list[i] = temp
i -= 1
return list
print insertionSort(unsorted_list)
```
#### File: Python-Algorithms/Algorithms/MergeSort.py
```python
unsorted_list = [5,1,4,2,3]
def topDownSplitMerge(A, iBegin, iEnd, B):
    # Recursively sort A[iBegin:iEnd], using B as a separate scratch array
    if iEnd - iBegin < 2:
        return
    iMiddle = (iBegin + iEnd) // 2
    topDownSplitMerge(A, iBegin, iMiddle, B)
    topDownSplitMerge(A, iMiddle, iEnd, B)
    topDownMerge(A, iBegin, iMiddle, iEnd, B)
    copyArray(A, B, iBegin, iEnd)
def topDownMerge(A, iBegin, iMiddle, iEnd, B):
    # Merge the sorted runs A[iBegin:iMiddle] and A[iMiddle:iEnd] into B
    i, j = iBegin, iMiddle
    for k in range(iBegin, iEnd):
        if i < iMiddle and (j >= iEnd or A[i] <= A[j]):
            B[k] = A[i]
            i += 1
        else:
            B[k] = A[j]
            j += 1
def copyArray(A, B, iBegin, iEnd):
    # Copy the merged run from the scratch array back into A
    for k in range(iBegin, iEnd):
        A[k] = B[k]
# The scratch array must be a copy, not the same list that is being sorted
topDownSplitMerge(unsorted_list, 0, len(unsorted_list), unsorted_list[:])
print unsorted_list
```
#### File: Python-Algorithms/Algorithms/SelectionSort.py
```python
unsorted_list = [5,1,4,2,3]
def selectionSort(list):
# Loop through each value in the list (index x)
# Keep track of current min index, set to x by default
# Loop through each value starting from x+1
# IF list[y] < list[min]
# min = y
# IF min != x (new min found)
# swap
list_length = len(list)
for x in range(list_length-1):
min = x
for y in range(x+1, list_length):
if list[y] < list[min]:
min = y
if min != x:
temp = list[x]
list[x] = list[min]
list[min] = temp
return list
print selectionSort(unsorted_list)
```
#### File: Python-Algorithms/Data Structures/MinHeap.py
```python
class MinHeap():
def __init__(self):
self.data = []
self.heapSize = 0
def insert(self, data):
self.data.append(data)
self.heapSize += 1
self.siftUp(self.heapSize-1)
    def deleteMin(self):
        if self.heapSize == 0:
            return
        # Move the last element to the root, shrink the heap, then sift down
        self.data[0] = self.data[self.heapSize-1]
        self.data.pop()
        self.heapSize -= 1
        self.siftDown(0)
    def siftDown(self, index):
        leftChild = self.getLeftChild(index)
        rightChild = self.getRightChild(index)
        if leftChild >= self.heapSize:
            # No children at all
            return
        if rightChild >= self.heapSize or self.data[leftChild] <= self.data[rightChild]:
            # Only a left child, or the left child is the smaller one
            minIndex = leftChild
        else:
            minIndex = rightChild
        if self.data[index] > self.data[minIndex]:
            self.data[index], self.data[minIndex] = self.data[minIndex], self.data[index]
            self.siftDown(minIndex)
def siftUp(self, index):
        if index == 0:
return
parentIndex = self.getParent(index)
if self.data[index] < self.data[parentIndex]:
self.data[index], self.data[parentIndex] = self.data[parentIndex], self.data[index]
self.siftUp(parentIndex)
def getLeftChild(self, index):
return index * 2 + 1
def getRightChild(self, index):
return index * 2 + 2
def getParent(self, index):
        return (index - 1) // 2
def printHeap(self):
for x in range(self.heapSize):
print self.data[x]
minHeap = MinHeap()
minHeap.insert(10)
minHeap.insert(2)
minHeap.insert(3)
minHeap.insert(8)
minHeap.insert(12)
minHeap.insert(25)
minHeap.deleteMin()
minHeap.printHeap()
```
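To sanity-check the sift operations, a small helper can assert the min-heap invariant (every parent is less than or equal to its children) after a mix of inserts and deletes. It assumes the `MinHeap` class above is in scope, for example by appending the snippet to the same file.

```python
# Invariant check for the MinHeap above: every parent <= both of its children.
def check_min_heap(heap):
    for parent in range(heap.heapSize):
        for child in (2 * parent + 1, 2 * parent + 2):
            if child < heap.heapSize:
                assert heap.data[parent] <= heap.data[child], (parent, child)

h = MinHeap()
for value in [10, 2, 3, 8, 12, 25]:
    h.insert(value)
h.deleteMin()
check_min_heap(h)  # raises AssertionError if the heap property is violated
```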
#### File: Python-Algorithms/Data Structures/Stack.py
```python
class Node():
def __init__(self, data=None):
self.data = data
self.next = None
class Stack():
def __init__(self):
self.head = None
def push(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
return
new_node.next = self.head
self.head = new_node
def pop(self):
if self.head is None:
raise RuntimeError('Stack is empty')
temp_node = self.head
self.head = self.head.next
del temp_node
def printStack(self):
temp_node = self.head
while temp_node is not None:
print temp_node.data
temp_node = temp_node.next
stack = Stack()
stack.push(1)
stack.push(2)
stack.push(3)
stack.printStack()
stack.pop()
stack.printStack()
``` |
{
"source": "johnsmith2077/kevlar",
"score": 3
} |
#### File: kevlar/kevlar/augment.py
```python
import kevlar
def augment(augseqstream, nakedseqstream, upint=10000):
"""
Augment an unannotated stream of sequences.
- `augseqstream`: a stream of sequences annotated with k-mers of interest
- `nakedseqstream`: a stream of unannotated sequences, to be augmented with
k-mers of interest from `augseqstream`
"""
ksize = None
ikmers = dict()
for n, record in enumerate(augseqstream):
if n > 0 and n % upint == 0:
kevlar.plog('[kevlar::augment] processed', n, 'input reads')
for ikmer in record.annotations:
seq = record.ikmerseq(ikmer)
ikmers[seq] = ikmer.abund
ikmers[kevlar.revcom(seq)] = ikmer.abund
ksize = ikmer.ksize
for record in nakedseqstream:
qual = None
if hasattr(record, 'quality') and record.quality is not None:
qual = record.quality
newrecord = kevlar.sequence.Record(
name=record.name, sequence=record.sequence, quality=qual,
)
numkmers = len(record.sequence) - ksize + 1
for offset in range(numkmers):
kmer = record.sequence[offset:offset+ksize]
if kmer in ikmers:
abund = ikmers[kmer]
newrecord.annotate(kmer, offset, abund)
yield newrecord
def main(args):
augseqs = kevlar.parse_augmented_fastx(kevlar.open(args.augseqs, 'r'))
nakedseqs = kevlar.parse_augmented_fastx(kevlar.open(args.seqs, 'r'))
outstream = kevlar.open(args.out, 'w')
for record in augment(augseqs, nakedseqs):
kevlar.print_augmented_fastx(record, outstream)
```
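For orientation, a sketch that mirrors `main()` above using the same kevlar helpers. The file names and the `kevlar.augment` import path are placeholders/assumptions for illustration.

```python
# Sketch mirroring main() above; file names and import path are assumptions.
import kevlar
from kevlar.augment import augment  # module path assumed from the file layout

augseqs = kevlar.parse_augmented_fastx(kevlar.open('reads.augfastq.gz', 'r'))
naked = kevlar.parse_augmented_fastx(kevlar.open('reads.trimmed.fq.gz', 'r'))
out = kevlar.open('reads.trimmed.augfastq.gz', 'w')
for record in augment(augseqs, naked):
    kevlar.print_augmented_fastx(record, out)
```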
#### File: kevlar/cli/assemble.py
```python
import khmer
def subparser(subparsers):
"""Define the `kevlar assemble` command-line interface."""
desc = 'Assemble reads into contigs representing putative variants'
subparser = subparsers.add_parser('assemble', description=desc)
subparser.add_argument('-p', '--part-id', type=str, metavar='ID',
help='only assemble partition "ID" in the input')
subparser.add_argument('--max-reads', type=int, metavar='N', default=10000,
help='do not attempt to assemble any partitions '
'with more than N reads (default: 10000)')
subparser.add_argument('-o', '--out', metavar='FILE',
help='output file; default is terminal (stdout)')
subparser.add_argument('augfastq', help='annotated reads in augmented '
'Fastq/Fasta format')
```
#### File: kevlar/cli/augment.py
```python
def subparser(subparsers):
"""Define the `kevlar augment` command-line interface."""
desc = """\
    Internally, kevlar annotates sequences with "interesting k-mers" and uses
    "augmented" Fastq and Fasta formats. Processing sequences with third-party
tools usually requires discarding these annotations. This command is used
to augment/reaugment a set of sequences using annotations from an already
augmented sequence file.
"""
subparser = subparsers.add_parser('augment', description=desc)
subparser.add_argument('-o', '--out', metavar='FILE',
help='output file; default is terminal (stdout)')
subparser.add_argument('augseqs', help='augmented sequence file')
subparser.add_argument('seqs', help='sequences to annotate')
```
#### File: kevlar/cli/localize.py
```python
import re
def subparser(subparsers):
"""Define the `kevlar localize` command-line interface."""
desc = """\
For each partition, compute the reference target sequence to use for
variant calling. NOTE: this command relies on the `bwa` program being in
the PATH environmental variable.
"""
subparser = subparsers.add_parser('localize', description=desc)
subparser.add_argument('-d', '--delta', type=int, metavar='Δ',
default=50, help='retrieve the genomic interval '
'from the reference by extending beyond the span '
'of all k-mer starting positions by Δ bp')
subparser.add_argument('-p', '--part-id', type=str, metavar='ID',
help='only localize partition "ID" in the input')
subparser.add_argument('-o', '--out', metavar='FILE', default='-',
help='output file; default is terminal (stdout)')
subparser.add_argument('-z', '--seed-size', type=int, metavar='Z',
default=51, help='seed size; default is 51')
subparser.add_argument('-x', '--max-diff', type=int, metavar='X',
default=None, help='split and report multiple '
'reference targets if the distance between two '
'seed matches is > X; by default, X is set '
'dynamically for each partition and is equal to 3 '
'times the length of the longest contig in the '
'partition; each resulting bin specifies a '
'reference target sequence to which assembled '
'contigs will subsequently be aligned')
subparser.add_argument('--include', metavar='REGEX', type=str,
help='discard alignments to any chromosomes whose '
'sequence IDs do not match the given pattern')
subparser.add_argument('--exclude', metavar='REGEX', type=str,
help='discard alignments to any chromosomes whose '
'sequence IDs match the given pattern')
subparser.add_argument('refr', help='BWA indexed reference genome')
subparser.add_argument('contigs', nargs='+', help='assembled reads in '
'augmented Fasta format')
```
#### File: kevlar/cli/partition.py
```python
def subparser(subparsers):
    """Define the `kevlar partition` command-line interface."""
desc = """\
Construct a graph to group reads by shared interesting k-mers. Nodes in the
graph represent reads, and edges between a pair of nodes indicate that the
two corresponding reads have one or more interesting k-mers in common.
Connected components in the undirected graph correspond to distinct
variants (or variant-related breakpoints).
"""
subparser = subparsers.add_parser('partition', description=desc)
subparser.add_argument('-s', '--strict', action='store_true',
help='require perfect identity between overlapping '
'reads for inclusion in the same partition; by '
'default, only a shared interesting k-mer is '
'required')
subparser.add_argument('--min-abund', metavar='X', type=int, default=2,
help='ignore k-mers with abundance lower than X; '
'default is 2')
subparser.add_argument('--max-abund', metavar='Y', type=int, default=200,
help='ignore k-mers with abundance higher than Y; '
'default is 200')
subparser.add_argument('--no-dedup', dest='dedup', action='store_false',
default=True, help='skip step to remove duplicates')
subparser.add_argument('--gml', metavar='FILE',
help='write read graph to .gml file')
subparser.add_argument('--split', type=str, metavar='OUTPREFIX',
help='write each partition to a separate output '
'file, each with a filename like '
'"OUTPREFIX.cc#.augfastq.gz"')
subparser.add_argument('-o', '--out', metavar='FILE',
help='output file; default is terminal (stdout)')
subparser.add_argument('infile', help='input reads in augmented Fast[q|a] '
'format')
```
#### File: kevlar/cli/varfilter.py
```python
def subparser(subparsers):
subparser = subparsers.add_parser(
'varfilter', description='Filter out variants falling in the genomic '
'regions specified by the BED file(s). This can be used to exclude '
'putative variant calls corresponding to common variants, segmental '
'duplications, or other problematic loci.'
)
subparser.add_argument(
'-o', '--out', metavar='FILE', help='file to which filtered variants '
'will be written; default is terminal (standard output)'
)
subparser.add_argument(
'filt', help='BED file containing regions to filter out'
)
subparser.add_argument(
'vcf', nargs='+', help='VCF file(s) with calls to filter'
)
```
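These CLI modules only declare arguments; kevlar's entry point supplies the parent parser. For reference, here is a hedged sketch of driving one of them in isolation with plain argparse; the import path mirrors the file layout but is an assumption here.

```python
# Sketch: exercising the varfilter subparser above with plain argparse.
import argparse
import kevlar.cli.varfilter as varfilter_cli  # path assumed from file layout

parser = argparse.ArgumentParser(prog='kevlar')
subparsers = parser.add_subparsers(dest='cmd')
varfilter_cli.subparser(subparsers)

args = parser.parse_args(['varfilter', 'filt.bed', 'calls1.vcf', 'calls2.vcf'])
print(args.cmd, args.filt, args.vcf, args.out)  # out defaults to None
```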
#### File: kevlar/kevlar/dist.py
```python
from collections import defaultdict
import json
import math
import threading
import kevlar
import khmer
import numpy
import pandas
class KevlarZeroAbundanceDistError(ValueError):
pass
def count_first_pass(infiles, counts, mask, nthreads=1):
message = 'Processing input with {:d} threads'.format(nthreads)
kevlar.plog('[kevlar::dist]', message)
for filename in infiles:
kevlar.plog(' -', filename)
parser = khmer.ReadParser(filename)
threads = list()
for _ in range(nthreads):
thread = threading.Thread(
target=counts.consume_seqfile_with_mask,
args=(parser, mask,),
kwargs={'threshold': 1, 'consume_masked': True},
)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
kevlar.plog('[kevlar::dist] Done processing input!')
def count_second_pass(infiles, counts, nthreads=1):
kevlar.plog('[kevlar::dist] Second pass over the data')
tracking = khmer.Nodetable(counts.ksize(), 1, 1, primes=counts.hashsizes())
abund_lists = list()
def __do_abund_dist(parser):
abund = counts.abundance_distribution(parser, tracking)
abund_lists.append(abund)
for filename in infiles:
kevlar.plog(' -', filename)
parser = khmer.ReadParser(filename)
threads = list()
for _ in range(nthreads):
thread = threading.Thread(
target=__do_abund_dist,
args=(parser,)
)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
assert len(abund_lists) == len(infiles) * nthreads
abundance = defaultdict(int)
for abund in abund_lists:
for i, count in enumerate(abund):
if i > 0 and count > 0:
abundance[i] += count
kevlar.plog('[kevlar::dist] Done second pass over input!')
return abundance
def weighted_mean_std_dev(values, weights):
mu = numpy.average(values, weights=weights)
sigma = math.sqrt(numpy.average((values-mu)**2, weights=weights))
return mu, sigma
def calc_mu_sigma(abundance):
total = sum(abundance.values())
if total == 0:
message = 'all k-mer abundances are 0, please check input files'
raise KevlarZeroAbundanceDistError(message)
mu, sigma = weighted_mean_std_dev(
list(abundance.keys()),
list(abundance.values()),
)
return mu, sigma
def compute_dist(abundance):
total = sum(abundance.values())
fields = ['Abundance', 'Count', 'CumulativeCount', 'CumulativeFraction']
data = pandas.DataFrame(columns=fields)
cuml = 0
for abund, count in sorted(abundance.items()):
assert count > 0, (abund, count)
cuml += count
frac = cuml / total
row = {
'Abundance': abund,
'Count': count,
'CumulativeCount': cuml,
'CumulativeFraction': frac,
}
data = data.append(row, ignore_index=True)
return data
def dist(infiles, mask, ksize=31, memory=1e6, threads=1):
counts = khmer.Counttable(ksize, memory / 4, 4)
count_first_pass(infiles, counts, mask, nthreads=threads)
abundance = count_second_pass(infiles, counts, nthreads=threads)
mu, sigma = calc_mu_sigma(abundance)
data = compute_dist(abundance)
return mu, sigma, data
def main(args):
mask = khmer.Nodetable.load(args.mask)
mu, sigma, data = dist(
args.infiles, mask, ksize=args.ksize, memory=args.memory,
threads=args.threads,
)
out = {'mu': mu, 'sigma': sigma}
print(json.dumps(out))
if args.tsv:
data.to_csv(args.tsv, sep='\t', index=False)
if args.plot:
import os
try: # pragma: no cover
import matplotlib
if os.environ.get('DISPLAY', '') == '':
matplotlib.use('Agg')
from matplotlib import pyplot as plt
except RuntimeError as rerr: # pragma: no cover
if 'Python is not installed as a framework' not in str(rerr):
raise rerr
message = 'There was a problem loading matplotlib. '
message += 'Try https://stackoverflow.com/q/21784641/459780 '
message += 'for troubleshooting ideas.'
raise RuntimeError(message)
matplotlib.rcParams["figure.figsize"] = [12, 6]
matplotlib.rcParams['axes.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
plt.plot(data['Abundance'], data['Count'], color='blue')
plt.axvline(x=mu, color='blue', linestyle='--')
plt.axvline(x=mu - sigma, color='red', linestyle=':')
plt.axvline(x=mu + sigma, color='red', linestyle=':')
plt.xlim(args.plot_xlim)
plt.xlabel('K-mer abundance')
plt.ylabel('Frequency')
plt.savefig(args.plot, dpi=300)
```
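The abundance statistics above are easy to check by hand on a toy histogram; the sketch below assumes `weighted_mean_std_dev` from this module is in scope.

```python
# Toy check of weighted_mean_std_dev(): abundance value -> k-mer count.
abundance = {28: 10, 30: 20, 32: 10}
mu, sigma = weighted_mean_std_dev(list(abundance.keys()),
                                  list(abundance.values()))
print(mu, sigma)   # 30.0 and sqrt(2) ~= 1.414
```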
#### File: kevlar/kevlar/evaluate.py
```python
from collections import defaultdict
import kevlar
from kevlar.intervalforest import IntervalForest
import sys
def populate_index_from_bed(instream):
index = IntervalForest()
for line in instream:
if line.startswith('#') or line.strip() == '':
continue
values = line.strip().split()
chrom = values[0]
start, end = [int(coord) for coord in values[1:3]]
strrepr = '{:s}:{:d}-{:d}'.format(chrom, start, end)
index.insert(chrom, start, end, strrepr)
return index
def compact(variants, index, delta=10):
"""Compact variants by call class
Variant calls labeled with the same `CALLCLASS` attribute were predicted
from the same partition (set of reads). While more than one of these may
indeed be true variants with respect to the reference, we expect only one
*de novo* variant per partition.
This function assumes the variant calls are sorted by likelihood score (the
`LIKESCORE` attribute in kevlar). Any call that does not pass filters is
ignored. Then, for each CALLCLASS with multiple passing calls, all calls
are discarded except for the one matching the true variant. If the
CALLCLASS has no calls matching a true variant, all of the calls are
discarded except for the highest scoring call.
"""
variants_by_class = defaultdict(list)
calls = list()
for varcall in variants:
if varcall.filterstr != 'PASS':
continue
callclass = varcall.attribute('CALLCLASS')
if callclass is None:
calls.append(varcall)
else:
variants_by_class[callclass].append(varcall)
for callclass, calllist in variants_by_class.items():
nmatches = 0
match = None
for varcall in calllist:
hits = index.query(varcall.seqid, varcall.position, delta=delta)
if hits == set():
continue
else:
nmatches += 1
if match is None:
match = varcall
if nmatches == 0:
calllist[0].annotate('EVAL', 'False')
calls.append(calllist[0])
else:
assert nmatches > 0, nmatches
if nmatches > 1:
print('WARNING: found', nmatches, 'matches for CALLCLASS',
callclass, file=sys.stderr)
match.annotate('EVAL', 'True')
calls.append(match)
calls.sort(key=lambda c: float(c.attribute('LIKESCORE')), reverse=True)
calls = [c for c in calls if float(c.attribute('LIKESCORE')) > 0.0]
return calls
```
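A short sketch of how the BED index above is built and queried, mirroring its use in `compact()`. It assumes `populate_index_from_bed` is in scope and that `kevlar.intervalforest.IntervalForest.query()` behaves as used there (returns a set, empty when nothing overlaps).

```python
# Sketch: build the interval index from an in-memory BED and query it.
import io

bed = io.StringIO('chr1\t1000\t1100\ttruth_snv\n'
                  'chr2\t5000\t5200\ttruth_indel\n')
index = populate_index_from_bed(bed)
print(index.query('chr1', 1005, delta=10))  # expected: a non-empty set
print(index.query('chr3', 42, delta=10))    # expected: set()
```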
#### File: kevlar/kevlar/partition.py
```python
import networkx
import khmer
import kevlar
def partition(readstream, strict=False, minabund=None, maxabund=None,
dedup=True, gmlfile=None):
timer = kevlar.Timer()
timer.start()
timer.start('loadreads')
kevlar.plog('[kevlar::partition] Loading reads')
graph = kevlar.ReadGraph()
graph.load(readstream, minabund=minabund, maxabund=maxabund)
elapsed = timer.stop('loadreads')
message = 'Reads loaded in {:.2f} sec'.format(elapsed)
kevlar.plog('[kevlar::partition]', message)
timer.start('buildgraph')
mode = 'strict' if strict else 'relaxed'
message = 'Building read graph in {:s} mode'.format(mode)
kevlar.plog('[kevlar::partition]', message)
graph.populate_edges(strict=strict)
elapsed = timer.stop('buildgraph')
message = 'Graph built in {:.2f} sec'.format(elapsed)
kevlar.plog('[kevlar::partition]', message)
if gmlfile: # pragma: no cover
        kevlar.to_gml(graph, gmlfile)
timer.start('partition')
kevlar.plog('[kevlar::partition] Partition readgraph')
part_iter = graph.partitions(dedup, minabund, maxabund, abundfilt=True)
for n, part in enumerate(part_iter, 1):
reads = [graph.get_record(readname) for readname in list(part)]
for read in reads:
read.name += ' kvcc={:d}'.format(n)
yield n, reads
elapsed = timer.stop('partition')
message = 'Partitioning done in {:.2f} sec'.format(elapsed)
kevlar.plog('[kevlar::partition]', message)
total = timer.stop()
message = 'Total time: {:.2f} seconds'.format(total)
kevlar.plog('[kevlar::partition]', message)
def main(args):
if args.split:
kevlar.mkdirp(args.split, trim=True)
outstream = None if args.split else kevlar.open(args.out, 'w')
readstream = kevlar.parse_augmented_fastx(kevlar.open(args.infile, 'r'))
partitioner = partition(
readstream, strict=args.strict, minabund=args.min_abund,
maxabund=args.max_abund, dedup=args.dedup, gmlfile=args.gml,
)
numreads = 0
for partnum, part in partitioner:
numreads += len(part)
if args.split:
ofname = '{:s}.cc{:d}.augfastq.gz'.format(args.split, partnum)
with kevlar.open(ofname, 'w') as outfile:
for read in part:
kevlar.print_augmented_fastx(read, outfile)
else:
for read in part:
kevlar.print_augmented_fastx(read, outstream)
message = 'grouped {:d} reads'.format(numreads)
message += ' into {:d} connected components'.format(partnum)
kevlar.plog('[kevlar::partition]', message)
```
#### File: kevlar/kevlar/readgraph.py
```python
from collections import defaultdict
import itertools
import networkx
import kevlar
class ReadGraph(networkx.Graph):
def __init__(self, data=None, **attr):
"""
Constructor
In addition to the base class, we add a dictionary to store sets of
reads containing each "interesting" k-mer. These k-mers are used to
build out the graph edges.
Also, we store the names of the input reads so that reads with no
connections to other reads can be distinguished from assembled contigs.
"""
self.ikmers = defaultdict(set)
self.readnames = set()
super(ReadGraph, self).__init__(data, **attr)
def full_cc(self, cc):
sg = self.subgraph(cc).copy()
sg = ReadGraph(data=sg)
sg.ikmers = self.ikmers
sg.readnames = self.readnames
return sg
def get_record(self, recordname):
return self.nodes[recordname]['record']
def load(self, readstream, minabund=None, maxabund=None, dedup=False):
"""
Load reads and interesting k-mers into a graph structure.
A graph node is created for each read, and a set of reads containing
each interesting k-mer is stored. If abundance thresholds are enforced,
do a second in-memory pass over the k-mers to discard any that don't
satisfy the threshold criteria.
Set `dedup=True` to deduplicate read sequences for handling PCR
duplicates. This doesn't do a proper check (i.e. check both pairs
against genome), but simply makes sure that only one copy of each read
sequence is loaded. This is implemented with a very naive and resource
intensive approach, so this mode should only be used on small (e.g.
already partitioned) graphs.
"""
temp_ikmers = defaultdict(set)
unique_reads = set()
for record in readstream:
if dedup:
minread = kevlar.revcommin(record.sequence)
if minread in unique_reads:
continue
unique_reads.add(minread)
self.add_node(record.name, record=record)
self.readnames.add(record.name)
for kmer in record.annotations:
kmerseq = kevlar.revcommin(record.ikmerseq(kmer))
temp_ikmers[kmerseq].add(record.name)
if minabund is None and maxabund is None:
self.ikmers = temp_ikmers
else:
for kmer in temp_ikmers:
readset = temp_ikmers[kmer]
abund = len(readset)
minfail = minabund and abund < minabund
maxfail = maxabund and abund > maxabund
if not minfail and not maxfail:
self.ikmers[kmer] = readset
def check_edge(self, pair, minkmer):
"""
Add edge between 2 nodes in the "shared interesting k-mer" read graph.
If the edge already exists, make sure that the existing edge matches
the edge that would have been added.
"""
tailname, headname = pair.tail.name, pair.head.name
if tailname in self and headname in self[tailname]:
assert self[tailname][headname]['offset'] == pair.offset
if self[tailname][headname]['tail'] == tailname:
assert self[tailname][headname]['overlap'] == pair.overlap
self[tailname][headname]['ikmers'].add(minkmer)
else:
self.add_edge(tailname, headname, offset=pair.offset,
overlap=pair.overlap, ikmers=set([minkmer]),
orient=pair.sameorient, tail=tailname)
def populate_edges(self, strict=False):
"""
Instantiate edges between nodes that share an interesting k-mer.
Setting `strict=True` will result in edges between reads only when they
have a perfect match in their overlap.
"""
for kmer in self.ikmers:
readset = self.ikmers[kmer]
for read1, read2 in itertools.combinations(readset, 2):
if strict:
if read1 in self and read2 in self[read1]:
continue
record1 = self.get_record(read1)
record2 = self.get_record(read2)
pair = kevlar.ReadPair(record1, record2, kmer)
if pair.incompatible:
# Shared k-mer but bad overlap
continue
self.check_edge(pair, kmer)
else:
self.add_edge(read1, read2)
def partitions(self, dedup=True, minabund=None, maxabund=None,
abundfilt=False):
"""
Retrieve all partitions (connected components) from this graph.
The `minabund` and `maxabund` parameters are used at graph construction
time to filter out k-mers whose abundance is too large or small. If
        `abundfilt` is true, the minimum abundance is also applied to the number
of sequences (reads or contigs) in the partition.
"""
for cc in sorted(networkx.connected_components(self), reverse=True,
# Sort first by number of reads, then by read names
key=lambda c: (len(c), sorted(c))):
if len(cc) == 1 and list(cc)[0] in self.readnames:
continue # Skip unassembled input reads
if dedup:
partition = ReadGraph()
readstream = [self.get_record(readid) for readid in cc]
partition.load(readstream, minabund, maxabund, dedup=True)
assert partition.number_of_nodes() > 0
if abundfilt:
if minabund and partition.number_of_nodes() < minabund:
continue # Skip partitions that are too small
# # Ill-advised strategy for pre-emptively discarding
# # unassemblable partitions. It turns out that number of edges
# # in the read graph (even in strict mode) doesn't really
# # distinguish between what assembles and what doesn't.
# if partition.number_of_nodes() < 10:
# partition.populate_edges(strict=True)
# nedges = partition.number_of_edges()
# if minabund and nedges < minabund:
# continue
yield partition
else:
yield cc
```
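For reference, a `ReadGraph` can also be exercised directly, as the unit tests do. The sketch below assumes a placeholder augmented FASTQ file and walks through the load/populate/partition cycle described in the docstrings above.
```python
# A minimal sketch of direct ReadGraph usage, mirroring the pattern in
# tests/test_readgraph.py. The file name is a placeholder.
import kevlar
from kevlar.readgraph import ReadGraph

with kevlar.open('partition.cc42.augfastq.gz', 'r') as fh:
    reads = list(kevlar.parse_augmented_fastx(fh))

rg = ReadGraph()
rg.load(reads)                   # one node per read (pass dedup=True to drop PCR duplicates)
rg.populate_edges(strict=True)   # edges only for compatible overlaps on shared k-mers
print(rg.number_of_nodes(), 'reads;', rg.number_of_edges(), 'overlap edges')

# With dedup=False, partitions() yields raw connected components
# (sets of read names) rather than new ReadGraph objects.
for cc in rg.partitions(dedup=False):
    print('component with', len(cc), 'reads')
```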
#### File: kevlar/kevlar/simlike.py
```python
from collections import defaultdict
import kevlar
from kevlar.vcf import Variant
from math import log, isclose
import scipy.stats
from scipy.special import comb as choose
class KevlarSampleLabelingError(ValueError):
pass
def discard_nonunique_kmers(altseq, case, controls, refr):
case_counts = case.get_kmer_counts(altseq)
alt_counts_refr = refr.get_kmer_counts(altseq)
case_counts_valid = [
c for c, r in zip(case_counts, alt_counts_refr) if r == 0
]
ctrl_counts_valid = list()
for control in controls:
ctrl_counts = control.get_kmer_counts(altseq)
valid_counts = [
c for c, r in zip(ctrl_counts, alt_counts_refr) if r == 0
]
ctrl_counts_valid.append(valid_counts)
return case_counts_valid, ctrl_counts_valid, alt_counts_refr
def discard_outlier_abunds(case_counts, ctrl_counts):
meanabund = sum(case_counts) / len(case_counts)
case_counts_valid = [
a for a in case_counts if abs(a - meanabund) < 20
]
ctrl_counts_valid = list()
for control in ctrl_counts:
meanabund = sum(control) / len(control)
alist = [a for a in control if abs(a - meanabund) < 20]
ctrl_counts_valid.append(alist)
return case_counts_valid, ctrl_counts_valid
def spanning_kmer_abundances(altseq, refrseq, case, controls, refr,
dropoutliers=False):
"""Aggregate the abundances of the k-mers spanning the variant.
- altseq: sequence spanning the variant (alternate allele)
- refrseq: sequence spanning the reference allele
- case: table of k-mer counts from proband reads
- controls: list of k-mer count tables, 1 for each parent/control
- refr: table of k-mer counts from reference genome assembly
This function collects the k-mer counts of each k-mer spanning the variant
alternate allele. Alt allele k-mers not unique to the variant (abundance >
0 in the reference genome) are discarded. The counts of the remaining
k-mers are retained for subsequent likelihood score calculations, in the
following format.
abundances = [
[15, 14, 13, 16, 14, 15, 14, 14], # k-mer abundances from case/proband
[0, 0, 1, 0, 2, 10, 0, 0], # k-mer abundances from parent/control 1
[0, 1, 1, 0, 1, 0, 2, 0], # k-mer abundances from parent/control 2
]
refr_abunds = [1, 1, 2, 1, 4, 2, 1, 1] # genomic freq of refr allele kmers
For SNVs and MNVs, each alternate allele k-mer has a corresponding
reference allele k-mer, and the counts of these in the reference genome are
retained to enable a dynamically scaled error model. For indels there is no
such correspondence, in which case `refr_abunds` is full of `None` values.
"""
orig_nkmers = len(altseq) - case.ksize() + 1
case_counts, ctrl_counts, alt_counts_refr = discard_nonunique_kmers(
altseq, case, controls, refr
)
if dropoutliers:
case_counts, ctrl_counts = discard_outlier_abunds(
case_counts, ctrl_counts
)
ndropped = orig_nkmers - len(case_counts)
abundances = [case_counts] + ctrl_counts
if len(altseq) == len(refrseq): # SNV or MNV
refr_counts = refr.get_kmer_counts(refrseq)
refr_abunds = [
c for c, r in zip(refr_counts, alt_counts_refr) if r == 0
]
else: # INDEL
refr_abunds = [None] * len(case_counts)
return abundances, refr_abunds, ndropped
def abund_log_prob(genotype, abundance, refrabund=None, mean=30.0, sd=8.0,
error=0.001):
"""Calculate probability of k-mer abundance conditioned on genotype.
The `genotype` variable represents the number of assumed allele copies and
is one of {0, 1, 2} (corresponding to genotypes {0/0, 0/1, and 1/1}). The
`mean` and `sd` variables describe a normal distribution of observed
abundances of k-mers with copy number 2. The `error` parameter is the
sequencing error rate.
For SNVs, there is a 1-to-1 correspondence of alternate allele k-mers to
reference allele k-mers. We can therefore check the frequency of the
reference allele in the reference genome and scale up the error rate if it
is repetitive. There is no such mapping of alt allele k-mers to refr allele
k-mers for indels, so we use a lower fixed error rate.
"""
if genotype == 0:
if not refrabund: # INDEL mode
refrabund = 1
error *= 0.01
scaledmean = mean * refrabund
if abundance > scaledmean:
abundance = scaledmean
nCk = choose(scaledmean, abundance, exact=True)
prob = (
log(nCk)
+ (abundance * log(error))
+ ((scaledmean - abundance) * log(1.0 - error))
)
return prob
elif genotype == 1:
return scipy.stats.norm.logpdf(abundance, mean / 2, sd / 2)
elif genotype == 2:
return scipy.stats.norm.logpdf(abundance, mean, sd)
def likelihood_denovo(abunds, refrabunds, mean=30.0, sd=8.0, error=0.001):
assert len(abunds[1]) == len(refrabunds), (len(abunds[1]), len(refrabunds))
assert len(abunds[2]) == len(refrabunds), (len(abunds[2]), len(refrabunds))
logsum = 0.0
# Case
for abund in abunds[0]:
logsum += abund_log_prob(1, abund, mean=mean, sd=sd)
# Controls
for altabunds in abunds[1:]:
for alt, refr in zip(altabunds, refrabunds):
logsum += abund_log_prob(0, alt, refrabund=refr, mean=mean,
error=error)
return logsum
def likelihood_false(abunds, refrabunds, mean=30.0, error=0.001):
assert len(abunds[1]) == len(refrabunds)
assert len(abunds[2]) == len(refrabunds)
logsum = 0.0
for altabunds in abunds:
for alt, refr in zip(altabunds, refrabunds):
logsum += abund_log_prob(0, alt, refrabund=refr, mean=mean,
error=error)
return logsum
def likelihood_inherited(abunds, mean=30.0, sd=8.0, error=0.001):
"""Compute the likelihood that a variant is inherited.
There are 15 valid inheritance scenarios, 11 of which (shown below) result
in the proband carrying at least one copy of the alternate allele. Select
the one with the highest likelihood.
The other likelihood calculations are implemented to handle an arbitrary
number of controls, but this can only handle trios.
"""
scenarios = [
(1, 0, 1), (1, 0, 2),
(1, 1, 0), (1, 1, 1), (1, 1, 2),
(1, 2, 0), (1, 2, 1),
(2, 1, 1), (2, 1, 2),
(2, 2, 1), (2, 2, 2),
]
logsum = 0.0
abundances = zip(abunds[0], abunds[1], abunds[2])
for a_c, a_m, a_f in abundances:
maxval = None
for g_c, g_m, g_f in scenarios:
p_c = abund_log_prob(g_c, a_c, mean=mean, sd=sd, error=error)
p_m = abund_log_prob(g_m, a_m, mean=mean, sd=sd, error=error)
p_f = abund_log_prob(g_f, a_f, mean=mean, sd=sd, error=error)
testsum = p_c + p_m + p_f + log(1.0 / 15.0)
if maxval is None or testsum > maxval:
maxval = testsum
logsum += maxval
return log(15.0 / 11.0) + logsum # 1 / (11/15)
def joinlist(thelist):
if len(thelist) == 0:
return '.'
else:
return ','.join([str(v) for v in thelist])
def calc_likescore(call, altabund, refrabund, mu, sigma, epsilon):
lldn = likelihood_denovo(altabund, refrabund, mean=mu, sd=sigma,
error=epsilon)
llfp = likelihood_false(altabund, refrabund, mean=mu, error=epsilon)
llih = likelihood_inherited(altabund, mean=mu, sd=sigma, error=epsilon)
likescore = lldn - max(llfp, llih)
call.annotate('LLDN', lldn)
call.annotate('LLFP', llfp)
call.annotate('LLIH', llih)
call.annotate('LIKESCORE', likescore)
def default_sample_labels(nsamples):
samples = list()
for i in range(nsamples):
samples.append('Control{:d}'.format(i))
samples[0] = 'Case'
return samples
def annotate_abundances(call, abundances, refrabund, samplelabels):
if len(refrabund) > 0 and None not in refrabund:
call.annotate('REFRCOPYNUM', ','.join(map(str, refrabund)))
for sample, abundlist in zip(samplelabels, abundances):
abundstr = joinlist(abundlist)
call.format(sample, 'ALTABUND', abundstr)
def process_partition(partitionid, calls, ambigthresh=10):
passcalls = [c for c in calls if c.filterstr == 'PASS']
if len(passcalls) == 0:
return
maxscore = max([c.attribute('LIKESCORE') for c in passcalls])
maxcalls = list()
for c in calls:
passed = c.filterstr == 'PASS'
optimal = isclose(c.attribute('LIKESCORE'), maxscore)
if passed and optimal:
maxcalls.append(c)
else:
c.filter(kevlar.vcf.VariantFilter.PartitionScore)
for c in maxcalls:
if ambigthresh and len(maxcalls) > ambigthresh:
c.filter(kevlar.vcf.VariantFilter.AmbiguousCall)
else:
c.annotate('CALLCLASS', partitionid)
def window_check(call, ksize=31):
altspan = call.window
refspan = call.refrwindow
altmissing = altspan is None
refmissing = refspan is None
altshort = altspan and len(altspan) < ksize
refshort = refspan and len(refspan) < ksize
if altmissing or refmissing or altshort or refshort:
if call.filterstr == 'PASS':
message = 'WARNING: stubbornly refusing to compute likelihood:'
kevlar.plog('[kevlar::simlike]', message)
if altmissing:
message = ' missing alt allele spanning window'
kevlar.plog('[kevlar::simlike]', message)
if refmissing:
message = ' missing refr allele spanning window'
kevlar.plog('[kevlar::simlike]', message)
if altshort:
message = ' alt allele spanning window {:s}'.format(altspan)
message += ', shorter than k size {:d}'.format(ksize)
kevlar.plog('[kevlar::simlike]', message)
if refshort:
message = ' ref allele spanning window {:s}'.format(refspan)
message += ', shorter than k size {:d}'.format(ksize)
kevlar.plog('[kevlar::simlike]', message)
return True
return False
def check_hash_spanning_novel_kmers(call, caseabundlist, casemin):
abovethresh = [a for a in caseabundlist if a >= casemin]
if len(abovethresh) == 0:
call.filter(kevlar.vcf.VariantFilter.PassengerVariant)
def check_case_abund_low(call, caseabundlist, casemin, caseabundlow):
if not caseabundlow or caseabundlow <= 0:
return
belowthresh = [a < casemin for a in caseabundlist]
    toomanykmers = [True] * caseabundlow
    # Filter the call if `caseabundlow` or more consecutive k-mers fall below
    # the case abundance threshold (the run is detected via substring match).
    if ''.join(map(str, toomanykmers)) in ''.join(map(str, belowthresh)):
call.filter(kevlar.vcf.VariantFilter.CaseAbundance)
def check_ctrl_abund_high(call, ctrlabundlists, ctrlmax, ctrlabundhigh):
if not ctrlabundhigh or ctrlabundhigh <= 0:
return
for abundlist in ctrlabundlists:
toohigh = [a for a in abundlist if a > ctrlmax]
if len(toohigh) > ctrlabundhigh:
call.filter(kevlar.vcf.VariantFilter.ControlAbundance)
break
def simlike(variants, case, controls, refr, mu=30.0, sigma=8.0, epsilon=0.001,
casemin=6, ctrlmax=1, caseabundlow=5, ctrlabundhigh=4,
samplelabels=None, fastmode=False, minlikescore=0.0,
dropoutliers=False, ambigthresh=10):
calls_by_partition = defaultdict(list)
if samplelabels is None:
samplelabels = default_sample_labels(len(controls) + 1)
progress_indicator = kevlar.ProgressIndicator(
'[kevlar::simlike] scores for {counter} calls computed'
)
for call in variants:
skipvar = fastmode and call.filterstr != 'PASS'
if skipvar or window_check(call, case.ksize()):
call.annotate('LIKESCORE', float('-inf'))
calls_by_partition[call.attribute('PART')].append(call)
continue
altabund, refrabund, ndropped = spanning_kmer_abundances(
call.window, call.refrwindow, case, controls, refr,
dropoutliers=dropoutliers
)
call.annotate('DROPPED', ndropped)
check_hash_spanning_novel_kmers(call, altabund[0], casemin)
check_case_abund_low(call, altabund[0], casemin, caseabundlow)
check_ctrl_abund_high(call, altabund[1:], ctrlmax, ctrlabundhigh)
skipvar = fastmode and call.filterstr != 'PASS'
if skipvar:
call.annotate('LIKESCORE', float('-inf'))
calls_by_partition[call.attribute('PART')].append(call)
continue
calc_likescore(call, altabund, refrabund, mu, sigma, epsilon)
annotate_abundances(call, altabund, refrabund, samplelabels)
calls_by_partition[call.attribute('PART')].append(call)
progress_indicator.update()
allcalls = list()
for partition, calls in calls_by_partition.items():
process_partition(partition, calls, ambigthresh=ambigthresh)
allcalls.extend(calls)
allcalls.sort(key=lambda c: c.attribute('LIKESCORE'), reverse=True)
for call in allcalls:
if call.attribute('LIKESCORE') < minlikescore:
call.filter(kevlar.vcf.VariantFilter.LikelihoodFail)
yield call
def main(args):
nsamples = len(args.controls) + 1
if args.sample_labels:
nlabels = len(args.sample_labels)
if nlabels and nlabels != nsamples:
message = 'provided {:d} labels'.format(nlabels)
message += ' but {:d} samples'.format(nsamples)
raise KevlarSampleLabelingError(message)
else:
args.sample_labels = default_sample_labels(nsamples)
kevlar.plog('[kevlar::simlike] Loading k-mer counts for each sample')
case = kevlar.sketch.load(args.case)
controls = [kevlar.sketch.load(c) for c in args.controls]
refr = kevlar.sketch.load(args.refr)
reader = kevlar.vcf.vcfstream(args.vcf)
outstream = kevlar.open(args.out, 'w')
writer = kevlar.vcf.VCFWriter(outstream, source='kevlar::simlike')
for label in args.sample_labels:
writer.register_sample(label)
writer.write_header()
message = 'Computing likelihood scores for preliminary variant calls'
kevlar.plog('[kevlar::simlike]', message)
calculator = simlike(
reader, case, controls, refr, mu=args.mu, sigma=args.sigma,
epsilon=args.epsilon, casemin=args.case_min, ctrlmax=args.ctrl_max,
caseabundlow=args.case_abund_low, ctrlabundhigh=args.ctrl_abund_high,
samplelabels=args.sample_labels, fastmode=args.fast_mode,
minlikescore=args.min_like_score, dropoutliers=args.drop_outliers,
ambigthresh=args.ambig_thresh,
)
for call in calculator:
writer.write(call)
```
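To make the likelihood model above concrete, the following sketch plugs the toy abundance lists from the `spanning_kmer_abundances()` docstring into the three likelihood functions and combines them the same way `calc_likescore()` does. It assumes only that scipy is installed; the numbers are illustrative, not real data.
```python
# A small worked example (not from the kevlar test suite) using the toy
# abundances shown in the spanning_kmer_abundances() docstring.
from kevlar.simlike import (likelihood_denovo, likelihood_false,
                            likelihood_inherited)

abundances = [
    [15, 14, 13, 16, 14, 15, 14, 14],  # k-mer abundances from case/proband
    [0, 0, 1, 0, 2, 10, 0, 0],         # k-mer abundances from parent/control 1
    [0, 1, 1, 0, 1, 0, 2, 0],          # k-mer abundances from parent/control 2
]
refr_abunds = [1, 1, 2, 1, 4, 2, 1, 1]  # genomic freq of refr allele k-mers

lldn = likelihood_denovo(abundances, refr_abunds, mean=30.0, sd=8.0)
llfp = likelihood_false(abundances, refr_abunds, mean=30.0)
llih = likelihood_inherited(abundances, mean=30.0, sd=8.0)
# LIKESCORE as computed by calc_likescore(): de novo vs. best alternative model
likescore = lldn - max(llfp, llih)
print(lldn, llfp, llih, likescore)
```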
#### File: kevlar/tests/test_cli.py
```python
import kevlar
import pytest
from sys import stderr
def test_kevlar_open():
thefile = kevlar.tests.data_file('wasp-pass.contig.augfasta')
filehandle = kevlar.open(thefile, 'r')
filecontents = filehandle.read()
assert len(filecontents.strip().split('\n')) == 9
with pytest.raises(ValueError, match=r'invalid mode "p"'):
filehandle = kevlar.open(thefile, 'p')
def test_main(capsys):
import kevlar.__main__
contig = kevlar.tests.data_file('wasp-pass.contig.augfasta')
cutout = kevlar.tests.data_file('wasp.gdna.fa')
arglist = ['call', contig, cutout]
kevlar.logstream, logstream = stderr, kevlar.logstream
kevlar.__main__.main(arglist)
kevlar.logstream = logstream
out, err = capsys.readouterr()
assert '\tPASS\t' in out
assert '\tPassengerVariant\t' in out
def test_help(capsys):
with pytest.raises(SystemExit):
kevlar.cli.parser().parse_args(['-h'])
out, err = capsys.readouterr()
assert 'show this help message and exit' in out
def test_version(capsys):
with pytest.raises(SystemExit):
kevlar.cli.parser().parse_args(['-v'])
out, err = capsys.readouterr()
assert kevlar.__version__ in out or kevlar.__version__ in err
@pytest.mark.parametrize('subcommand', [s for s in kevlar.cli.mains])
def test_help_sub(subcommand, capsys):
with pytest.raises(SystemExit):
kevlar.cli.parser().parse_args([subcommand, '-h'])
out, err = capsys.readouterr()
assert subcommand in out
assert 'show this help message and exit' in out
```
#### File: kevlar/tests/test_evaluate.py
```python
import kevlar
from kevlar.tests import data_file
def test_compact():
bedstream = kevlar.open(data_file('compact-test-refr.bed.gz'), 'r')
index = kevlar.evaluate.populate_index_from_bed(bedstream)
vcfstream = kevlar.open(data_file('compact-test-pred.vcf.gz'), 'r')
reader = kevlar.vcf.VCFReader(vcfstream)
compactor = kevlar.evaluate.compact(reader, index, delta=10)
calls = list(compactor)
assert len(calls) == 33
```
#### File: kevlar/tests/test_gentrio.py
```python
from io import StringIO
import os
import pytest
import random
from tempfile import mkdtemp
from shutil import rmtree
import sys
import kevlar
from kevlar import MutableString
from kevlar.tests import data_file
def test_weights_str_to_dict():
from kevlar.gentrio import weights_str_to_dict as ws2d
def same_dict(d1, d2):
if d1.keys() != d2.keys():
return False
for key, d1val in d1.items():
d2val = d2[key]
if abs(d1val - d2val) > 0.0001:
return False
return True
assert same_dict(ws2d('snv=1.0'), {'snv': 1.0})
assert same_dict(ws2d('snv=3.14'), {'snv': 1.0})
assert same_dict(
ws2d('snv=0.8,ins=0.1,del=0.1'),
{'snv': 0.8, 'ins': 0.1, 'del': 0.1}
)
assert same_dict(
ws2d('snv=0.8,ins=0.2,del=0.2'),
{'snv': 0.8 / 1.2, 'ins': 0.2 / 1.2, 'del': 0.2 / 1.2}
)
with pytest.raises(ValueError, match=r'too many values to unpack'):
ws2d('snv=0.8;ins=0.2;del=0.2')
def test_rng():
rng = random.Random(1776)
draws = [rng.randint(1, 100) for _ in range(5)]
assert draws == [80, 75, 14, 46, 21]
@pytest.mark.parametrize('seq,pos,offset,refr,alt,refrwindow,altwindow', [
('AACTAGCCTGCGGTCTGTGTTTCCCGACTTCTGAGTCATGGGGTTTCAATGCCTAT',
14, 2, 'C', 'T', 'CCTGCGGTCTGTGTTTC', 'CCTGCGGTTTGTGTTTC'),
('TTGAGATCGCGACGCTACTCTGAGCTCGGAGGAGCGGCATAAACGCGCCACCACCC',
26, 1, 'C', 'G', 'TCTGAGCTCGGAGGAGC', 'TCTGAGCTGGGAGGAGC'),
('CCTTGGTGCCACGATCCGGCTATGGCGGAAGGGCACACCTAACCGCAACATTTGCC',
2, 2, 'T', 'C', 'CCTTGGTGCCA', 'CCCTGGTGCCA'),
('GGGTCCCAAGAGTCTGATTTCTAGCTTTTTATTTACACCCCGGTAGCAGGATCAGA',
33, 3, 'T', 'G', 'TTTTTATTTACACCCCG', 'TTTTTATTGACACCCCG'),
])
def test_snv(seq, pos, offset, refr, alt, refrwindow, altwindow):
testrefr, testalt, testrw, testaw = kevlar.gentrio.mutate_snv(
seq, pos, offset, ksize=9
)
print('REFR', refr, testrefr, refrwindow, testrw, file=sys.stderr)
print('ALT', alt, testalt, altwindow, testaw, file=sys.stderr)
assert refr == testrefr
assert alt == testalt
assert refrwindow == testrw
assert altwindow == testaw
@pytest.mark.parametrize('seq,pos,length,duplpos,refr,alt,rwindow,awindow', [
('AACTAGCCTGCGGTCTGTGTTTCCCGACTTCTGAGTCATGGGGTTTCAATGCCTAT',
11, 5, 33, 'C', 'CAGTCA', 'CTGCGGTC', 'CTGCAGTCAGGTC'),
('TTGAGATCGCGACGCTACTCTGAGCTCGGAGGAGCGGCATAAACGCGCCACCACCC',
47, 11, 32, 'G', 'GAGCGGCATAAA', 'CGCGCCAC', 'CGCGAGCGGCATAAACCAC'),
('CCTTGGTGCCACGATCCGGCTATGGCGGAAGGGCACACCTAACCGCAACATTTGCC',
52, 3, 39, 'T', 'TTAA', 'CATTTGCC', 'CATTTAATGCC'),
('GGGTCCCAAGAGTCTGATTTCTAGCTTTTTATTTACACCCCGGTAGCAGGATCAGA',
9, 9, 29, 'A', 'ATATTTACAC', 'CCAAGAGT', 'CCAATATTTACACGAGT'),
])
def test_insertion(seq, pos, length, duplpos, refr, alt, rwindow, awindow):
testrefr, testalt, testrw, testaw = kevlar.gentrio.mutate_insertion(
seq, pos, length, duplpos, ksize=5
)
print('REFR', refr, testrefr, rwindow, testrw, file=sys.stderr)
print('ALT', alt, testalt, awindow, testaw, file=sys.stderr)
assert refr == testrefr
assert alt == testalt
assert rwindow == testrw
assert awindow == testaw
def test_insertion_rng():
seq = 'ATGCCTATAGATTCAGTAGTTACCAGAGGCAGTGGTGTTTGCCACGCCATTTCTACGCGA'
rng = random.Random(2018)
refr, alt, refrwindow, altwindow = kevlar.gentrio.mutate_insertion(
seq, position=19, length=5, duplpos=44, rng=rng, ksize=11
)
assert refr == 'G'
assert alt == 'GCCCCA'
assert refrwindow == 'GATTCAGTAGTTACCAGAGG'
assert altwindow == 'GATTCAGTAGCCCCATTACCAGAGG'
@pytest.mark.parametrize('seq,pos,length,refr,alt,rwindow,awindow', [
('AACTAGCCTGCGGTCTGTGTTTCCCGACTTCTGAGTCATGGGGTTTCAATGCCTAT',
5, 9, 'AGCCTGCGGT', 'A', 'ACTAGCCTGCGGTCTGT', 'ACTACTGT'),
('TTGAGATCGCGACGCTACTCTGAGCTCGGAGGAGCGGCATAAACGCGCCACCACCC',
37, 4, 'GCATA', 'G', 'GCGGCATAAACG', 'GCGGAACG'),
('CCTTGGTGCCACGATCCGGCTATGGCGGAAGGGCACACCTAACCGCAACATTTGCC',
14, 7, 'ATCCGGCT', 'A', 'ACGATCCGGCTATGG', 'ACGAATGG'),
('GGGTCCCAAGAGTCTGATTTCTAGCTTTTTATTTACACCCCGGTAGCAGGATCAGA',
49, 5, 'GGATCA', 'G', 'GCAGGATCAGA', 'GCAGGA'),
])
def test_deletion(seq, pos, length, refr, alt, rwindow, awindow):
testrefr, testalt, testrw, testaw = kevlar.gentrio.mutate_deletion(
seq, pos, length, ksize=5
)
print('REFR', refr, testrefr, rwindow, testrw, file=sys.stderr)
print('ALT', alt, testalt, awindow, testaw, file=sys.stderr)
assert refr == testrefr
assert alt == testalt
assert rwindow == testrw
assert awindow == testaw
def test_gen_muts():
seqstream = kevlar.open(data_file('100kbx3.fa.gz'), 'r')
sequences = kevlar.seqio.parse_seq_dict(seqstream)
w = {'snv': 0.7, 'ins': 0.15, 'del': 0.15}
mutator = kevlar.gentrio.generate_mutations(sequences, weights=w, rng=42)
mutations = list(mutator)
refrs = [m._refr for m in mutations]
alts = [m._alt for m in mutations]
print('DEBUG refrs', refrs, file=sys.stderr)
print('DEBUG alts', alts, file=sys.stderr)
testrefrs = [
'ATTACGACAGAGTTTGTAGGTGTACGAGCCCAATCCAACGTCGGCCATCCGAGACTCTTTAAGTACCCG'
'GCCATACACTGTGCGCCGAAAAATCAGCGATCATACCACCGTTTGAAGCTTCACGGCCGAGTGTTCTGG'
'CGATTCGT', 'TATATGAGCTCTCGACGGAATTTACGAGCGCGTATAAGCCTTTTGCAGTTACAACAT'
'T', 'A', 'GAGTTGGGTATAATAACGTAGTCGGGGGAGCAGATGGAGCAGTGCGACCGCCG', 'C',
'G', 'A', 'T', 'G', 'C'
]
testalts = [
'A', 'T', 'C', 'G', 'G', 'C', 'ATGCGCAGAGGATATGTTAGTGACTATTGAAGGTGGAAC'
'TTGCAAGGGAATGGGTTCACCCTTGCGATTTCGGGGCTACTAAGCACATAGGCTAACGGCAGATGGAGT'
'AAGCTACGCCAAAACTAATTAGCGTGCTCGGGGCGTAGGCGGGACCCCGGAAATGATAACCAGGATCAA'
'ACATCCCTTCTTCGACCGAAGGCTGTTGCGCACGTATGACAGCTCTGTGACGCTCTAGATTCAGCTTTG'
'AAGTCGTGACACGTTGCGATACCTTGACCTGGATGAAACTTCGCCGGGACTTCCCTGACAA', 'TTTG'
'TTCCCATGACTTACGCTACACACGAGCCAGCTAGCTGCGAAAACCTAAGAGCCTCCG', 'A', 'CTA'
'GCGAAACACGGAATAACATCAAATGACAGCTATCTCCCAAGATGGTGGGTAGGTTTATAGTAGAGTGGG'
'CGGCTACATTCGTCTCCCCGGCCCGCAGCCCGCGCACTATAGCAAAATGTTAATGCAGGTTCTGCCCTC'
'CATATAGATCACACGCTAAGTCAAAATACGACCCTGTGACCAGCCGCAATCACTTGCTGAATTCCGCAC'
'CTTGCTCCAGCGACTATCTTCTTCCTTAAGCCCCTGGT'
]
assert refrs == testrefrs
assert alts == testalts
assert mutations[0].genotypes is None
@pytest.mark.parametrize('seed', [None, 1101097205845186752])
def test_gen_with_inversions(seed):
seqs = {'1': 'ACGT'}
w = {'inv': 1.0}
with pytest.raises(ValueError, match=r'unknown mutation type inv'):
# Invoke the mutator!!!
list(kevlar.gentrio.generate_mutations(seqs, weights=w, rng=seed))
def test_sim_var_geno_smoketest():
seqstream = kevlar.open(data_file('100kbx3.fa.gz'), 'r')
sequences = kevlar.seqio.parse_seq_dict(seqstream)
ninh = random.randint(1, 10)
ndenovo = random.randint(1, 10)
simulator = kevlar.gentrio.simulate_variant_genotypes(
sequences, ninh=ninh, ndenovo=ndenovo
)
variants = list(simulator)
assert len(variants) == ninh + ndenovo
def test_gentrio_cli_smoketest_weights():
tempdir = mkdtemp()
prefix = os.path.join(tempdir, 'outfile')
vcffile = prefix + '.vcf'
ninh = random.randint(1, 10)
ndenovo = random.randint(1, 10)
weights = 'ins=0.5,del=0.5'
arglist = [
'gentrio', '--prefix', prefix, '--weights', weights, '--vcf', vcffile,
'--inherited', str(ninh), '--de-novo', str(ndenovo),
data_file('100kbx3.fa.gz')
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.gentrio.main(args)
with open(vcffile, 'r') as vcf:
for line in vcf:
if line.strip() == '' or line.startswith('#'):
continue
values = line.split('\t')
refr, alt = values[3:5]
print('DEBUG', refr, alt)
assert len(refr) != len(alt)
rmtree(tempdir)
def test_sim_var_geno():
seqstream = kevlar.open(data_file('100kbx3.fa.gz'), 'r')
sequences = kevlar.seqio.parse_seq_dict(seqstream)
simulator = kevlar.gentrio.simulate_variant_genotypes(
sequences, ninh=2, ndenovo=2, rng=112358 ^ 853211
)
variants = list(simulator)
seqids = [v.seqid for v in variants]
positions = [v.position for v in variants]
genotypes = [v.genotypes for v in variants]
print('DEBUG', seqids, positions, genotypes)
assert len(variants) == 4
assert seqids == ['scaf3', 'scaf3', 'scaf1', 'scaf2']
assert positions == [4936, 57391, 67028, 88584]
assert genotypes == [
('0/1', '0/1', '1/0'),
('1/1', '1/1', '1/1'),
('1/0', '0/0', '0/0'),
('0/1', '0/0', '0/0')
]
def test_apply_mut_snv():
contig = MutableString('ACGTACGTACGT')
kevlar.gentrio.apply_mutation(contig, 5, 'C', 'G')
assert contig == 'ACGTAGGTACGT'
kevlar.gentrio.apply_mutation(contig, 5, 'G', 'A')
assert contig == 'ACGTAAGTACGT'
kevlar.gentrio.apply_mutation(contig, 0, 'A', 'T')
assert contig == 'TCGTAAGTACGT'
def test_apply_mut_ins():
contig = MutableString('ACGTACGTACGT')
kevlar.gentrio.apply_mutation(contig, 5, 'A', 'AAAAA')
assert contig == 'ACGTAAAAACGTACGT'
contig = MutableString('CTTGAGACTTAGTAAAACCGTC')
kevlar.gentrio.apply_mutation(contig, 7, 'A', 'ATTCTTGTT')
assert contig == 'CTTGAGATTCTTGTTCTTAGTAAAACCGTC'
def test_apply_mut_del():
contig = MutableString('ACGTACGTACGT')
kevlar.gentrio.apply_mutation(contig, 5, 'ACGTAC', 'A')
assert contig == 'ACGTAGT'
def test_gentrio_smoketest():
seqstream = kevlar.open(data_file('100kbx3.fa.gz'), 'r')
sequences = kevlar.seqio.parse_seq_dict(seqstream)
outstreams = [StringIO(), StringIO(), StringIO()]
mutator = kevlar.gentrio.gentrio(sequences, outstreams, ninh=2, ndenovo=1,
seed=1985)
variants = list(mutator)
for variant in variants:
print(variant.vcf, file=sys.stderr)
for i in range(3):
outstreams[i].seek(0)
probandseqs = kevlar.seqio.parse_seq_dict(outstreams[0])
motherseqs = kevlar.seqio.parse_seq_dict(outstreams[1])
fatherseqs = kevlar.seqio.parse_seq_dict(outstreams[2])
print(probandseqs['scaf1_haplo1'][variants[0].position])
print(probandseqs['scaf1_haplo2'][variants[0].position])
assert variants[0].genotypes[0] == '0/1'
assert variants[0].refrwindow in probandseqs['scaf1_haplo1']
assert variants[0].refrwindow not in probandseqs['scaf1_haplo2']
assert variants[0].window not in probandseqs['scaf1_haplo1']
assert variants[0].window in probandseqs['scaf1_haplo2']
print(probandseqs['scaf3_haplo1'][variants[2].position])
print(probandseqs['scaf3_haplo2'][variants[2].position])
print(motherseqs['scaf3_haplo1'][variants[2].position])
print(motherseqs['scaf3_haplo2'][variants[2].position])
print(fatherseqs['scaf3_haplo1'][variants[2].position])
print(fatherseqs['scaf3_haplo2'][variants[2].position])
assert variants[2].window in probandseqs['scaf3_haplo1']
assert variants[2].refrwindow in probandseqs['scaf3_haplo2']
assert variants[2].refrwindow in motherseqs['scaf3_haplo1']
assert variants[2].refrwindow in motherseqs['scaf3_haplo2']
assert variants[2].refrwindow in fatherseqs['scaf3_haplo1']
assert variants[2].window in fatherseqs['scaf3_haplo2']
def test_gentrio_cli_smoketest():
tempdir = mkdtemp()
prefix = os.path.join(tempdir, 'outfile')
arglist = [
'gentrio', '--prefix', prefix, data_file('100kbx3.fa.gz')
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.gentrio.main(args)
rmtree(tempdir)
def test_gentrio_cli(capsys):
tempdir = mkdtemp()
prefix = os.path.join(tempdir, 'outfile')
arglist = [
'gentrio', '--prefix', prefix, '--vcf', '-', data_file('100kbx3.fa.gz')
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.gentrio.main(args)
rmtree(tempdir)
out, err = capsys.readouterr()
outlines = out.strip().split('\n')
numvariants = sum([1 for line in outlines if not line.startswith('#')])
assert numvariants == 30
def test_mutagenize():
rng = random.Random(1123581321)
mutseq = kevlar.gentrio.mutagenize('GATTACA' * 3, rng, rate=0.1)
assert mutseq == 'GATAACAGATTACAGATTACG'
```
#### File: kevlar/tests/test_localize.py
```python
from io import StringIO
import pytest
import kevlar
from kevlar.localize import Localizer, KevlarRefrSeqNotFoundError
from kevlar.localize import decompose_seeds, contigs_2_seeds, get_seed_matches
from kevlar.localize import cutout, localize
from kevlar.reference import ReferenceCutout
from kevlar.seqio import Record
from kevlar.tests import data_file
from tempfile import NamedTemporaryFile
def test_localizer_simple():
intervals = Localizer(seedsize=25)
assert list(intervals.get_cutouts()) == []
intervals.add_seed_match('chr1', 100)
intervals.add_seed_match('chr1', 115)
intervals.add_seed_match('chr2', 200)
intervals.add_seed_match('chr2', 205)
intervals.add_seed_match('chr2', 207)
intervals.add_seed_match('chr2', 235008)
intervals.add_seed_match('chr2', 235075)
testint = [c.interval for c in intervals.get_cutouts()]
print('DEBUG', testint)
assert testint == [
('chr1', 100, 140),
('chr2', 200, 232),
('chr2', 235008, 235100)
]
def test_localizer_incl_excl():
intervals = Localizer(seedsize=25)
intervals.add_seed_match('1', 100)
intervals.add_seed_match('1', 120)
intervals.add_seed_match('12', 200)
intervals.add_seed_match('12', 209)
intervals.add_seed_match('12', 213)
intervals.add_seed_match('X', 1234)
intervals.add_seed_match('X', 1245)
intervals.add_seed_match('Un', 13579)
intervals.add_seed_match('Un', 13597)
testint = [c.interval for c in intervals.get_cutouts()]
assert sorted(testint) == [
('1', 100, 145),
('12', 200, 238),
('Un', 13579, 13622),
('X', 1234, 1270),
]
intervals.exclpattern = 'Un'
testint = [c.interval for c in intervals.get_cutouts()]
assert sorted(testint) == [
('1', 100, 145),
('12', 200, 238),
('X', 1234, 1270),
]
intervals.inclpattern = r'^\d+$'
testint = [c.interval for c in intervals.get_cutouts()]
assert sorted(testint) == [
('1', 100, 145),
('12', 200, 238),
]
def test_get_cutouts_basic():
intervals = Localizer(seedsize=10)
intervals.add_seed_match('bogus-genome-chr2', 10)
refrstream = open(data_file('bogus-genome/refr.fa'), 'r')
seqs = kevlar.seqio.parse_seq_dict(refrstream)
cutouts = list(intervals.get_cutouts(refrseqs=seqs))
assert len(cutouts) == 1
assert cutouts[0].defline == 'bogus-genome-chr2_10-20'
assert cutouts[0].sequence == 'GTTACATTAC'
def test_get_cutouts_basic_2():
intervals = Localizer(seedsize=21)
intervals.add_seed_match('simple', 49)
intervals.add_seed_match('simple', 52)
intervals.add_seed_match('simple', 59)
refrstream = open(data_file('simple-genome-ctrl1.fa'), 'r')
seqs = kevlar.seqio.parse_seq_dict(refrstream)
cutouts = list(intervals.get_cutouts(refrseqs=seqs, delta=5))
assert len(cutouts) == 1
assert cutouts[0].defline == 'simple_44-85'
assert cutouts[0].sequence == 'AATACTATGCCGATTTATTCTTACACAATTAAATTGCTAGT'
def test_get_cutouts_basic_3():
intervals = Localizer(seedsize=21)
intervals.add_seed_match('simple', 40)
intervals.add_seed_match('simple', 80)
intervals.add_seed_match('simple', 120)
intervals.add_seed_match('simple', 500)
refrstream = open(data_file('simple-genome-ctrl1.fa'), 'r')
seqs = kevlar.seqio.parse_seq_dict(refrstream)
cutouts = list(
intervals.get_cutouts(refrseqs=seqs, clusterdist=None, delta=10)
)
assert len(cutouts) == 1
assert cutouts[0].defline == 'simple_30-531'
assert len(cutouts[0].sequence) == 501
def test_get_cutouts_large_span():
refrstream = open(data_file('simple-genome-ctrl1.fa'), 'r')
seqs = kevlar.seqio.parse_seq_dict(refrstream)
intervals = Localizer(seedsize=21)
intervals.add_seed_match('simple', 100)
intervals.add_seed_match('simple', 200)
cutouts = intervals.get_cutouts(refrseqs=seqs, clusterdist=50, delta=25)
deflines = [c.defline for c in cutouts]
assert deflines == ['simple_75-146', 'simple_175-246']
cutouts = intervals.get_cutouts(refrseqs=seqs, clusterdist=100, delta=50)
deflines = [c.defline for c in cutouts]
assert deflines == ['simple_50-271']
def test_get_cutouts_missing_seq():
intervals = Localizer(seedsize=21)
intervals.add_seed_match('simple', 100)
intervals.add_seed_match('simple', 200)
intervals.add_seed_match('TheCakeIsALie', 42)
intervals.add_seed_match('TheCakeIsALie', 100)
intervals.add_seed_match('TheCakeIsALie', 77)
refrstream = open(data_file('simple-genome-ctrl1.fa'), 'r')
seqs = kevlar.seqio.parse_seq_dict(refrstream)
with pytest.raises(KevlarRefrSeqNotFoundError, match=r'TheCakeIsALie'):
list(intervals.get_cutouts(refrseqs=seqs))
def test_extract_regions_boundaries():
refrstream = open(data_file('simple-genome-ctrl1.fa'), 'r')
seqs = kevlar.seqio.parse_seq_dict(refrstream)
intervals = Localizer(seedsize=31)
intervals.add_seed_match('simple', 15)
cutouts = list(intervals.get_cutouts(refrseqs=seqs, delta=20))
assert len(cutouts) == 1
assert cutouts[0].defline == 'simple_0-66'
intervals = Localizer(seedsize=31)
intervals.add_seed_match('simple', 925)
intervals.add_seed_match('simple', 955)
intervals.add_seed_match('simple', 978)
cutouts = list(intervals.get_cutouts(refrseqs=seqs, delta=20))
assert len(cutouts) == 1
assert cutouts[0].defline == 'simple_905-1000'
@pytest.mark.parametrize('X,numtargets', [
(100000, 1),
(10000, 5),
(1000, 33),
(0, 1),
(None, 33),
])
def test_maxdiff(X, numtargets):
contigstream = kevlar.parse_partitioned_reads(
kevlar.parse_augmented_fastx(
kevlar.open(data_file('maxdiff-contig.augfasta'), 'r')
)
)
refrfile = data_file('maxdiff-refr.fa.gz')
targeter = kevlar.localize.localize(contigstream, refrfile, seedsize=51,
delta=50, maxdiff=X)
targets = [cutout for partid, cutout in targeter]
print([t.defline for t in targets])
assert len(targets) == numtargets
@pytest.mark.parametrize('incl,excl,output', [
(None, None, '>seq1_10-191'),
(r'seq1', None, '>seq1_10-191'),
(None, 'seq1', 'WARNING: no reference matches'),
(r'chr[XY]', None, 'WARNING: no reference matches'),
(None, r'b0Gu$', '>seq1_10-191'),
])
def test_main(incl, excl, output, capsys):
contig = data_file('localize-contig.fa')
refr = data_file('localize-refr.fa')
arglist = ['localize', '--seed-size', '23', '--delta', '50', refr, contig]
args = kevlar.cli.parser().parse_args(arglist)
args.include = incl
args.exclude = excl
kevlar.localize.main(args)
out, err = capsys.readouterr()
print(out)
assert output in out or output in err
def test_main_no_matches(capsys):
contig = data_file('localize-contig-bad.fa')
refr = data_file('localize-refr.fa')
arglist = ['localize', '--seed-size', '23', refr, contig]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.localize.main(args)
out, err = capsys.readouterr()
assert 'WARNING: no reference matches' in err
def test_decompose_seeds():
assert list(decompose_seeds('GATTACA', 5)) == ['GATTA', 'ATTAC', 'TTACA']
assert list(decompose_seeds('GATTACA', 3)) == ['GAT', 'ATT', 'TTA', 'TAC',
'ACA']
def test_contigs_2_seeds():
seedfile = StringIO()
partitions = [[Record(name='seq', sequence='GATTACA')]]
contigs_2_seeds(partitions, seedfile, seedsize=5)
testoutput = '>seed0\nATTAC\n>seed1\nGATTA\n>seed2\nTGTAA\n'
assert seedfile.getvalue() == testoutput
def test_get_seed_matches():
seedfasta = (
'>seed0\nATCTGTTCTTGGCCAATAGAAAAAGCAAGGAGCCCTGAAAGACTCACAGTG\n'
'>seed1\nAAAAGGAAATGTTAACAACAAAATCACACAGATAAACCATCACAAGATCTG\n'
'>seed2\nGATTCTAGGAGCTTGTTACTGCTGCTGAAAAAGGAAATGTTAACAACAAAA\n'
'>seed3\nAACCAATAGAGGTCCACAGAAGTATATATAATCTGTTCTTGGCCAATAGAA\n'
'>seed4\nTTGTGTGTAAAAACCAATAGAGGTCCACAGAAGTATATATAATCTGTTCTT\n'
'>seed5\nAAGATACTATAATATGTTTCCCTGAGCACACCCCTTCGAAAGAGCAGAATT\n'
)
with NamedTemporaryFile(suffix='.fa', mode='w') as seedfile:
print(seedfasta, file=seedfile, flush=True)
refrfile = data_file('fiveparts-refr.fa.gz')
seed_matches = get_seed_matches(seedfile.name, refrfile, seedsize=51)
print(seed_matches)
assert seed_matches == {
'AACCAATAGAGGTCCACAGAAGTATATATAATCTGTTCTTGGCCAATAGAA': {('seq1',
284819)},
'AAGATACTATAATATGTTTCCCTGAGCACACCCCTTCGAAAGAGCAGAATT': {('seq1',
284722)},
'ATCTGTTCTTGGCCAATAGAAAAAGCAAGGAGCCCTGAAAGACTCACAGTG': {('seq1',
284849)},
'AAGAACAGATTATATATACTTCTGTGGACCTCTATTGGTTTTTACACACAA': {('seq1',
284808)},
}
def test_get_seed_matches_no_matches():
seedfasta = (
'>seed0\nAAAAGGAAATGTTAACAACAAAATCACACAGATAAACCATCACAAGATCTG\n'
'>seed1\nGATTCTAGGAGCTTGTTACTGCTGCTGAAAAAGGAAATGTTAACAACAAAA\n'
)
with NamedTemporaryFile(suffix='.fa', mode='w') as seedfile:
print(seedfasta, file=seedfile, flush=True)
refrfile = data_file('fiveparts-refr.fa.gz')
seed_matches = get_seed_matches(seedfile.name, refrfile, seedsize=51)
assert seed_matches == {}
def test_localize_new():
refr_file = data_file('fiveparts-refr.fa.gz')
contig_file = data_file('fiveparts.contigs.augfasta.gz')
contigstream = kevlar.parse_augmented_fastx(kevlar.open(contig_file, 'r'))
pstream = kevlar.parse_partitioned_reads(contigstream)
localizer = localize(pstream, refr_file, seedsize=51, debug=True)
cutoutdata = list(localizer)
partids = [partid for partid, gdna in cutoutdata]
gdnas = [gdna for partid, gdna in cutoutdata]
deflines = [g.defline for g in gdnas]
assert partids == ['1', '1', '2', '3', '4', '5']
assert sorted(deflines) == sorted([
'seq1_284663-284950', 'seq1_1924681-1925049', 'seq1_1660589-1660884',
'seq1_2315741-2316037', 'seq1_2321099-2321322', 'seq1_593102-593389'
])
def test_localize_no_match(capsys):
refr_file = data_file('fiveparts-refr.fa.gz')
contig_file = data_file('wasp-pass.contig.augfasta')
contigstream = kevlar.parse_augmented_fastx(kevlar.open(contig_file, 'r'))
pstream = kevlar.parse_partitioned_reads(contigstream)
localizer = localize(pstream, refr_file, seedsize=41, debug=True)
cutoutdata = list(localizer)
assert cutoutdata == []
out, err = capsys.readouterr()
assert 'WARNING: no reference matches' in err
@pytest.mark.parametrize('partid,testdeflines', [
('1', ['seq1_1924681-1925049', 'seq1_284663-284950']),
('4', ['seq1_2321099-2321322'])
])
def test_localize_new_single_partition(partid, testdeflines):
refr_file = data_file('fiveparts-refr.fa.gz')
contig_file = data_file('fiveparts.contigs.augfasta.gz')
contigstream = kevlar.parse_augmented_fastx(kevlar.open(contig_file, 'r'))
pstream = kevlar.parse_single_partition(contigstream, partid)
localizer = localize(pstream, refr_file, seedsize=51)
cutoutdata = list(localizer)
partids = [partid for partid, gdna in cutoutdata]
gdnas = [gdna for partid, gdna in cutoutdata]
deflines = sorted([g.defline for g in gdnas])
assert deflines == testdeflines
def test_localize_cli(capsys):
refr_file = data_file('fiveparts-refr.fa.gz')
contig_file = data_file('fiveparts.contigs.augfasta.gz')
arglist = ['localize', '--part-id', '2', refr_file, contig_file]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.localize.main(args)
out, err = capsys.readouterr()
assert out == (
'>seq1_1660589-1660884 kvcc=2\n'
'GATAGATCTCCAAGAATTTTATACAGCAGGGCCCTGAGAATGAGCATGGAAGTGAATTTATTAGCCAGT'
'GACAGTCACTTCACACTCTTCCTATATCAAAATTGAAGCCCAGGCTGGAGGTGGGCAGGGGTAGTACTT'
'TTATGGACTGGACAGGGCGTAATCCCACCTGGGCGTGGGAGGAATATAAAAATAACCTTTAATTAATTC'
'TGTCTGTAATTTATCTATGGGATGGGGTTGTTCAGAGAAGACTTCAATACCAGTTATTTAAGCCTGACC'
'CTGGCTTGCCTTGACCCCA\n'
)
arglist = ['localize', refr_file, contig_file]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.localize.main(args)
out, err = capsys.readouterr()
outlines = out.strip().split('\n')
assert len(outlines) == 12
```
#### File: kevlar/tests/test_mutate.py
```python
from sys import stderr
import pytest
import kevlar
from kevlar.mutate import Mutation
from kevlar.tests import data_file
def test_load_mutations_x():
instream = kevlar.open(data_file('muts-x.txt'), 'r')
mutations = kevlar.mutate.load_mutations(instream, stderr)
assert len(mutations) == 1
assert '1' in mutations
assert len(mutations['1']) == 1
mut = mutations['1'][0]
assert mut == Mutation(seq='1', pos=441274, type='snv', data='3')
def test_load_mutations_y():
instream = kevlar.open(data_file('muts-y.tsv'), 'r')
mutations = kevlar.mutate.load_mutations(instream, stderr)
assert len(mutations) == 3
assert 'scaffold399' in mutations
assert len(mutations['scaffold399']) == 1
mut = mutations['scaffold399'][0]
assert mut == Mutation(seq='scaffold399', pos=685357, type='ins',
data='AGCTACCCCAGTGAGTCGGTAATGTGATC')
assert 'scaffold982' in mutations
assert len(mutations['scaffold982']) == 1
mut = mutations['scaffold982'][0]
assert mut == Mutation(seq='scaffold982', pos=108754, type='del',
data='23')
assert 'scaffold1102' in mutations
assert len(mutations['scaffold1102']) == 1
mut = mutations['scaffold1102'][0]
assert mut == Mutation(seq='scaffold1102', pos=260686, type='snv',
data='1')
def test_load_mutations_z():
instream = kevlar.open(data_file('muts-z.csv'), 'r')
with pytest.raises(ValueError, match=r'error parsing mutation'):
mutations = kevlar.mutate.load_mutations(instream, stderr)
def test_mutate_snv():
mutation = Mutation(seq='contig', pos=5, type='snv', data='1')
contig = 'ACGTACGTACGT'
assert kevlar.mutate.mutate_snv(contig, mutation) == 'ACGTAGGTACGT'
mutation = Mutation(seq='contig', pos=5, type='snv', data='-1')
assert kevlar.mutate.mutate_snv(contig, mutation) == 'ACGTAAGTACGT'
mutation = Mutation(seq='contig', pos=0, type='snv', data='-1')
assert kevlar.mutate.mutate_snv(contig, mutation) == 'TCGTACGTACGT'
def test_mutate_ins():
mutation = Mutation(seq='contig', pos=5, type='ins', data='AAAA')
contig = 'ACGTACGTACGT'
mutcontig = 'ACGTAAAAACGTACGT'
assert kevlar.mutate.mutate_insertion(contig, mutation) == mutcontig
def test_mutate_del():
    mutation = Mutation(seq='contig', pos=5, type='del', data='5')
contig = 'ACGTACGTACGT'
assert kevlar.mutate.mutate_deletion(contig, mutation) == 'ACGTAGT'
def test_mutate_inv():
mutation = Mutation(seq='contig', pos=5, type='inv', data='5')
contig = 'ACGTACGTACGT'
assert kevlar.mutate.mutate_inversion(contig, mutation) == 'ACGTACATGCGT'
def test_mutate_bogus():
instream = kevlar.open(data_file('muts-w.txt'), 'r')
with pytest.raises(ValueError, match=r'invalid variant type "slippage"'):
mutations = kevlar.mutate.load_mutations(instream, stderr)
def test_mutate_main(capsys):
genome = data_file('mut-genome.fa')
muts = data_file('mut-genome.txt')
args = kevlar.cli.parser().parse_args(['mutate', muts, genome])
kevlar.mutate.main(args)
out, err = capsys.readouterr()
contig1 = '>contig1\nGTACGGCTATTGTCTGAGCTCTTTTTAAGACTAATACGCGCTGGCTCACGGAA'
assert contig1 in out
contig2 = '>contig2\nGTCATGAACTGACTCGCACGCGCTTCGGAAATTGCCGTATGATATGAC'
assert contig2 in out
contig3 = '>contig3\nAGTCGAGTATTGTGGCATAAGCGGAACA'
assert contig3 in out
```
#### File: kevlar/tests/test_novel.py
```python
import filecmp
import glob
import json
import pytest
import re
from tempfile import NamedTemporaryFile, mkdtemp
import screed
from shutil import rmtree
import sys
import kevlar
from kevlar.tests import data_file, data_glob
from khmer import Counttable
def test_novel_banding_args():
errormsg = r'Must specify `numbands` and `band` together'
with pytest.raises(ValueError, match=errormsg):
reads = list(kevlar.novel.novel(None, [], [], numbands=4))
with pytest.raises(ValueError, match=errormsg):
reads = list(kevlar.novel.novel(None, [], [], band=0))
errormsg = r'`band` must be a value between 0 and 3'
with pytest.raises(ValueError, match=errormsg):
reads = list(kevlar.novel.novel(None, [], [], numbands=4, band=-1))
def test_cli():
args = kevlar.cli.parser().parse_args([
'novel', '--case', 'case1.fq', '--control', 'cntl1.fq', '--control',
'cntl2.fq', '-k', '17',
])
assert args.ksize == 17
assert args.case_min == 6
assert args.ctrl_max == 1
assert args.num_bands is None
assert args.band is None
args = kevlar.cli.parser().parse_args([
'novel', '--num-bands', '8', '--band', '1', '--case', 'case1.fq',
'--control', 'cntl1.fq', '--control', 'cntl2.fq',
])
assert args.ksize == 31
assert args.case_min == 6
assert args.ctrl_max == 1
assert args.num_bands == 8
assert args.band == 1
errormsg = r'Must specify --num-bands and --band together'
with pytest.raises(ValueError, match=errormsg):
args = kevlar.cli.parser().parse_args([
'novel', '--case', 'case1.fq', '--control', 'cntl1.fq',
'--band', '1'
])
kevlar.novel.main(args)
@pytest.mark.parametrize('kmer', [
('ACCGTACAA' * 3),
('TTATAATAG' * 3),
('CGAAAAATT' * 3),
])
def test_assumptions(kmer):
ct = Counttable(27, 1e5, 2)
kmer_rc = kevlar.revcom(kmer)
assert ct.hash(kmer) == ct.hash(kmer_rc)
assert ct.get_kmer_hashes(kmer)[0] == ct.get_kmer_hashes(kmer_rc)[0]
@pytest.mark.parametrize('case,ctrl', [
('microtrios/trio-li-proband.fq.gz', 'microtrios/trio-li-??ther.fq.gz'),
('microtrios/trio-na-proband.fq.gz', 'microtrios/trio-na-??ther.fq.gz'),
('microtrios/trio-k-proband.fq.gz', 'microtrios/trio-k-??ther.fq.gz'),
])
def test_novel_single_mutation(case, ctrl, capsys):
casestr = data_file(case)
ctrls = kevlar.tests.data_glob(ctrl)
arglist = ['novel', '--case', casestr, '--ksize', '25', '--case-min', '7',
'--control', ctrls[0], '--control', ctrls[1],
'--num-bands', '2', '--band', '2',
'--ctrl-max', '0', '--memory', '500K']
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
out, err = capsys.readouterr()
for line in out.split('\n'):
if not line.endswith('#') or line.startswith('#mateseq'):
continue
abundmatch = re.search(r'(\d+) (\d+) (\d+)#$', line)
assert abundmatch, line
case = int(abundmatch.group(1))
ctl1 = int(abundmatch.group(2))
ctl2 = int(abundmatch.group(3))
assert case >= 7, line
assert ctl1 == 0 and ctl2 == 0, line
def test_novel_two_cases(capsys):
cases = kevlar.tests.data_glob('trio1/case6*.fq')
controls = kevlar.tests.data_glob('trio1/ctrl[5,6].fq')
with NamedTemporaryFile(suffix='.ct') as case1ct, \
NamedTemporaryFile(suffix='.ct') as case2ct, \
NamedTemporaryFile(suffix='.ct') as ctrl1ct, \
NamedTemporaryFile(suffix='.ct') as ctrl2ct:
counttables = [case1ct, case2ct, ctrl1ct, ctrl2ct]
seqfiles = cases + controls
for ct, seqfile in zip(counttables, seqfiles):
arglist = ['count', '--ksize', '19', '--memory', '1e7', ct.name,
seqfile]
print(arglist)
args = kevlar.cli.parser().parse_args(arglist)
kevlar.count.main(args)
arglist = ['novel', '--ksize', '19', '--memory', '1e7',
'--ctrl-max', '1', '--case-min', '7',
'--case', cases[0], '--case', cases[1],
'--case-counts', case1ct.name, case2ct.name,
'--control-counts', ctrl1ct.name, ctrl2ct.name]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
out, err = capsys.readouterr()
assert out.strip() != ''
for line in out.split('\n'):
if not line.endswith('#') or line.startswith('#mateseq'):
continue
abundmatch = re.search(r'(\d+) (\d+) (\d+) (\d+)#$', line)
assert abundmatch, line
case1 = int(abundmatch.group(1))
case2 = int(abundmatch.group(2))
ctl1 = int(abundmatch.group(3))
ctl2 = int(abundmatch.group(4))
assert case1 >= 7 and case2 >= 7
assert ctl1 <= 1 and ctl2 <= 1
def test_kmer_rep_in_read(capsys):
from sys import stdout
read = ('AGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGAT'
'GAGGATGAGGATGAGGAT')
record = kevlar.sequence.Record(name='reqseq', sequence=read)
record.annotate('GATGAGGATGAGGATGAGGATGAGG', 2, (11, 1, 0))
record.annotate('GATGAGGATGAGGATGAGGATGAGG', 8, (11, 1, 0))
kevlar.print_augmented_fastx(record, stdout)
out, err = capsys.readouterr()
assert read in out
def test_iter_read_multi_file():
infiles = kevlar.tests.data_glob('bogus-genome/mask-chr[1,2].fa')
print(infiles)
records = [r for r in kevlar.multi_file_iter_khmer(infiles)]
assert len(records) == 4
def test_novel_abund_screen(capsys):
case = data_file('screen-case.fa')
ctrl = data_file('screen-ctrl.fa')
arglist = ['novel', '--ksize', '25', '--ctrl-max', '1', '--case-min', '8',
'--case', case, '--control', ctrl, '--abund-screen', '3']
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
out, err = capsys.readouterr()
assert '>seq_error' not in out
def test_skip_until(capsys):
readname = 'bogus-genome-chr1_115_449_0:0:0_0:0:0_1f4/1'
case = data_file('trio1/case1.fq')
ctrls = kevlar.tests.data_glob('trio1/ctrl[1,2].fq')
arglist = [
'novel', '--ctrl-max', '0', '--case-min', '6', '--case', case,
'--control', ctrls[0], '--control', ctrls[1], '--skip-until', readname
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.logstream, logstream = sys.stderr, kevlar.logstream
kevlar.novel.main(args)
out, err = capsys.readouterr()
message = ('Found read bogus-genome-chr1_115_449_0:0:0_0:0:0_1f4/1 '
'(skipped 1001 reads)')
assert message in err
assert '29 unique novel kmers in 14 reads' in err
readname = 'BOGUSREADNAME'
arglist = [
'novel', '--ctrl-max', '0', '--case-min', '6', '--case', case,
'--control', ctrls[0], '--control', ctrls[1], '--skip-until', readname
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
kevlar.logstream = logstream
out, err = capsys.readouterr()
assert 'Found read' not in err
assert '(skipped ' not in err
assert 'Found 0 instances of 0 unique novel kmers in 0 reads' in err
def test_novel_save_counts():
outdir = mkdtemp()
try:
for ind in ('father', 'mother', 'proband'):
outfile = '{:s}/{:s}.ct'.format(outdir, ind)
infile = data_file('microtrios/trio-na-{:s}.fq.gz'.format(ind))
arglist = ['count', '--ksize', '27', '--memory', '500K', outfile,
infile]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.count.main(args)
arglist = [
'novel', '--ksize', '27', '--out', outdir + '/novel.augfastq.gz',
'--save-case-counts', outdir + '/kid.ct', '--save-ctrl-counts',
outdir + '/mom.ct', outdir + '/dad.ct', '--case',
data_file('microtrios/trio-na-proband.fq.gz'),
'--control', data_file('microtrios/trio-na-mother.fq.gz'),
'--control', data_file('microtrios/trio-na-father.fq.gz'),
'--memory', '500K'
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
counts = ('father', 'mother', 'proband')
testcounts = ('dad', 'mom', 'kid')
for c1, c2 in zip(counts, testcounts):
f1 = '{:s}/{:s}.ct'.format(outdir, c1)
f2 = '{:s}/{:s}.ct'.format(outdir, c2)
assert filecmp.cmp(f1, f2)
finally:
rmtree(outdir)
def test_novel_save_counts_mismatch(capsys):
outdir = mkdtemp()
try:
arglist = [
'novel', '--ksize', '27', '--out', outdir + '/novel.augfastq.gz',
'--save-case-counts', outdir + '/kid.ct', '--save-ctrl-counts',
outdir + '/mom.ct', outdir + '/dad.ct', outdir + '/sibling.ct',
'--case', data_file('microtrios/trio-k-proband.fq.gz'),
'--control', data_file('microtrios/trio-k-mother.fq.gz'),
'--control', data_file('microtrios/trio-k-father.fq.gz'),
'--memory', '500K'
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.logstream, logstream = sys.stderr, kevlar.logstream
kevlar.novel.main(args)
kevlar.logstream = logstream
finally:
rmtree(outdir)
out, err = capsys.readouterr()
assert 'stubbornly refusing to save k-mer counts' in err
def test_novel_load_counts(capsys):
file1 = data_file('simple-genome-case-reads.fa.gz')
file2 = data_file('ambig.fasta')
file3 = data_file('simple-genome-case.ct')
file4, file5 = data_glob('simple-genome-ctrl?.ct')
arglist = [
'novel', '-k', '25',
'--case', file1, file2, '--case-counts', file3,
'--control-counts', file4, file5
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.logstream, logstream = sys.stderr, kevlar.logstream
kevlar.novel.main(args)
kevlar.logstream = logstream
out, err = capsys.readouterr()
assert 'counttables for 2 sample(s) provided' in err
```
#### File: kevlar/tests/test_readgraph.py
```python
import pytest
import kevlar
from kevlar.readgraph import ReadGraph
from kevlar.tests import data_file
@pytest.mark.parametrize('partfile,edges,strictedges', [
('connectivity-1311.augfastq', 30, 11),
('connectivity-1541.augfastq', 31, 12),
])
def test_populate(partfile, edges, strictedges):
with kevlar.open(data_file(partfile), 'r') as fh:
reader = kevlar.parse_augmented_fastx(fh)
reads = list(reader)
rg = ReadGraph()
rg.load(reads)
rg.populate_edges()
assert rg.number_of_edges() == pytest.approx(edges, 1)
rg = ReadGraph()
rg.load(reads)
rg.populate_edges(strict=True)
assert rg.number_of_edges() == pytest.approx(strictedges, 1)
```
#### File: kevlar/tests/test_varfilter.py
```python
import kevlar
from kevlar.tests import data_file
def test_load_predictions():
with kevlar.open(data_file('five-snvs-with-likelihood.vcf'), 'r') as vcf:
vcfreader = kevlar.vcf.VCFReader(vcf)
index = kevlar.varfilter.load_predictions(vcfreader)
assert len(index) == 5
assert list(index.trees.keys()) == ['chr17']
assert index.query('chr1', 1, 1000000) == set()
assert index.query('chr17', 1, 1000000) == set()
result = [i.data.region for i in index.query('chr17', 36385017)]
assert result == [('chr17', 36385017, 36385018)]
def test_load_predictions_multi_chrom():
with kevlar.open(data_file('case-low-abund/calls.vcf.gz'), 'r') as vcf:
vcfreader = kevlar.vcf.VCFReader(vcf)
index = kevlar.varfilter.load_predictions(vcfreader)
assert len(index) == 5
assert set(index.trees.keys()) == set(['1', '9', '14'])
assert index.query('chr1', 1, 1000000) == set()
assert index.query('1', 1, 1000000) == set()
result = [i.data.region for i in index.query('1', 91850000, 91860000)]
assert set(result) == set([
('1', 91853096, 91853097),
('1', 91853110, 91853111),
])
result = [i.data.region for i in index.query('14', 82461000, 82462000)]
assert result == [('14', 82461856, 82461857)]
def test_varfilter_single():
bedstream = kevlar.parse_bed(
kevlar.open(data_file('fiveparts-ignore-single.bed'), 'r')
)
vcffile = data_file('five-snvs-with-likelihood.vcf')
with kevlar.open(vcffile, 'r') as vcfstream:
reader = kevlar.vcf.VCFReader(vcfstream)
varcalls = list(kevlar.varfilter.varfilter(reader, bedstream))
assert len(varcalls) == 5
filtered = [vc for vc in varcalls if vc.filterstr != 'PASS']
assert len(filtered) == 1
assert filtered[0].position == 36385017
def test_varfilter_main(capsys):
arglist = [
'varfilter', data_file('fiveparts-ignore.bed'),
data_file('five-snvs-with-likelihood.vcf')
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.varfilter.main(args)
out, err = capsys.readouterr()
outlines = out.strip().split('\n')
calls = [line for line in outlines if not line.startswith('#')]
assert len(calls) == 5
filtered = [c for c in calls if '\tUserFilter\t' in c]
assert len(filtered) == 2
positions = [c.split('\t')[1] for c in filtered]
assert sorted(positions) == sorted(['36385018', '3547691'])
```
#### File: kevlar/kevlar/timer.py
```python
import time
class Timer(object):
def __init__(self):
self._start_times = dict()
self._stop_times = dict()
def start(self, key=None):
if key is None:
key = ''
if key in self._start_times:
raise ValueError('Timer already started for "' + key + '"')
self._start_times[key] = time.time()
def stop(self, key=None):
if key is None:
key = ''
if key not in self._start_times:
raise ValueError('No timer started for "' + key + '"')
self._stop_times[key] = time.time()
return self._stop_times[key] - self._start_times[key]
def probe(self, key=None):
if key is None:
key = ''
if key not in self._start_times:
raise ValueError('No timer started for "' + key + '"')
current = time.time()
return current - self._start_times[key]
```
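A short usage sketch of `Timer`, mirroring how `kevlar.partition` combines an anonymous overall timer with named per-stage timers. The `time.sleep()` call stands in for real work.
```python
# Brief usage sketch of the Timer class above.
import time
from kevlar.timer import Timer

timer = Timer()
timer.start()             # anonymous key '' tracks total runtime
timer.start('stage1')
time.sleep(0.1)           # stand-in for real work
elapsed = timer.stop('stage1')
print('stage1 took {:.2f} sec'.format(elapsed))
print('running total: {:.2f} sec'.format(timer.probe()))
total = timer.stop()
print('total: {:.2f} sec'.format(total))
```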
#### File: kevlar/kevlar/varfilter.py
```python
import kevlar
def load_predictions(varcalls):
kevlar.plog('[kevlar::varfilter] Loading predictions to filter')
index = kevlar.IntervalForest()
for call in varcalls:
index.insert(*call.region, data=call)
return index
def varfilter(callstream, maskstream):
callindex = load_predictions(callstream)
message = 'Filtering preliminary variant calls'
kevlar.plog('[kevlar::varfilter]', message)
    progress_indicator = kevlar.ProgressIndicator(
'[kevlar::varfilter] {counter} regions processed', interval=1e5,
breaks=[1e6, 1e6, 1e7], usetimer=True,
)
for chrom, start, end, data in maskstream:
hits = callindex.query(chrom, start, end)
for interval in hits:
interval.data.filter(kevlar.vcf.VariantFilter.UserFilter)
        progress_indicator.update()
for varcall in callindex:
yield varcall
def main(args):
reader = kevlar.vcf.vcfstream(args.vcf)
bedstream = kevlar.parse_bed(kevlar.open(args.filt, 'r'))
outstream = kevlar.open(args.out, 'w')
writer = kevlar.vcf.VCFWriter(outstream, source='kevlar::varfilter')
writer.write_header()
for varcall in varfilter(reader, bedstream):
writer.write(varcall)
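# A usage sketch (hypothetical file names), mirroring the tests in
# test_varfilter.py above: the preliminary calls are indexed in an
# IntervalForest, every BED region in the mask flags overlapping calls
# with the UserFilter filter, and all calls are then yielded back.
#
#   bedstream = kevlar.parse_bed(kevlar.open('mask.bed', 'r'))
#   with kevlar.open('calls.vcf', 'r') as vcfstream:
#       reader = kevlar.vcf.VCFReader(vcfstream)
#       for call in varfilter(reader, bedstream):
#           print(call.position, call.filterstr)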
``` |
{
"source": "johnsmith400/bunny-storm",
"score": 2
} |
#### File: bunny-storm/bunny_storm/channel_configuration.py
```python
import asyncio
from logging import Logger
from typing import Union, List
from aio_pika import RobustChannel, RobustExchange, RobustQueue
from aio_pika.types import CloseCallbackType, Sender
from . import AsyncConnection
class ChannelConfiguration:
"""
Responsible for the management of a single channel in a given connection.
Exposes APIs for the declaration of exchanges and queues.
Automatically recovers from being closed.
"""
_logger: Logger
_connection: AsyncConnection
_loop: asyncio.AbstractEventLoop
_prefetch_count: int
_channel_number: int
_publisher_confirms: bool
_on_return_raises: bool
_channel_lock: asyncio.Lock
_channel: Union[RobustChannel, None]
_started: bool
_channel_close_callbacks: List[CloseCallbackType]
def __init__(self, connection: AsyncConnection, logger: Logger, loop: asyncio.AbstractEventLoop = None,
prefetch_count: int = 1, channel_number: int = None, publisher_confirms: bool = True,
on_return_raises: bool = False):
"""
:param connection: AsyncConnection instance to create a channel with
:param logger: Logger
:param loop: Event loop
:param prefetch_count: Prefetch count of channel
:param channel_number: Index of channel (set automatically if None)
:param publisher_confirms: Whether or not to use publisher confirms
:param on_return_raises: Whether or not to raise an error in case publishing a message causes a return
"""
self._logger = logger
self._connection = connection
self._loop = loop or asyncio.get_event_loop()
self._prefetch_count = prefetch_count
self._channel_number = channel_number
self._publisher_confirms = publisher_confirms
self._on_return_raises = on_return_raises
self._channel_lock = asyncio.Lock()
self._channel = None
self._started = False
self._channel_close_callbacks = []
@property
def started(self) -> bool:
"""
Whether or not the channel has been started
:return: self._started
"""
return self._started
@property
def publisher_confirms(self) -> bool:
"""
:return: self._publisher_confirms
"""
return self._publisher_confirms
@property
def logger(self) -> Logger:
"""
:return: self._logger
"""
return self._logger
@property
def loop(self) -> asyncio.AbstractEventLoop:
"""
:return: self._loop
"""
return self._loop
async def get_default_exchange(self) -> RobustExchange:
await self.ensure_channel()
return self._channel.default_exchange
def add_close_callback(self, callback: CloseCallbackType) -> None:
"""
Adds a callback which will be called in case the channel is closed. Callbacks are called sequentially.
:param callback: Callback to add
"""
self._channel_close_callbacks.append(callback)
async def ensure_channel(self) -> RobustChannel:
"""
Ensures that the channel has been started and is open, then returns it.
If the channel has not been started, starts the channel.
If the channel is closed, reopens it.
Uses a Lock to ensure that no race conditions can occur.
:return: Robust channel held by this instance
"""
async with self._channel_lock:
if not self._started:
await self._start_channel()
if self._channel.is_closed:
await self._channel.reopen()
return self._channel
async def close_channel(self, exc: BaseException) -> None:
"""
Resets the channel.
If the channel is already closed, calls the callback for the channel being closed.
If the channel is open, closes the channel.
:param exc: Exception to pass for the channel's closing
"""
if self._channel.is_closed:
self._on_channel_close(None, exc)
else:
await self._channel.close(exc)
def _on_channel_close(self, sender: Sender, exc: BaseException) -> None:
"""
Handles the channel getting closed.
Prints indicative logs, sets self._started to False, and calls all channel close callbacks.
:param sender: Closer
:param exc: Exception which caused the closing
"""
self.logger.error("Channel closed. Exception info: ")
self.logger.error(exc, exc_info=True)
self._started = False
for callback in self._channel_close_callbacks:
callback(sender, exc)
async def _start_channel(self) -> RobustChannel:
"""
Creates a new channel using the AsyncConnection given to this instance, and sets the relevant parameters.
:return: Channel created
"""
self.logger.info("Creating channel")
connection = await self._connection.get_connection()
self._channel = await connection.channel(channel_number=self._channel_number,
publisher_confirms=self._publisher_confirms,
on_return_raises=self._on_return_raises)
self._channel.add_close_callback(self._on_channel_close)
await self._channel.set_qos(prefetch_count=self._prefetch_count)
self._started = True
return self._channel
async def declare_exchange(self,
exchange_name: str,
exchange_type: str = "direct",
durable: bool = None,
auto_delete: bool = False) -> RobustExchange:
"""
Declares an exchange with the given parameters.
If an exchange with the given name already exists in the channel, returns it instead of creating a new one.
:param exchange_name: Exchange name
:param exchange_type: Exchange type
:param durable: Exchange durability
:param auto_delete: Whether or not the exchange is auto deleted
:return: Exchange which was declared or gotten
"""
await self.ensure_channel()
self.logger.info(f"Declaring exchange: {exchange_name}")
if exchange_name not in self._channel._exchanges:
exchange = await self._channel.declare_exchange(
name=exchange_name,
type=exchange_type,
durable=durable,
auto_delete=auto_delete,
)
self.logger.info(f"Declared exchange: {exchange_name}")
else:
exchange = list(self._channel._exchanges[exchange_name])[0]
return exchange
async def declare_queue(self,
queue_name: str,
exchange: RobustExchange = None,
routing_key: str = None,
durable: bool = None,
auto_delete: bool = False) -> RobustQueue:
"""
Declares a queue with the given parameters.
If a queue with the given name already exists in the channel, gets it instead of creating a new one.
If an exchange and valid routing key are passed, the queue is bound to the exchange with the routing key.
:param queue_name: Queue name
:param exchange: Exchange to bind queue to
:param routing_key: Routing key to bind queue with
:param durable: Queue durability
:param auto_delete: Whether or not the queue auto deletes
:return: Declared or gotten queue
"""
await self.ensure_channel()
self.logger.info(f"Declaring queue: {queue_name}")
if queue_name not in self._channel._queues:
queue = await self._channel.declare_queue(
name=queue_name,
durable=durable,
auto_delete=auto_delete,
)
self.logger.info(f"Declared queue: {queue_name}")
else:
queue = list(self._channel._queues[queue_name])[0]
if exchange and routing_key:
self.logger.info(f"Binding queue: {queue_name} to exchange: {exchange}, route: {routing_key}")
await queue.bind(exchange=exchange, routing_key=routing_key)
return queue
```
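A minimal sketch of driving the `ChannelConfiguration` API above, assuming an `AsyncConnection` instance is already available from the same package (the exchange and queue names are illustrative only):
```python
import logging

from bunny_storm import AsyncConnection
from bunny_storm.channel_configuration import ChannelConfiguration


async def setup_topology(connection: AsyncConnection) -> None:
    logger = logging.getLogger("channel_example")
    channel_config = ChannelConfiguration(connection, logger, prefetch_count=10)

    # Declaring an exchange/queue lazily creates (or recovers) the underlying channel
    exchange = await channel_config.declare_exchange("demo_exchange", exchange_type="direct", durable=True)
    await channel_config.declare_queue("demo_queue", exchange=exchange, routing_key="demo", durable=True)
```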
#### File: bunny-storm/Examples/FibonacciExecutor.py
```python
import sys
import asyncio
from bunny_storm import AsyncAdapter, RabbitMQConnectionData
def calc_fib(message):
n = int(message)
a, b = 0, 1
for i in range(n):
a, b = b, a + b
return a
async def handle_message(logger, message):
logger.info("Fibonacci calc request {}".format(message))
res = str(calc_fib(message.body)).encode()
logger.info("Fibonacci calc result {}".format(res))
return res
async def handle_test(logger, message):
logger.info(f"Got message: {message}")
logger.info("Test succeeded!")
if __name__ == "__main__":
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
print("Creating Rabbit Listener")
configuration = dict(
receive=dict(
incoming_1=dict(
exchange_name="test_rpc",
exchange_type="direct",
routing_key="fib_calc",
queue_name="fib_calc_q",
durable=True,
auto_delete=False,
prefetch_count=1
),
incoming_2=dict(
exchange_name="test_2",
exchange_type="direct",
routing_key="test_2",
queue_name="test_2",
durable=True,
auto_delete=False,
prefetch_count=1
)
),
publish=dict(
outgoing=dict(
exchange_name="test_server",
exchange_type="direct",
routing_key="fib_server",
durable=True,
auto_delete=False,
prefetch_count=1
)
)
)
# Using asyncio IO Loop
loop = asyncio.get_event_loop()
rabbit_connection_data = RabbitMQConnectionData(username="test_user",
password="<PASSWORD>",
virtual_host="vhost",
connection_name="executor")
rabbit_connection = AsyncAdapter(rabbitmq_connection_data=rabbit_connection_data,
configuration=configuration,
loop=loop)
loop.create_task(rabbit_connection.receive(handler=handle_message,
queue=configuration["receive"]["incoming_1"]["queue_name"]))
loop.create_task(rabbit_connection.receive(handler=handle_test,
queue=configuration["receive"]["incoming_2"]["queue_name"]))
loop.run_forever()
``` |
{
"source": "John-smith-889/difpy",
"score": 3
} |
#### File: difpy/difpy/initialize.py
```python
import difpy as dp
import networkx as nx
import numpy as np
import random
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import statistics as st
#=============================================================================#
# Function for create graph and initialize #
#==========================================#
def graph_init(n = 26, # number of nodes
               k = 5, # number of neighbours of each node before rewiring edges
               rewire_prob = 0.1, # probability of rewiring an edge to a random place
               initiation_perc = 0.1, # percent of randomly informed nodes
               show_attr = True, # show node weights and attributes
               draw_graph = True): # draw the graph
""" Graph initialization with watts_strogatz_graph() function.
    Create a graph with weights added as edge attributes, and the
    following node attributes: extraversion, receptiveness, engagement.
    The graph is ready for running simulations with the difpy package.
Parameters
----------
n : integer
        Number of nodes in the graph.
    k : integer
        Number of neighbours of each node before rewiring edges.
    rewire_prob : float
        Probability of rewiring an edge to a random place.
    initiation_perc : float
        Percent of randomly aware nodes.
    show_attr : bool, optional
        Show list of weights and other generated attributes of nodes.
draw_graph : bool, optional
Draw graph.
Returns
-------
G : graph
A networkx graph object.
pos : dictionary with 2 element ndarrays as values
Object contains positions of nodes in the graph chart. Pos is used
to draw the graph after simulation step.
"""
#==============#
# Create graph #
#==============#
# Create basic watts-strogatz graph
G = nx.watts_strogatz_graph(n = n, k = k, p = rewire_prob, seed=None)
# Compute a position of graph elements
pos = nx.spring_layout(G)
#======================#
# Add weights to graph #
#======================#
# Weights - are probabilities of contact between nodes of given social
# network.
# Weights are randomly sampled from exponential distribution.
    # Values have to be scaled, so they cannot be added directly to the graph;
    # they are added after generation, scaling, and replacing zeros with
    # 0.000001 for computation purposes
# Create ndarray of weights
weights = np.round(np.random.exponential(scale = 0.1,
size = G.number_of_edges()), 6).reshape(G.number_of_edges(),1)
# Scale weights to [0,1] range
scaler = MinMaxScaler()
scaler.fit(weights)
scaled_weights = scaler.transform(weights)
scaled_weights = np.round(scaled_weights, 6)
# eliminate zeros for computation purposes
for (x,y), i in np.ndenumerate(scaled_weights):
if i == 0:
scaled_weights[x,y] =0.000001
# Add weights to the graph
for i, (u, v) in enumerate(G.edges()):
G[u][v]['weight'] = scaled_weights[i,0]
#============================#
# Set node attribute - state #
#============================#
# "State" Variable levels:
# * Unaware - is actor who did not internalized the information and
# is not able to pass it down. Initially, all nodes are
# in state: Unaware.
# * Aware - is the actor who internalized the information and is able
# to pass it down.
nx.set_node_attributes(G, 'unaware', 'state') # (G, value, key)
#====================================#
# Set node attribute - receptiveness #
#====================================#
# Receptiveness - general parameter of each node, expressing how much
# in general the actor is receptive in context of given social network.
# Receptiveness is randomly sampled from normal distribution.
# Create ndarray of receptiveness
receptiveness = np.round(np.random.normal(
size = G.number_of_edges()), 6).reshape(G.number_of_edges(),1)
# Scale weights to [0,1] range
scaler = MinMaxScaler()
scaler.fit(receptiveness)
scaled_receptiveness = scaler.transform(receptiveness)
scaled_receptiveness = np.round(scaled_receptiveness, 6)
# eliminate zeros for computation purposes
for (x,y), i in np.ndenumerate(scaled_receptiveness):
if i == 0:
scaled_receptiveness[x,y] =0.000001
# Add receptiveness parameter to nodes
for v in G.nodes():
G.nodes[v]['receptiveness'] = scaled_receptiveness[v,0]
#===================================#
# Set node attribute - extraversion #
#===================================#
# Extraversion is agent eagerness to express itself to other agents
# Extraversion is randomly sampled from normal distribution.
# Create ndarray of extraversion
extraversion = np.round(np.random.normal(
size = G.number_of_edges()), 6).reshape(G.number_of_edges(),1)
# Scale weights to [0,1] range
scaler = MinMaxScaler()
scaler.fit(extraversion)
scaled_extraversion = scaler.transform(extraversion)
scaled_extraversion = np.round(scaled_extraversion, 6)
# eliminate zeros for computation purposes
for (x,y), i in np.ndenumerate(scaled_extraversion):
if i == 0:
scaled_extraversion[x,y] =0.000001
    # Add extraversion parameter to nodes
for v in G.nodes():
G.nodes[v]['extraversion'] = scaled_extraversion[v,0]
#=================================#
# Set node attribute - engagement #
#=================================#
    # Engagement - engagement with the information-related topic,
    # strength of the experiences connected with the topic.
    # How much the information is objectively relevant for the actor.
# Engagement is randomly sampled from exponential distribution.
# Create ndarray of engagement
engagement = np.round(np.random.exponential(
size = G.number_of_edges()), 6).reshape(G.number_of_edges(),1)
# Scale weights to [0,1] range
scaler = MinMaxScaler()
scaler.fit(engagement)
scaled_engagement = scaler.transform(engagement)
scaled_engagement = np.round(scaled_engagement, 6)
# eliminate zeros for computation purposes
for (x,y), i in np.ndenumerate(scaled_engagement):
if i == 0:
scaled_engagement[x,y] =0.000001
    # Add engagement parameter to nodes
for v in G.nodes():
G.nodes[v]['engagement'] = scaled_engagement[v,0]
#===================#
# Random initiation #
#===================#
# Compute number of nodes
N = G.number_of_nodes()
# Return list of numbers of randomly aware agents
infected_agents_id = random.sample(population = range(0,N),
k = int(N * initiation_perc))
# Set those nodes as aware
for v in infected_agents_id:
G.nodes[v]['state'] = 'aware'
#=======================#
# Show nodes attributes #
#=======================#
if show_attr == True:
print("Node attributes:")
for (u, v) in G.nodes.data():
print(u, v)
        # Check what the scaled weights look like
x = list(range(len(scaled_weights)))
scaled_weights = np.sort(scaled_weights, axis = 0)
# show numbered values
dict_0 = dict(zip(x,scaled_weights))
print("Wages:")
for u, v in dict_0.items():
print(u, v)
#============#
# Draw graph #
#============#
if draw_graph == True:
dp.draw_graph(G = G, pos = pos)
# draw_colored_graph_2
return G, pos
#=============================================================================#
# Function for drawing the graph #
#================================#
def draw_graph(G, # graph
pos, # position of nodes
aware_color = '#f63f89',
not_aware_color = '#58f258',
legend = True):
""" Draw the graph G using Matplotlib and NetworkX.
    Draw the graph with Matplotlib and NetworkX with two colors associated
    with two types of agents - those aware of certain information and those
    unaware of it.
Legend describing nodes is optional.
Parameters
----------
G : graph
A networkx graph
pos : dictionary with 2 element ndarrays as values
Object contains positions of nodes in the graph chart. Pos is used
to draw the graph after simulation step.
aware_color : string
Specify the color of nodes aware of certain information.
not_aware_color : string
Specify the color of nodes unaware of certain information.
legend : bool, optional
Add legend to the graph which describes colored nodes.
"""
# Create variables for store nodes numbers
color_map_1 = []
color_map_2 = []
# Create list of nodes numbers which are 'aware'
awarelist = [i for i, d in G.nodes.data() if d['state'] == 'aware' ]
# Create list of nodes numbers which are not 'aware'
notawarelist = [i for i in range(len(G.nodes.data())) if i not in awarelist]
# Append strings about colors to color_map lists
for node in G:
if node in awarelist:
color_map_1.append(aware_color) # aware
else: color_map_2.append(not_aware_color) # not aware
# Draw the graph
plt.title("Graph")
nx.draw_networkx_nodes(G,pos = pos, nodelist = awarelist,
node_color = color_map_1, with_labels = True,
label='Aware agent', alpha = 0.7)
nx.draw_networkx_nodes(G,pos = pos, nodelist = notawarelist,
node_color = color_map_2, with_labels = True,
label='Not aware agent', alpha = 0.7)
nx.draw_networkx_labels(G, pos = pos, font_size=12, font_color='k',
font_family='sans-serif', font_weight='normal',
alpha=1.0)
nx.draw_networkx_edges(G,pos=pos)
# optional legend
if legend == True:
plt.legend(numpoints = 1)
#=============================================================================#
# Function for graph review #
#===========================#
def graph_stats(G, pos, draw_degree = True, show_attr = True,
draw_graph = True):
"""
Function for checking basic graph statistics, node attributes and
    weights.
Parameters
----------
G : graph
A networkx graph object.
pos : dictionary with 2 element ndarrays as values
Object contains positions of nodes in the graph chart. Pos is used
to draw the graph after simulation step.
show_attr : bool
Show nodes attributes and weights.
draw_degree : bool
Draw nodes degree distribution.
draw_graph : bool
Draw graph.
Returns
-------
dict_stat : dictionary
A dictionary with graph statistics.
"""
    #================================#
    # Compute basic graph statistics #
    #================================#
nodes = len(G.nodes())
edges = len(G.edges())
mean_degree = st.mean([v for k,v in nx.degree(G)])
avg_clustering_coef = nx.average_clustering(G, nodes=None,
weight=None,
count_zeros=True)
avg_clustering_coef = round(avg_clustering_coef, 4)
# https://en.wikipedia.org/wiki/Clustering_coefficient
# https://networkx.github.io/documentation/stable/reference/
# algorithms/generated/networkx.algorithms.cluster.average_
# clustering.html#networkx.algorithms.cluster.average_clustering
# average of local clustering coefficients (for each node)
transitivity = nx.transitivity(G) # fraction of all possible triangles
transitivity = round(transitivity, 4)
global dict_stat
dict_stat = {'nodes': nodes,
'edges': edges,
'mean node degree': mean_degree,
'average clustering coefficient': avg_clustering_coef,
'transitivity': transitivity}
print('\n' + "General information:" + '\n')
for k,v in dict_stat.items():
print(k,': ', v)
#========================#
# Show nodes' attributes #
#========================#
if show_attr == True:
print('\n' + "Node attributes:" + '\n')
for (u, v) in G.nodes.data():
print(u, v)
print('\n' + "Sorted weights:" + '\n')
#global wages_list
#wages_list = []
for i,(u, v, wt) in enumerate(sorted(G.edges.data('weight'), key = lambda x: x[2])):
print(i, wt)
#wages_list.append((i, wt))
#==========================#
# Degree distribution plot #
#==========================#
if draw_degree == True:
# degree distribution
degree_distribution = sorted([v for k,v in nx.degree(G)], reverse = True)
x = range(len(degree_distribution))
fig_01, ax_01 = plt.subplots() # enable to plot one by one
plt.scatter(x, degree_distribution, marker='o', c= 'blue', alpha=0.5)
plt.ylabel('Node degree');
plt.xlabel('Node number');
plt.suptitle('Nodes degree distribution', fontsize=16)
#============#
# Draw graph #
#============#
if draw_graph == True:
fig_01, ax_01 = plt.subplots() # enable to plot one by one
# in separate windows
dp.draw_graph(G = G, pos = pos)
#=============================================================================#
# Function for adding feature to a graph #
#========================================#
def add_feature(G,
pos,
feature = None,
feature_type = None,
scaling = True,
decimals = 6,
show_attr = True, # show node weights and attributes
show_weights = True,
draw_graph = False):
""" Add feature to the graph.
Function dedicated for adding existing feature to the graph
with optional feature scaling.
Parameters
----------
G : graph
A networkx graph object.
pos : dictionary with 2 element ndarrays as values
Object contains positions of nodes in the graph chart. Pos is used
to draw the graph after simulation step.
feature : ndarray
ndarray in shape (<number of nodes/edges>, 1).
feature_type : string
Levels: "weights", "receptiveness", "extraversion", "engagement",
"state", or custom ones which may be used for measuring
feature importance in information propagation during
modelling.
scaling : bool, optional
Scale weights to (0,1] range.
decimals : integer, optional
        Number of decimal digits used when rounding weights.
    show_attr : bool, optional
        Show list of weights and other generated attributes of nodes.
draw_graph : bool, optional
Draw graph.
Returns
-------
G : graph
A networkx graph object.
"""
    # Values may be scaled, so they cannot be added directly to the graph;
    # they are added after generation, scaling, and replacing zeros with
    # 0.000001 for computation purposes
# Only for numeric variables
if scaling == True:
# Scale weights to [0,1] range
scaler = MinMaxScaler()
scaler.fit(feature)
feature = scaler.transform(feature)
feature = np.round(feature, decimals)
# eliminate zeros for computation purposes
for (x,y), i in np.ndenumerate(feature):
if i == 0:
feature[x,y] =0.000001
#======================#
# Add weights to graph #
#======================#
# Weights - are probabilities of contact between nodes of given social
# network.
if feature_type == "weights":
# Add weights to the graph
for i, (u, v) in enumerate(G.edges()):
G[u][v]['weight'] = feature[i,0]
#====================================#
# Set node attribute - receptiveness #
#====================================#
# Receptiveness - general parameter of each node, expressing how much
# in general the actor is receptive in context of given social network.
if feature_type == "receptiveness":
# Add receptiveness parameter to nodes
for v in G.nodes():
G.nodes[v]['receptiveness'] = feature[v,0]
#===================================#
# Set node attribute - extraversion #
#===================================#
# Extraversion is agent eagerness to express itself to other agents.
if feature_type == "extraversion":
# Add extraversion parameter to nodes
for v in G.nodes():
G.nodes[v]['extraversion'] = feature[v,0]
#=================================#
# Set node attribute - engagement #
#=================================#
    # Engagement - engagement with the information-related topic,
    # strength of the experiences connected with the topic.
    # How much the information is objectively relevant for the actor.
if feature_type == "engagement":
# Add engagement parameter to nodes
for v in G.nodes():
G.nodes[v]['engagement'] = feature[v,0]
#============================#
# Set node attribute - state #
#============================#
# "State" Variable levels:
# * Unaware - is actor who did not internalized the information and
# is not able to pass it down.
# * Aware - is the actor who internalized the information and is able
# to pass it down.
if feature_type == "state":
        # Add state parameter to nodes
for v in G.nodes():
G.nodes[v]['state'] = feature[v,0]
#=======================================#
# Set node attribute - custom parameter #
#=======================================#
if feature_type not in ["weights", "receptiveness", "extraversion",
"engagement", "state"]:
# Add parameter to nodes
for v in G.nodes():
G.nodes[v][feature_type] = feature[v,0]
#========================#
# Show nodes' attributes #
#========================#
if show_attr == True:
print('\n' + "Nodes' attributes:" + '\n')
for (u, v) in G.nodes.data():
print(u, v)
#========================#
# Show nodes' attributes #
#========================#
if show_weights == True:
# Show weights
print('\n' + "Sorted weights:" + '\n')
for i,(u, v, wt) in enumerate(sorted(G.edges.data('weight'),
key = lambda x: x[2])):
print(i, wt)
#============#
# Draw graph #
#============#
if draw_graph == True:
dp.draw_graph(G = G, pos = pos)
return G
#=============================================================================#
# Function for adding random state to graph #
#===========================================#
def add_state_random(G, pos, initiation_perc, show_attr = True,
draw_graph = True):
""" Add state variable values to the graph's nodes.
    State is the variable which describes the state of a node - whether it
    is aware of some information or not.
Parameters
----------
G : graph
A networkx graph object.
pos : dictionary with 2 element ndarrays as values
Object contains positions of nodes in the graph chart. Pos is used
to draw the graph after simulation step.
initiation_perc : float
Percent of randomly aware nodes.
show_attr : bool, optional
        Show list of weights and other generated attributes of nodes.
draw_graph : bool, optional
Draw graph.
Returns
-------
G : graph
A networkx graph object.
"""
#===================#
# Random initiation #
#===================#
# Add 'unaware' state for all nodes
nx.set_node_attributes(G, 'unaware', 'state') # (G, value, key)
# Compute number of nodes
N = G.number_of_nodes()
# Return list of numbers of randomly aware agents
infected_agents_id = random.sample(population = range(0,N),
k = int(N * initiation_perc))
# Set those nodes as aware
for v in infected_agents_id:
G.nodes[v]['state'] = 'aware'
#========================#
# Show nodes' attributes #
#========================#
if show_attr == True:
print("Node attributes:")
for (u, v) in G.nodes.data():
print(u, v)
#============#
# Draw graph #
#============#
if draw_graph == True:
fig_01, ax_01 = plt.subplots() # enable to plot one by one
# in separate windows
dp.draw_graph(G = G, pos = pos)
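# A usage sketch of the functions above (the parameter values are illustrative):
#
#   import difpy as dp
#   G, pos = dp.graph_init(n=26, k=5, rewire_prob=0.1, initiation_perc=0.1,
#                          show_attr=False, draw_graph=False)
#   dp.graph_stats(G, pos, draw_degree=False, show_attr=False, draw_graph=False)
#   G = dp.add_state_random(G, pos, initiation_perc=0.2,
#                           show_attr=False, draw_graph=False)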
``` |
{
"source": "JohnSmith-Eins/door_guard_suite_with_face_recognition",
"score": 3
} |
#### File: JohnSmith-Eins/door_guard_suite_with_face_recognition/core(without_parameters).py
```python
import face_recognition
import cv2
import numpy as np
import paramiko
import RPi.GPIO as GPIO
import time
from datetime import datetime
import _thread
# End of imports
# Initialize parameters
# GPIO pin
Pin = 3
# Interval between unlocking the door and locking it again (seconds)
stime = 10
# Cloud server address
Host = ''
# Port
Port = 22
# Login username
uname = ''
# Login password
pwd = ''
# Reference photo
# The example uses Obama
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Parameter initialization complete
# Initialization
# Initialize the GPIO pin output (HIGH corresponds to the locked state)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(Pin, GPIO.OUT, initial = GPIO.HIGH)
# Define the GPIO toggle function so it can run in a separate thread later
def DoorOpenwithGPIO( ):
    # When a known face is detected, open the door; restore the locked state after stime seconds
GPIO.output(Pin,GPIO.LOW)
time.sleep(stime)
GPIO.output(Pin,GPIO.HIGH)
# Initialize the SFTP connection in preparation for uploading images
trans = paramiko.Transport(Host, Port)
trans.start_client()
trans.auth_password(username = uname, password = pwd)
sftp = paramiko.SFTPClient.from_transport(trans)
# Read data from the camera
video_capture = cv2.VideoCapture(0)
# Load the list of known faces
known_face_encodings = [
obama_face_encoding
]
known_face_names = [
"<NAME>"
]
# Initialize variables for the face recognition section
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# End of initialization
# Core face recognition code (the English comments come from the face_recognition project)
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
            # Also save the detected face as "YYYYMMDD_HHMMSS.jpg" and upload it to the server
outputfilename = datetime.now().strftime("%Y%m%d_%H%M%S") + '.jpg'
cv2.imwrite(outputfilename, small_frame)
outputfilepath = '/home/hgf/human/' + outputfilename
sftp.put(outputfilename, outputfilepath)
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
_thread.start_new_thread( DoorOpenwithGPIO, () )
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#End of the core code section
#Release resources
#Reset the GPIO state
GPIO.cleanup()
#Close the SFTP connection
trans.close()
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
#End of resource cleanup
``` |
{
"source": "JohnSmith-Eins/GoBang_With_AI",
"score": 3
} |
#### File: JohnSmith-Eins/GoBang_With_AI/aiGobang.py
```python
import random
from itertools import product
class aiGobang():
def __init__(self, ai_color, player_color, search_depth=1, **kwargs):
assert search_depth % 2, 'search_depth must be odd number'
self.ai_color = ai_color
self.player_color = player_color
self.search_depth = search_depth
self.score_model = [(50, (0, 1, 1, 0, 0)), (50, (0, 0, 1, 1, 0)), (200, (1, 1, 0, 1, 0)),
(500, (0, 0, 1, 1, 1)), (500, (1, 1, 1, 0, 0)), (5000, (0, 1, 1, 1, 0)),
(5000, (0, 1, 0, 1, 1, 0)), (5000, (0, 1, 1, 0, 1, 0)), (5000, (1, 1, 1, 0, 1)),
(5000, (1, 1, 0, 1, 1)), (5000, (1, 0, 1, 1, 1)), (5000, (1, 1, 1, 1, 0)),
(5000, (0, 1, 1, 1, 1)), (50000, (0, 1, 1, 1, 1, 0)), (99999999, (1, 1, 1, 1, 1))]
self.alpha = -99999999
self.beta = 99999999
self.all_list = [(i, j) for i, j in product(range(19), range(19))]
    '''Public entry point'''
def act(self, history_record):
self.ai_list = []
self.player_list = []
self.aiplayer_list = []
for item in history_record:
self.aiplayer_list.append((item[0], item[1]))
if item[-1] == self.ai_color:
self.ai_list.append((item[0], item[1]))
elif item[-1] == self.player_color:
self.player_list.append((item[0], item[1]))
while True:
self.next_point = random.choice(range(19)), random.choice(range(19))
if self.next_point not in self.aiplayer_list:
break
self.__doSearch(True, self.search_depth, self.alpha, self.beta)
return self.next_point
    '''Negamax search with alpha-beta pruning'''
def __doSearch(self, is_ai_round, depth, alpha, beta):
if self.__isgameover(self.ai_list) or self.__isgameover(self.player_list) or depth == 0:
return self.__evaluation(is_ai_round)
blank_list = list(set(self.all_list).difference(set(self.aiplayer_list)))
blank_list = self.__rearrange(blank_list)
for next_step in blank_list:
if not self.__hasNeighbor(next_step):
continue
if is_ai_round:
self.ai_list.append(next_step)
else:
self.player_list.append(next_step)
self.aiplayer_list.append(next_step)
value = -self.__doSearch(not is_ai_round, depth-1, -beta, -alpha)
if is_ai_round:
self.ai_list.remove(next_step)
else:
self.player_list.remove(next_step)
self.aiplayer_list.remove(next_step)
if value > alpha:
if depth == self.search_depth:
self.next_point = next_step
if value >= beta:
return beta
alpha = value
return alpha
    '''Check whether the game is over'''
def __isgameover(self, oneslist):
for i, j in product(range(19), range(19)):
if i < 15 and (i, j) in oneslist and (i+1, j) in oneslist and (i+2, j) in oneslist and (i+3, j) in oneslist and (i+4, j) in oneslist:
return True
elif j < 15 and (i, j) in oneslist and (i, j+1) in oneslist and (i, j+2) in oneslist and (i, j+3) in oneslist and (i, j+4) in oneslist:
return True
elif i < 15 and j < 15 and (i, j) in oneslist and (i+1, j+1) in oneslist and (i+2, j+2) in oneslist and (i+3, j+3) in oneslist and (i+4, j+4) in oneslist:
return True
elif i > 3 and j < 15 and (i, j) in oneslist and (i-1, j+1) in oneslist and (i-2, j+2) in oneslist and (i-3, j+3) in oneslist and (i-4, j+4) in oneslist:
return True
return False
    '''Reorder the empty (unplayed) positions'''
def __rearrange(self, blank_list):
last_step = self.aiplayer_list[-1]
for item in blank_list:
for i, j in product(range(-1, 2), range(-1, 2)):
if i == 0 and j == 0:
continue
next_step = (last_step[0]+i, last_step[1]+j)
if next_step in blank_list:
blank_list.remove(next_step)
blank_list.insert(0, next_step)
return blank_list
    '''Check whether a neighboring stone exists'''
def __hasNeighbor(self, next_step):
for i, j in product(range(-1, 2), range(-1, 2)):
if i == 0 and j == 0:
continue
if (next_step[0]+i, next_step[1]+j) in self.aiplayer_list:
return True
return False
    '''Calculate the score'''
def __calcScore(self, i, j, x_direction, y_direction, list1, list2, all_scores):
add_score = 0
max_score = (0, None)
for each in all_scores:
for item in each[1]:
if i == item[0] and j == item[1] and x_direction == each[2][0] and y_direction == each[2][1]:
return 0, all_scores
for noffset in range(-5, 1):
position = []
for poffset in range(6):
x, y = i + (poffset + noffset) * x_direction, j + (poffset + noffset) * y_direction
if (x, y) in list2:
position.append(2)
elif (x, y) in list1:
position.append(1)
else:
position.append(0)
shape_len5 = tuple(position[0: -1])
shape_len6 = tuple(position)
for score, shape in self.score_model:
if shape_len5 == shape or shape_len6 == shape:
if score > max_score[0]:
max_score = (score, ((i + (0 + noffset) * x_direction, j + (0 + noffset) * y_direction),
(i + (1 + noffset) * x_direction, j + (1 + noffset) * y_direction),
(i + (2 + noffset) * x_direction, j + (2 + noffset) * y_direction),
(i + (3 + noffset) * x_direction, j + (3 + noffset) * y_direction),
(i + (4 + noffset) * x_direction, j + (4 + noffset) * y_direction)), (x_direction, y_direction))
if max_score[1] is not None:
for each in all_scores:
for p1 in each[1]:
for p2 in max_score[1]:
if p1 == p2 and max_score[0] > 10 and each[0] > 10:
add_score += max_score[0] + each[0]
all_scores.append(max_score)
return add_score+max_score[0], all_scores
    '''Evaluation function'''
def __evaluation(self, is_ai_round):
if is_ai_round:
list1 = self.ai_list
list2 = self.player_list
else:
list2 = self.ai_list
list1 = self.player_list
active_all_scores = []
active_score = 0
for item in list1:
score, active_all_scores = self.__calcScore(item[0], item[1], 0, 1, list1, list2, active_all_scores)
active_score += score
score, active_all_scores = self.__calcScore(item[0], item[1], 1, 0, list1, list2, active_all_scores)
active_score += score
score, active_all_scores = self.__calcScore(item[0], item[1], 1, 1, list1, list2, active_all_scores)
active_score += score
score, active_all_scores = self.__calcScore(item[0], item[1], -1, 1, list1, list2, active_all_scores)
active_score += score
passive_all_scores = []
passive_score = 0
for item in list2:
score, passive_all_scores = self.__calcScore(item[0], item[1], 0, 1, list2, list1, passive_all_scores)
passive_score += score
score, passive_all_scores = self.__calcScore(item[0], item[1], 1, 0, list2, list1, passive_all_scores)
passive_score += score
score, passive_all_scores = self.__calcScore(item[0], item[1], 1, 1, list2, list1, passive_all_scores)
passive_score += score
score, passive_all_scores = self.__calcScore(item[0], item[1], -1, 1, list2, list1, passive_all_scores)
passive_score += score
total_score = active_score - passive_score * 0.1
return total_score
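# A usage sketch (the color values and the history format are assumptions based
# on how act() reads history_record: each record carries (x, y, ..., color) with
# the stone color in the last position):
#
#   ai = aiGobang(ai_color=(0, 0, 0), player_color=(255, 255, 255), search_depth=1)
#   history_record = [(9, 9, (255, 255, 255))]   # stones placed so far
#   next_x, next_y = ai.act(history_record)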
``` |
{
"source": "johnsmithm/handwritten-sequence-tensorflow",
"score": 3
} |
#### File: johnsmithm/handwritten-sequence-tensorflow/util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def makeTFRecordFile(s):
"""Create TFRecord files
@param s: s[i][0] - [36,90] array, the image, float32,
s[i][2] - the length of the image with handwritting data, <90 , int32
s[i][1][0] - [?] array of int32, reprezenting the text in the image, vocabulary length = 29 (small letters)
save the file: trainer/data/handwritten-test-0.tfrecords
"""
# iterate over each example
# wrap with tqdm for a progress bar
writer = tf.python_io.TFRecordWriter("trainer/data/handwritten-test-{}.tfrecords".format(0))
for ii in range(len(s[1])):
# construct the Example proto boject
example = tf.train.Example(
# Example contains a Features proto object
features=tf.train.Features(
# Features contains a map of string to Feature proto objects
feature={
# A Feature contains one of either a int64_list,
# float_list, or bytes_list
'seq_len': tf.train.Feature(
int64_list=tf.train.Int64List(value=[s[2][ii]])),
'target': tf.train.Feature(
int64_list=tf.train.Int64List(value=s[1][ii][0].astype("int64"))),
'imageInput': tf.train.Feature(
float_list=tf.train.FloatList(value=(s[0][ii]-0.5).reshape(-1).astype("float"))),
}))
# use the proto object to serialize the example to a string
serialized = example.SerializeToString()
# write the serialized object to disk
writer.write(serialized)
writer.close()
#videoInputs, seq_len , target
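# A sketch of reading the records back for inspection (TensorFlow 1.x proto API,
# matching the writer above; the path is the one hard-coded in makeTFRecordFile):
#
#   for record in tf.python_io.tf_record_iterator("trainer/data/handwritten-test-0.tfrecords"):
#       example = tf.train.Example.FromString(record)
#       seq_len = example.features.feature['seq_len'].int64_list.value[0]
#       target = list(example.features.feature['target'].int64_list.value)
#       image = list(example.features.feature['imageInput'].float_list.value)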
``` |
{
"source": "john-s-morgan/electionguard-python",
"score": 3
} |
#### File: src/electionguard/ballot_code.py
```python
from .hash import hash_elems
from .group import ElementModQ
def get_hash_for_device(
device_id: int, session_id: int, launch_code: int, location: str
) -> ElementModQ:
"""
Get starting hash for given device.
:param device_id: Unique identifier of device
:param session_id: Unique identifier for the session
:param launch_code: A unique launch code for the election
:param location: Location of device
:return: Starting hash of device
"""
return hash_elems(device_id, session_id, launch_code, location)
def get_ballot_code(
prev_code: ElementModQ, timestamp: int, ballot_hash: ElementModQ
) -> ElementModQ:
"""
Get the rotated code for a particular ballot.
:param prev_code: Previous code or starting hash from device
:param timestamp: Timestamp in ticks
:param ballot_hash: Hash of ballot
:return: code
"""
return hash_elems(prev_code, timestamp, ballot_hash)
```
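A short sketch of how the two helpers above chain together to produce rotating ballot codes (the device identifiers and timestamps are illustrative, and `ONE_MOD_Q` stands in for a real ballot's crypto hash):
```python
from electionguard.ballot_code import get_ballot_code, get_hash_for_device
from electionguard.group import ONE_MOD_Q

# Starting hash for a device at a given location
device_hash = get_hash_for_device(12345, 23456, 34567, "polling-place-east")

# Each ballot code is derived from the previous code, a timestamp, and the ballot hash
first_code = get_ballot_code(device_hash, 1000, ONE_MOD_Q)
second_code = get_ballot_code(first_code, 1001, ONE_MOD_Q)
```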
#### File: src/electionguard/election.py
```python
from dataclasses import dataclass
from .group import (
Q,
P,
R,
G,
ElementModQ,
ElementModP,
int_to_p_unchecked,
int_to_q_unchecked,
)
from .hash import hash_elems
from .serializable import Serializable
@dataclass(eq=True, unsafe_hash=True)
class ElectionConstants(Serializable):
"""
The constants for mathematical functions during the election.
"""
large_prime = int_to_p_unchecked(P)
"""large prime or p"""
small_prime = int_to_q_unchecked(Q)
"""small prime or q"""
cofactor = int_to_p_unchecked(R)
"""cofactor or r"""
generator = int_to_p_unchecked(G)
"""generator or g"""
@dataclass(eq=True, unsafe_hash=True)
class CiphertextElectionContext(Serializable):
"""
`CiphertextElectionContext` is the ElectionGuard representation of a specific election
Note: The ElectionGuard Data Spec deviates from the NIST model in that
this object includes fields that are populated in the course of encrypting an election
Specifically, `crypto_base_hash`, `crypto_extended_base_hash` and `elgamal_public_key`
are populated with election-specific information necessary for encrypting the election.
Refer to the [Electionguard Specification](https://github.com/microsoft/electionguard) for more information.
To make an instance of this class, don't construct it directly. Use
`make_ciphertext_election_context` instead.
"""
number_of_guardians: int
"""
The number of guardians necessary to generate the public key
"""
quorum: int
"""
The quorum of guardians necessary to decrypt an election. Must be less than `number_of_guardians`
"""
elgamal_public_key: ElementModP
"""the `joint public key (K)` in the [ElectionGuard Spec](https://github.com/microsoft/electionguard/wiki)"""
commitment_hash: ElementModQ
"""
the `commitment hash H(K 1,0 , K 2,0 ... , K n,0 )` of the public commitments
guardians make to each other in the [ElectionGuard Spec](https://github.com/microsoft/electionguard/wiki)
"""
manifest_hash: ElementModQ
"""The hash of the election metadata"""
crypto_base_hash: ElementModQ
"""The `base hash code (𝑄)` in the [ElectionGuard Spec](https://github.com/microsoft/electionguard/wiki)"""
crypto_extended_base_hash: ElementModQ
"""The `extended base hash code (𝑄')` in [ElectionGuard Spec](https://github.com/microsoft/electionguard/wiki)"""
def make_ciphertext_election_context(
number_of_guardians: int,
quorum: int,
elgamal_public_key: ElementModP,
commitment_hash: ElementModQ,
manifest_hash: ElementModQ,
) -> CiphertextElectionContext:
"""
Makes a CiphertextElectionContext object.
:param number_of_guardians: The number of guardians necessary to generate the public key
:param quorum: The quorum of guardians necessary to decrypt an election. Must be less than `number_of_guardians`
:param elgamal_public_key: the public key of the election
:param commitment_hash: the hash of the commitments the guardians make to each other
:param manifest_hash: the hash of the election metadata
"""
# What's a crypto_base_hash?
# The metadata of this object are hashed together with the
# - prime modulus (𝑝),
# - subgroup order (𝑞),
# - generator (𝑔),
# - number of guardians (𝑛),
# - decryption threshold value (𝑘),
# to form a base hash code (𝑄) which will be incorporated
# into every subsequent hash computation in the election.
# What's a crypto_extended_base_hash?
# Once the baseline parameters have been produced and confirmed,
# all of the public guardian commitments 𝐾𝑖,𝑗 are hashed together
# with the base hash 𝑄 to form an extended base hash 𝑄' that will
# form the basis of subsequent hash computations.
crypto_base_hash = hash_elems(
int_to_p_unchecked(P),
int_to_q_unchecked(Q),
int_to_p_unchecked(G),
number_of_guardians,
quorum,
manifest_hash,
)
crypto_extended_base_hash = hash_elems(crypto_base_hash, commitment_hash)
return CiphertextElectionContext(
number_of_guardians=number_of_guardians,
quorum=quorum,
elgamal_public_key=elgamal_public_key,
commitment_hash=commitment_hash,
manifest_hash=manifest_hash,
crypto_base_hash=crypto_base_hash,
crypto_extended_base_hash=crypto_extended_base_hash,
)
```
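A minimal sketch of calling `make_ciphertext_election_context` with placeholder group elements, mirroring how the unit tests further below exercise it:
```python
from electionguard.election import ElectionConstants, make_ciphertext_election_context
from electionguard.group import ONE_MOD_P, ONE_MOD_Q

constants = ElectionConstants()
context = make_ciphertext_election_context(
    number_of_guardians=1,
    quorum=1,
    elgamal_public_key=ONE_MOD_P,
    commitment_hash=ONE_MOD_Q,
    manifest_hash=ONE_MOD_Q,
)
# crypto_base_hash and crypto_extended_base_hash are derived inside the factory
print(context.crypto_base_hash, context.crypto_extended_base_hash)
```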
#### File: tests/unit/test_manifest.py
```python
import unittest
from datetime import datetime
from electionguard.manifest import (
ContestDescriptionWithPlaceholders,
Manifest,
InternalManifest,
SelectionDescription,
VoteVariationType,
)
from electionguard.serializable import read_json
import electionguardtest.election_factory as ElectionFactory
import electionguardtest.ballot_factory as BallotFactory
election_factory = ElectionFactory.ElectionFactory()
ballot_factory = BallotFactory.BallotFactory()
class TestManifest(unittest.TestCase):
"""Manifest tests"""
def test_simple_manifest_is_valid(self):
# Act
subject = election_factory.get_simple_manifest_from_file()
# Assert
self.assertIsNotNone(subject.election_scope_id)
self.assertEqual(subject.election_scope_id, "jefferson-county-primary")
self.assertTrue(subject.is_valid())
def test_simple_manifest_can_serialize(self):
# Arrange
subject = election_factory.get_simple_manifest_from_file()
intermediate = subject.to_json()
# Act
result = Manifest.from_json(intermediate)
# Assert
self.assertIsNotNone(result.election_scope_id)
self.assertEqual(result.election_scope_id, "jefferson-county-primary")
def test_manifest_has_deterministic_hash(self):
# Act
subject1 = election_factory.get_simple_manifest_from_file()
subject2 = election_factory.get_simple_manifest_from_file()
# Assert
self.assertEqual(subject1.crypto_hash(), subject2.crypto_hash())
def test_manifest_hash_is_consistent_regardless_of_format(self):
# Act
subject1 = election_factory.get_simple_manifest_from_file()
subject1.start_date = read_json('"2020-03-01T08:00:00-05:00"', datetime)
subject2 = election_factory.get_simple_manifest_from_file()
subject2.start_date = read_json('"2020-03-01T13:00:00-00:00"', datetime)
subject3 = election_factory.get_simple_manifest_from_file()
subject3.start_date = read_json('"2020-03-01T13:00:00.000-00:00"', datetime)
subjects = [subject1, subject2, subject3]
# Assert
hashes = [subject.crypto_hash() for subject in subjects]
for other_hash in hashes[1:]:
self.assertEqual(hashes[0], other_hash)
def test_manifest_from_file_generates_consistent_internal_description_contest_hashes(
self,
):
# Arrange
comparator = election_factory.get_simple_manifest_from_file()
subject = InternalManifest(comparator)
self.assertEqual(len(comparator.contests), len(subject.contests))
for expected in comparator.contests:
for actual in subject.contests:
if expected.object_id == actual.object_id:
self.assertEqual(expected.crypto_hash(), actual.crypto_hash())
def test_contest_description_valid_input_succeeds(self):
description = ContestDescriptionWithPlaceholders(
object_id="<EMAIL>-contest",
electoral_district_id="<EMAIL>-gp-unit",
sequence_order=1,
vote_variation=VoteVariationType.n_of_m,
number_elected=1,
votes_allowed=1,
name="",
ballot_selections=[
SelectionDescription(
object_id="<EMAIL>-selection",
candidate_id="<EMAIL>",
sequence_order=0,
),
SelectionDescription(
object_id="<EMAIL>-selection",
candidate_id="<EMAIL>",
sequence_order=1,
),
],
ballot_title=None,
ballot_subtitle=None,
placeholder_selections=[
SelectionDescription(
object_id="<EMAIL>-contest-2-placeholder",
candidate_id="<EMAIL>-contest-2-candidate",
sequence_order=2,
)
],
)
self.assertTrue(description.is_valid())
def test_contest_description_invalid_input_fails(self):
description = ContestDescriptionWithPlaceholders(
object_id="<EMAIL>-contest",
electoral_district_id="<EMAIL>-gp-unit",
sequence_order=1,
vote_variation=VoteVariationType.n_of_m,
number_elected=1,
votes_allowed=1,
name="",
ballot_selections=[
SelectionDescription(
object_id="<EMAIL>-selection",
candidate_id="<EMAIL>",
sequence_order=0,
),
# simulate a bad selection description input
SelectionDescription(
object_id="<EMAIL>-selection",
candidate_id="<EMAIL>",
sequence_order=1,
),
],
ballot_title=None,
ballot_subtitle=None,
placeholder_selections=[
SelectionDescription(
object_id="<EMAIL>-contest-2-placeholder",
candidate_id="<EMAIL>-contest-2-candidate",
sequence_order=2,
)
],
)
self.assertFalse(description.is_valid())
if __name__ == "__main__":
unittest.main()
```
#### File: tests/unit/test_publish.py
```python
from datetime import datetime, timezone
from os import path
from shutil import rmtree
from unittest import TestCase
from electionguard.ballot import (
PlaintextBallot,
make_ciphertext_ballot,
)
from electionguard.election import ElectionConstants, make_ciphertext_election_context
from electionguard.group import ONE_MOD_Q, ONE_MOD_P, int_to_q_unchecked
from electionguard.guardian import GuardianRecord
from electionguard.manifest import ElectionType, Manifest
from electionguard.publish import publish, publish_private_data, RESULTS_DIR
from electionguard.tally import (
CiphertextTally,
PlaintextTally,
)
class TestPublish(TestCase):
"""Publishing tests"""
def test_publish(self) -> None:
# Arrange
now = datetime.now(timezone.utc)
manifest = Manifest("", ElectionType.unknown, now, now, [], [], [], [], [], [])
context = make_ciphertext_election_context(
1, 1, ONE_MOD_P, ONE_MOD_Q, ONE_MOD_Q
)
constants = ElectionConstants()
devices = []
guardian_records = [GuardianRecord("", "", ONE_MOD_Q, [], [])]
encrypted_ballots = []
spoiled_ballots = []
plaintext_tally = PlaintextTally("", [])
ciphertext_tally = CiphertextTally("", manifest, context)
# Act
publish(
manifest,
context,
constants,
devices,
encrypted_ballots,
spoiled_ballots,
ciphertext_tally.publish(),
plaintext_tally,
guardian_records,
)
# Assert
self.assertTrue(path.exists(RESULTS_DIR))
# Cleanup
rmtree(RESULTS_DIR)
def test_publish_private_data(self) -> None:
# Arrange
plaintext_ballots = [PlaintextBallot("", "", [])]
encrypted_ballots = [
make_ciphertext_ballot(
"", "", int_to_q_unchecked(0), int_to_q_unchecked(0), []
)
]
guardian_records = [GuardianRecord("", "", ONE_MOD_Q, [], [])]
# Act
publish_private_data(
plaintext_ballots,
encrypted_ballots,
guardian_records,
)
# Assert
self.assertTrue(path.exists(RESULTS_DIR))
# Cleanup
rmtree(RESULTS_DIR)
```
#### File: tests/unit/test_rsa.py
```python
from unittest import TestCase
from electionguard.rsa import rsa_decrypt, rsa_encrypt, rsa_keypair
class TestRSA(TestCase):
"""RSA encryption tests"""
def test_rsa_encrypt(self) -> None:
# Arrange
message = (
"9893e1c926521dc595d501056d03c4387b87986089539349"
"bed6eb1018229b2e0029dd38647bfc80746726b3710c8ac3f"
"69187da2234b438370a4348a784791813b9857446eb14afc67"
"6eece5b789a207bcf633ba1676d3410913ae46dd247166c6a682cb0ccc5ecde53"
)
# Act
key_pair = rsa_keypair()
encrypted_message = rsa_encrypt(message, key_pair.public_key)
decrypted_message = rsa_decrypt(encrypted_message, key_pair.private_key)
# Assert
self.assertIsNotNone(key_pair)
self.assertGreater(len(key_pair.private_key), 0)
self.assertGreater(len(key_pair.public_key), 0)
self.assertIsNotNone(encrypted_message)
self.assertIsNotNone(decrypted_message)
self.assertEqual(message, decrypted_message)
``` |
{
"source": "johnsoft/mitmproxy",
"score": 2
} |
#### File: contentviews/image/image_parser.py
```python
import io
import typing
from kaitaistruct import KaitaiStream
from mitmproxy.contrib.kaitaistruct import png
Metadata = typing.List[typing.Tuple[str, str]]
def parse_png(data: bytes) -> Metadata:
img = png.Png(KaitaiStream(io.BytesIO(data)))
parts = [
('Format', 'Portable network graphics')
]
parts.append(('Size', "{0} x {1} px".format(img.ihdr.width, img.ihdr.height)))
for chunk in img.chunks:
if chunk.type == 'gAMA':
parts.append(('gamma', str(chunk.body.gamma_int / 100000)))
elif chunk.type == 'pHYs':
aspectx = chunk.body.pixels_per_unit_x
aspecty = chunk.body.pixels_per_unit_y
parts.append(('aspect', "{0} x {1}".format(aspectx, aspecty)))
elif chunk.type == 'tEXt':
parts.append((chunk.body.keyword, chunk.body.text))
elif chunk.type == 'iTXt':
parts.append((chunk.body.keyword, chunk.body.text))
elif chunk.type == 'zTXt':
parts.append((chunk.body.keyword, chunk.body.text_datastream.decode('iso8859-1')))
return parts
```
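A usage sketch for the PNG parser above (the file path is hypothetical, and the module path is assumed from the file location):
```python
from mitmproxy.contentviews.image.image_parser import parse_png

with open("example.png", "rb") as f:
    data = f.read()

for key, value in parse_png(data):
    print(f"{key}: {value}")
```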
#### File: mitmproxy/test/conftest.py
```python
import os
import pytest
import OpenSSL
import functools
import mitmproxy.net.tcp
requires_alpn = pytest.mark.skipif(
not mitmproxy.net.tcp.HAS_ALPN,
reason='requires OpenSSL with ALPN support')
skip_windows = pytest.mark.skipif(
os.name == "nt",
reason='Skipping due to Windows'
)
skip_not_windows = pytest.mark.skipif(
os.name != "nt",
reason='Skipping due to not Windows'
)
skip_appveyor = pytest.mark.skipif(
"APPVEYOR" in os.environ,
reason='Skipping due to Appveyor'
)
original_pytest_raises = pytest.raises
@functools.wraps(original_pytest_raises)
def raises(exc, *args, **kwargs):
if isinstance(exc, str):
return RaisesContext(exc)
else:
return original_pytest_raises(exc, *args, **kwargs)
pytest.raises = raises
class RaisesContext:
def __init__(self, expected_exception):
self.expected_exception = expected_exception
def __enter__(self):
return
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
raise AssertionError("No exception raised.")
else:
if self.expected_exception.lower() not in str(exc_val).lower():
raise AssertionError(
"Expected %s, but caught %s" % (repr(self.expected_exception), repr(exc_val))
)
return True
@pytest.fixture()
def disable_alpn(monkeypatch):
monkeypatch.setattr(mitmproxy.net.tcp, 'HAS_ALPN', False)
monkeypatch.setattr(OpenSSL.SSL._lib, 'Cryptography_HAS_ALPN', False)
enable_coverage = False
coverage_values = []
coverage_passed = False
def pytest_addoption(parser):
parser.addoption('--full-cov',
action='append',
dest='full_cov',
default=[],
help="Require full test coverage of 100%% for this module/path/filename (multi-allowed). Default: none")
parser.addoption('--no-full-cov',
action='append',
dest='no_full_cov',
default=[],
help="Exclude file from a parent 100%% coverage requirement (multi-allowed). Default: none")
def pytest_configure(config):
global enable_coverage
enable_coverage = (
len(config.getoption('file_or_dir')) == 0 and
len(config.getoption('full_cov')) > 0 and
config.pluginmanager.getplugin("_cov") is not None and
config.pluginmanager.getplugin("_cov").cov_controller is not None and
config.pluginmanager.getplugin("_cov").cov_controller.cov is not None
)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtestloop(session):
global enable_coverage
global coverage_values
global coverage_passed
if not enable_coverage:
yield
return
cov = pytest.config.pluginmanager.getplugin("_cov").cov_controller.cov
if os.name == 'nt':
cov.exclude('pragma: windows no cover')
yield
coverage_values = dict([(name, 0) for name in pytest.config.option.full_cov])
prefix = os.getcwd()
excluded_files = [os.path.normpath(f) for f in pytest.config.option.no_full_cov]
measured_files = [os.path.normpath(os.path.relpath(f, prefix)) for f in cov.get_data().measured_files()]
measured_files = [f for f in measured_files if not any(f.startswith(excluded_f) for excluded_f in excluded_files)]
for name in pytest.config.option.full_cov:
files = [f for f in measured_files if f.startswith(os.path.normpath(name))]
try:
with open(os.devnull, 'w') as null:
coverage_values[name] = cov.report(files, ignore_errors=True, file=null)
except:
pass
if any(v < 100 for v in coverage_values.values()):
# make sure we get the EXIT_TESTSFAILED exit code
session.testsfailed += 1
else:
coverage_passed = True
def pytest_terminal_summary(terminalreporter, exitstatus):
global enable_coverage
global coverage_values
global coverage_passed
if not enable_coverage:
return
terminalreporter.write('\n')
if not coverage_passed:
markup = {'red': True, 'bold': True}
msg = "FAIL: Full test coverage not reached!\n"
terminalreporter.write(msg, **markup)
for name, value in coverage_values.items():
if value < 100:
markup = {'red': True, 'bold': True}
else:
markup = {'green': True}
msg = 'Coverage for {}: {:.2f}%\n'.format(name, value)
terminalreporter.write(msg, **markup)
else:
markup = {'green': True}
msg = 'SUCCESS: Full test coverage reached in modules and files:\n'
msg += '{}\n\n'.format('\n'.join(pytest.config.option.full_cov))
terminalreporter.write(msg, **markup)
msg = 'Excluded files:\n'
msg += '{}\n'.format('\n'.join(pytest.config.option.no_full_cov))
terminalreporter.write(msg)
```
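A short sketch of the string form of `pytest.raises` that the conftest above patches in; the string is matched case-insensitively against the raised exception's message, so this only behaves this way when that conftest is active:
```python
import pytest


def divide(a, b):
    return a / b


def test_divide_by_zero():
    # Standard form still works (delegates to the original pytest.raises)
    with pytest.raises(ZeroDivisionError):
        divide(1, 0)

    # Patched form: pass a substring expected in the exception message
    with pytest.raises("division by zero"):
        divide(1, 0)
```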
#### File: mitmproxy/addons/test_dumper.py
```python
import io
import shutil
import pytest
from unittest import mock
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy.test import tutils
from mitmproxy.addons import dumper
from mitmproxy import exceptions
from mitmproxy.tools import dump
from mitmproxy import http
def test_configure():
d = dumper.Dumper()
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, filtstr="~b foo")
assert d.filter
f = tflow.tflow(resp=True)
assert not d.match(f)
f.response.content = b"foo"
assert d.match(f)
ctx.configure(d, filtstr=None)
assert not d.filter
with pytest.raises(exceptions.OptionsError):
ctx.configure(d, filtstr="~~")
assert not d.filter
def test_simple():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=0)
d.response(tflow.tflow(resp=True))
assert not sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=1)
d.response(tflow.tflow(resp=True))
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=1)
d.error(tflow.tflow(err=True))
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(resp=True))
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(resp=True))
assert "<<" in sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(err=True))
assert "<<" in sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow()
flow.request = tutils.treq()
flow.request.stickycookie = True
flow.client_conn = mock.MagicMock()
flow.client_conn.address.host = "foo"
flow.response = tutils.tresp(content=None)
flow.response.is_replay = True
flow.response.status_code = 300
d.response(flow)
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow(resp=tutils.tresp(content=b"{"))
flow.response.headers["content-type"] = "application/json"
flow.response.status_code = 400
d.response(flow)
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow()
flow.request.content = None
flow.response = http.HTTPResponse.wrap(tutils.tresp())
flow.response.content = None
d.response(flow)
assert "content missing" in sio.getvalue()
sio.truncate(0)
def test_echo_body():
f = tflow.tflow(client_conn=True, server_conn=True, resp=True)
f.response.headers["content-type"] = "text/html"
f.response.content = b"foo bar voing\n" * 100
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3)
d._echo_message(f.response)
t = sio.getvalue()
assert "cut off" in t
def test_echo_request_line():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.request.is_replay = True
d._echo_request_line(f)
assert "[replay]" in sio.getvalue()
sio.truncate(0)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.request.is_replay = False
d._echo_request_line(f)
assert "[replay]" not in sio.getvalue()
sio.truncate(0)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.request.http_version = "nonstandard"
d._echo_request_line(f)
assert "nonstandard" in sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=0, showhost=True)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        terminal_width = max(shutil.get_terminal_size()[0] - 25, 50)
        f.request.url = "http://address:22/" + ("x" * terminal_width) + "textToBeTruncated"
d._echo_request_line(f)
assert "textToBeTruncated" not in sio.getvalue()
sio.truncate(0)
class TestContentView:
@mock.patch("mitmproxy.contentviews.auto.ViewAuto.__call__")
def test_contentview(self, view_auto):
view_auto.side_effect = exceptions.ContentViewException("")
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=4, verbosity=3)
d.response(tflow.tflow())
assert "Content viewer failed" in ctx.master.event_log[0][1]
def test_tcp():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.ttcpflow()
d.tcp_message(f)
assert "it's me" in sio.getvalue()
sio.truncate(0)
f = tflow.ttcpflow(client_conn=True, err=True)
d.tcp_error(f)
assert "Error in TCP" in sio.getvalue()
def test_websocket():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.twebsocketflow()
d.websocket_message(f)
assert "hello text" in sio.getvalue()
sio.truncate(0)
d.websocket_end(f)
assert "WebSocket connection closed by" in sio.getvalue()
f = tflow.twebsocketflow(client_conn=True, err=True)
d.websocket_error(f)
assert "Error in WebSocket" in sio.getvalue()
```
#### File: mitmproxy/console/test_common.py
```python
from mitmproxy.test import tflow
from mitmproxy.tools.console import common
from ...conftest import skip_appveyor
@skip_appveyor
def test_format_flow():
f = tflow.tflow(resp=True)
assert common.format_flow(f, True)
assert common.format_flow(f, True, hostheader=True)
assert common.format_flow(f, True, extended=True)
```
#### File: mitmproxy/console/test_help.py
```python
import mitmproxy.tools.console.help as help
from ...conftest import skip_appveyor
@skip_appveyor
class TestHelp:
def test_helptext(self):
h = help.HelpView(None)
assert h.helptext()
def test_keypress(self):
h = help.HelpView([1, 2, 3])
assert not h.keypress((0, 0), "q")
assert not h.keypress((0, 0), "?")
assert h.keypress((0, 0), "o") == "o"
```
#### File: net/websockets/test_frame.py
```python
import os
import codecs
import pytest
from mitmproxy.net import websockets
from mitmproxy.test import tutils
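# RFC 6455 payload length encoding exercised below: lengths up to 125 fit in the
# 7-bit length field, lengths 126-65535 use the marker value 126 plus a 2-byte
# extended length, and larger lengths use marker 127 plus an 8-byte extended
# length. The hex strings are the expected serialized FrameHeader bytes.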
class TestFrameHeader:
@pytest.mark.parametrize("input,expected", [
(0, '0100'),
(125, '017D'),
(126, '017E007E'),
(127, '017E007F'),
(142, '017E008E'),
(65534, '017EFFFE'),
(65535, '017EFFFF'),
(65536, '017F0000000000010000'),
(8589934591, '017F00000001FFFFFFFF'),
(2 ** 64 - 1, '017FFFFFFFFFFFFFFFFF'),
])
def test_serialization_length(self, input, expected):
h = websockets.FrameHeader(
opcode=websockets.OPCODE.TEXT,
payload_length=input,
)
assert bytes(h) == codecs.decode(expected, 'hex')
def test_serialization_too_large(self):
h = websockets.FrameHeader(
payload_length=2 ** 64 + 1,
)
with pytest.raises(ValueError):
bytes(h)
@pytest.mark.parametrize("input,expected", [
('0100', 0),
('017D', 125),
('017E007E', 126),
('017E007F', 127),
('017E008E', 142),
('017EFFFE', 65534),
('017EFFFF', 65535),
('017F0000000000010000', 65536),
('017F00000001FFFFFFFF', 8589934591),
('017FFFFFFFFFFFFFFFFF', 2 ** 64 - 1),
])
def test_deserialization_length(self, input, expected):
h = websockets.FrameHeader.from_file(tutils.treader(codecs.decode(input, 'hex')))
assert h.payload_length == expected
@pytest.mark.parametrize("input,expected", [
('0100', (False, None)),
('018000000000', (True, '00000000')),
('018012345678', (True, '12345678')),
])
def test_deserialization_masking(self, input, expected):
h = websockets.FrameHeader.from_file(tutils.treader(codecs.decode(input, 'hex')))
assert h.mask == expected[0]
if h.mask:
assert h.masking_key == codecs.decode(expected[1], 'hex')
def test_equality(self):
h = websockets.FrameHeader(mask=True, masking_key=b'1234')
h2 = websockets.FrameHeader(mask=True, masking_key=b'1234')
assert h == h2
h = websockets.FrameHeader(fin=True)
h2 = websockets.FrameHeader(fin=False)
assert h != h2
assert h != 'foobar'
def test_roundtrip(self):
def round(*args, **kwargs):
h = websockets.FrameHeader(*args, **kwargs)
h2 = websockets.FrameHeader.from_file(tutils.treader(bytes(h)))
assert h == h2
round()
round(fin=True)
round(rsv1=True)
round(rsv2=True)
round(rsv3=True)
round(payload_length=1)
round(payload_length=100)
round(payload_length=1000)
round(payload_length=10000)
round(opcode=websockets.OPCODE.PING)
round(masking_key=b"test")
def test_human_readable(self):
f = websockets.FrameHeader(
masking_key=b"test",
fin=True,
payload_length=10
)
assert repr(f)
f = websockets.FrameHeader()
assert repr(f)
def test_funky(self):
f = websockets.FrameHeader(masking_key=b"test", mask=False)
raw = bytes(f)
f2 = websockets.FrameHeader.from_file(tutils.treader(raw))
assert not f2.mask
def test_violations(self):
with pytest.raises("opcode"):
websockets.FrameHeader(opcode=17)
with pytest.raises("masking key"):
websockets.FrameHeader(masking_key=b"x")
def test_automask(self):
f = websockets.FrameHeader(mask=True)
assert f.masking_key
f = websockets.FrameHeader(masking_key=b"foob")
assert f.mask
f = websockets.FrameHeader(masking_key=b"foob", mask=0)
assert not f.mask
assert f.masking_key
class TestFrame:
def test_equality(self):
f = websockets.Frame(payload=b'1234')
f2 = websockets.Frame(payload=b'1234')
assert f == f2
assert f != b'1234'
def test_roundtrip(self):
def round(*args, **kwargs):
f = websockets.Frame(*args, **kwargs)
raw = bytes(f)
f2 = websockets.Frame.from_file(tutils.treader(raw))
assert f == f2
round(b"test")
round(b"test", fin=1)
round(b"test", rsv1=1)
round(b"test", opcode=websockets.OPCODE.PING)
round(b"test", masking_key=b"test")
def test_human_readable(self):
f = websockets.Frame()
assert repr(f)
f = websockets.Frame(b"foobar")
assert "foobar" in repr(f)
@pytest.mark.parametrize("masked", [True, False])
@pytest.mark.parametrize("length", [100, 50000, 150000])
def test_serialization_bijection(self, masked, length):
frame = websockets.Frame(
os.urandom(length),
fin=True,
opcode=websockets.OPCODE.TEXT,
mask=int(masked),
masking_key=(os.urandom(4) if masked else None)
)
serialized = bytes(frame)
assert frame == websockets.Frame.from_bytes(serialized)
``` |
{
"source": "johnsom6/TheAccumulatorPattern",
"score": 4
} |
#### File: johnsom6/TheAccumulatorPattern/quiz 4.py
```python
letter = 'A'
def grade(letter):
if letter == 'A':
return 4
elif letter == 'B':
return 3
elif letter == 'C':
return 2
def main():
grade(letter)
main()
import random
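# Accumulator pattern: start a counter at zero, update it inside the loop,
# and return the accumulated total after the loop finishes.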
def odds(n):
number_of_odds = 0
for k in range(n):
r = random.randrange(10)
        print(r)
if (r % 2) == 1:
number_of_odds = number_of_odds + 1
    return number_of_odds
odds(8)
``` |
{
"source": "johnsom/octavia",
"score": 2
} |
#### File: agent/api_server/test_keepalived.py
```python
import subprocess
from unittest import mock
import flask
from octavia.amphorae.backends.agent.api_server import keepalived
import octavia.tests.unit.base as base
class KeepalivedTestCase(base.TestCase):
def setUp(self):
super(KeepalivedTestCase, self).setUp()
self.app = flask.Flask(__name__)
self.client = self.app.test_client()
self._ctx = self.app.test_request_context()
self._ctx.push()
self.test_keepalived = keepalived.Keepalived()
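    # The single test below checks the three response paths of
    # manager_keepalived_service: 202 for a valid 'start', 400 for an
    # unsupported action, and 500 when the service command raises
    # CalledProcessError.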
@mock.patch('subprocess.check_output')
def test_manager_keepalived_service(self, mock_check_output):
res = self.test_keepalived.manager_keepalived_service('start')
cmd = ("/usr/sbin/service octavia-keepalived {action}".format(
action='start'))
mock_check_output.assert_called_once_with(cmd.split(),
stderr=subprocess.STDOUT)
self.assertEqual(202, res.status_code)
res = self.test_keepalived.manager_keepalived_service('restart')
self.assertEqual(400, res.status_code)
mock_check_output.side_effect = subprocess.CalledProcessError(1,
'blah!')
res = self.test_keepalived.manager_keepalived_service('start')
self.assertEqual(500, res.status_code)
```
#### File: backends/utils/test_network_namespace.py
```python
import random
from unittest import mock
from octavia.amphorae.backends.utils import network_namespace
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base
class TestNetworkNamespace(base.TestCase):
def setUp(self):
super(TestNetworkNamespace, self).setUp()
@mock.patch('ctypes.get_errno')
@mock.patch('ctypes.CDLL')
def test_error_handler(self, mock_cdll, mock_get_errno):
FAKE_NETNS = 'fake-netns'
netns = network_namespace.NetworkNamespace(FAKE_NETNS)
# Test result 0
netns._error_handler(0, None, None)
mock_get_errno.assert_not_called()
# Test result -1
mock_get_errno.reset_mock()
self.assertRaises(OSError, netns._error_handler, -1, None, None)
mock_get_errno.assert_called_once_with()
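    # The remaining tests patch ctypes.CDLL so no real setns(2) call happens;
    # CLONE_NEWNET (0x40000000) is the clone/setns flag selecting the network
    # namespace, mirroring the constant defined inside the tests below.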
@mock.patch('os.getpid')
@mock.patch('ctypes.CDLL')
def test_init(self, mock_cdll, mock_getpid):
FAKE_NETNS = 'fake-netns'
FAKE_PID = random.randrange(100000)
mock_cdll_obj = mock.MagicMock()
mock_cdll.return_value = mock_cdll_obj
mock_getpid.return_value = FAKE_PID
expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID)
expected_target_netns = '/var/run/netns/{netns}'.format(
netns=FAKE_NETNS)
netns = network_namespace.NetworkNamespace(FAKE_NETNS)
self.assertEqual(expected_current_netns, netns.current_netns)
self.assertEqual(expected_target_netns, netns.target_netns)
self.assertEqual(mock_cdll_obj.setns, netns.set_netns)
self.assertEqual(netns.set_netns.errcheck, netns._error_handler)
@mock.patch('os.getpid')
@mock.patch('ctypes.CDLL')
def test_enter(self, mock_cdll, mock_getpid):
CLONE_NEWNET = 0x40000000
FAKE_NETNS = 'fake-netns'
FAKE_PID = random.randrange(100000)
current_netns_fd = random.randrange(100000)
target_netns_fd = random.randrange(100000)
mock_getpid.return_value = FAKE_PID
mock_cdll_obj = mock.MagicMock()
mock_cdll.return_value = mock_cdll_obj
expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID)
expected_target_netns = '/var/run/netns/{netns}'.format(
netns=FAKE_NETNS)
netns = network_namespace.NetworkNamespace(FAKE_NETNS)
current_mock_open = self.useFixture(
test_utils.OpenFixture(expected_current_netns)).mock_open
current_mock_open.return_value = current_netns_fd
target_mock_open = self.useFixture(
test_utils.OpenFixture(expected_target_netns)).mock_open
handle = target_mock_open()
handle.fileno.return_value = target_netns_fd
netns.__enter__()
self.assertEqual(current_netns_fd, netns.current_netns_fd)
netns.set_netns.assert_called_once_with(target_netns_fd, CLONE_NEWNET)
@mock.patch('os.getpid')
@mock.patch('ctypes.CDLL')
def test_exit(self, mock_cdll, mock_getpid):
CLONE_NEWNET = 0x40000000
FAKE_NETNS = 'fake-netns'
FAKE_PID = random.randrange(100000)
current_netns_fileno = random.randrange(100000)
mock_getpid.return_value = FAKE_PID
mock_cdll_obj = mock.MagicMock()
mock_cdll.return_value = mock_cdll_obj
mock_current_netns_fd = mock.MagicMock()
mock_current_netns_fd.fileno.return_value = current_netns_fileno
netns = network_namespace.NetworkNamespace(FAKE_NETNS)
netns.current_netns_fd = mock_current_netns_fd
netns.__exit__()
netns.set_netns.assert_called_once_with(current_netns_fileno,
CLONE_NEWNET)
mock_current_netns_fd.close.assert_called_once_with()
```
#### File: keepalived/jinja/test_jinja_cfg.py
```python
import copy
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg
from octavia.common import constants
import octavia.tests.unit.base as base
class TestVRRPRestDriver(base.TestCase):
def setUp(self):
super(TestVRRPRestDriver, self).setUp()
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group="haproxy_amphora", base_path='/tmp/test')
conf.config(group="keepalived_vrrp", vrrp_garp_refresh_interval=5)
conf.config(group="keepalived_vrrp", vrrp_garp_refresh_count=2)
conf.config(group="keepalived_vrrp", vrrp_check_interval=5)
conf.config(group="keepalived_vrrp", vrrp_fail_count=2)
conf.config(group="keepalived_vrrp", vrrp_success_count=2)
self.templater = jinja_cfg.KeepalivedJinjaTemplater()
self.amphora1 = mock.MagicMock()
self.amphora1.status = constants.AMPHORA_ALLOCATED
self.amphora1.vrrp_ip = '10.0.0.1'
self.amphora1.role = constants.ROLE_MASTER
self.amphora1.vrrp_interface = 'eth1'
self.amphora1.vrrp_id = 1
self.amphora1.vrrp_priority = 100
self.amphora2 = mock.MagicMock()
self.amphora2.status = constants.AMPHORA_ALLOCATED
self.amphora2.vrrp_ip = '10.0.0.2'
self.amphora2.role = constants.ROLE_BACKUP
self.amphora2.vrrp_interface = 'eth1'
self.amphora2.vrrp_id = 1
self.amphora2.vrrp_priority = 90
self.lb = mock.MagicMock()
self.lb.amphorae = [self.amphora1, self.amphora2]
self.lb.vrrp_group.vrrp_group_name = 'TESTGROUP'
self.lb.vrrp_group.vrrp_auth_type = constants.VRRP_AUTH_DEFAULT
self.lb.vrrp_group.vrrp_auth_pass = '<PASSWORD>'
self.lb.vip.ip_address = '10.1.0.5'
self.lb.vrrp_group.advert_int = 10
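        # ref_conf is the full keepalived VRRP configuration the templater is
        # expected to render for the IPv4 MASTER amphora defined above; the
        # IPv6 variant (ref_v6_conf) below follows the same structure.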
self.ref_conf = ("vrrp_script check_script {\n"
" script /tmp/test/vrrp/check_script.sh\n"
" interval 5\n"
" fall 2\n"
" rise 2\n"
"}\n"
"\n"
"vrrp_instance TESTGROUP {\n"
" state MASTER\n"
" interface eth1\n"
" virtual_router_id 1\n"
" priority 100\n"
" nopreempt\n"
" accept\n"
" garp_master_refresh 5\n"
" garp_master_refresh_repeat 2\n"
" advert_int 10\n"
" authentication {\n"
" auth_type PASS\n"
" auth_pass <PASSWORD>"
" }\n"
"\n"
" unicast_src_ip 10.0.0.1\n"
" unicast_peer {\n"
" 10.0.0.2\n"
" }\n"
"\n"
" virtual_ipaddress {\n"
" 10.1.0.5\n"
" }\n\n"
" virtual_routes {\n"
" 10.1.0.0/24 dev eth1 src 10.1.0.5 scope link "
"table 1\n"
" }\n\n"
" virtual_rules {\n"
" from 10.1.0.5/32 table 1 priority 100\n"
" }\n\n"
" track_script {\n"
" check_script\n"
" }\n"
"}")
self.amphora1v6 = copy.deepcopy(self.amphora1)
self.amphora1v6.vrrp_ip = '2001:db8::10'
self.amphora2v6 = copy.deepcopy(self.amphora2)
self.amphora2v6.vrrp_ip = '2001:db8::11'
self.lbv6 = copy.deepcopy(self.lb)
self.lbv6.amphorae = [self.amphora1v6, self.amphora2v6]
self.lbv6.vip.ip_address = '2001:db8::15'
self.ref_v6_conf = ("vrrp_script check_script {\n"
" script /tmp/test/vrrp/check_script.sh\n"
" interval 5\n"
" fall 2\n"
" rise 2\n"
"}\n"
"\n"
"vrrp_instance TESTGROUP {\n"
" state MASTER\n"
" interface eth1\n"
" virtual_router_id 1\n"
" priority 100\n"
" nopreempt\n"
" accept\n"
" garp_master_refresh 5\n"
" garp_master_refresh_repeat 2\n"
" advert_int 10\n"
" authentication {\n"
" auth_type PASS\n"
" auth_pass <PASSWORD>"
" }\n"
"\n"
" unicast_src_ip 2001:db8::10\n"
" unicast_peer {\n"
" 2001:db8::11\n"
" }\n"
"\n"
" virtual_ipaddress {\n"
" 2001:db8::15\n"
" }\n\n"
" virtual_routes {\n"
" 2001:db8::/64 dev eth1 src "
"2001:db8::15 scope link table 1\n"
" }\n\n"
" virtual_rules {\n"
" from 2001:db8::15/128 table 1 "
"priority 100\n"
" }\n\n"
" track_script {\n"
" check_script\n"
" }\n"
"}")
def test_build_keepalived_config(self):
config = self.templater.build_keepalived_config(
self.lb, self.amphora1, '10.1.0.0/24')
self.assertEqual(self.ref_conf, config)
def test_build_keepalived_ipv6_config(self):
config = self.templater.build_keepalived_config(
self.lbv6, self.amphora1v6, '2001:db8::/64')
self.assertEqual(self.ref_v6_conf, config)
```
#### File: amphora_driver/v1/test_amphora_driver.py
```python
from unittest import mock
from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions
from oslo_utils import uuidutils
from octavia.api.drivers.amphora_driver.v1 import driver
from octavia.common import constants as consts
from octavia.network import base as network_base
from octavia.tests.common import sample_data_models
from octavia.tests.unit import base
class TestAmphoraDriver(base.TestRpc):
def setUp(self):
super(TestAmphoraDriver, self).setUp()
self.amp_driver = driver.AmphoraProviderDriver()
self.sample_data = sample_data_models.SampleDriverDataModels()
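    # Most tests below follow the same pattern: build provider data models,
    # call the driver method, and assert that an oslo.messaging cast was made
    # with the expected RPC method name and ID/update payload.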
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip
provider_vip_dict = self.amp_driver.create_vip_port(
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict)
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port_failed(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.side_effect = (
network_base.AllocateVIPException())
self.assertRaises(exceptions.DriverError,
self.amp_driver.create_vip_port,
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
# Load Balancer
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_create(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_create(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.FLAVOR: None,
consts.AVAILABILITY_ZONE: None}
mock_cast.assert_called_with({}, 'create_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_delete(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_delete(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
'cascade': False}
mock_cast.assert_called_with({}, 'delete_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_failover(self, mock_cast):
self.amp_driver.loadbalancer_failover(self.sample_data.lb_id)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id}
mock_cast.assert_called_with({}, 'failover_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, admin_state_up=True)
lb_dict = {'enabled': True}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_name(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, name='Great LB')
lb_dict = {'name': 'Great LB'}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_qos(self, mock_cast):
qos_policy_id = uuidutils.generate_uuid()
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id,
vip_qos_policy_id=qos_policy_id)
lb_dict = {'vip': {'qos_policy_id': qos_policy_id}}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
# Listener
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_create(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
self.amp_driver.listener_create(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'create_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_delete(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
self.amp_driver.listener_delete(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'delete_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, admin_state_up=False)
listener_dict = {'enabled': False}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update_name(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, name='Great Listener')
listener_dict = {'name': 'Great Listener'}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
# Pool
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id,
lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN)
self.amp_driver.pool_create(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'create_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create_unsupported_algorithm(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_create,
provider_pool)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_delete(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
self.amp_driver.pool_delete(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'delete_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, admin_state_up=True)
pool_dict = {'enabled': True}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_name(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, name='Great pool',
admin_state_up=True, tls_enabled=True)
pool_dict = {'name': 'Great pool',
'enabled': True,
'tls_enabled': True}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_unsupported_algorithm(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_update,
old_provider_pool,
provider_pool)
mock_cast.assert_not_called()
# Member
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create(self, mock_cast, mock_pool_get, mock_session):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "172.16.31.10"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "fe80::1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_create,
provider_member)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_delete(self, mock_cast):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_delete(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'delete_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, admin_state_up=True)
member_dict = {'enabled': True}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update_name(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, name='Great member')
member_dict = {'name': 'Great member'}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_no_admin_addr(self, mock_cast,
mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id,
monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_clear_already_empty(
self, mock_cast, mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool_get.return_value = mock_pool
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, [])
mock_cast.assert_not_called()
# Health Monitor
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_create(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_create(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'create_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_delete(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_delete(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'delete_health_monitor', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "172.16.31.10"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "172.16.31.10"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='fe80::1', monitor_address='fe80::2',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_batch_update,
self.sample_data.pool1_id, prov_members)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True,
max_retries=1, max_retries_down=2)
hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update_name(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, name='Great HM')
hm_dict = {'name': 'Great HM'}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)
# L7 Policy
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_create(self, mock_cast):
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_create(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'create_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_delete(self, mock_cast):
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_delete(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'delete_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True)
l7policy_dict = {'enabled': True}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update_name(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy')
l7policy_dict = {'name': 'Great L7Policy'}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)
# L7 Rules
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_create(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_create(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'create_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_delete(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_delete(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'delete_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True)
l7rule_dict = {'enabled': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update_invert(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, invert=True)
l7rule_dict = {'invert': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)
# Flavor
def test_get_supported_flavor_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_flavor_metadata
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', test_schema):
result = self.amp_driver.get_supported_flavor_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.get_supported_flavor_metadata)
def test_validate_flavor(self):
ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE}
self.amp_driver.validate_flavor(ref_dict)
# Test bad flavor metadata value is bad
ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test bad flavor metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_flavor, 'bogus')
# Availability Zone
def test_get_supported_availability_zone_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_availability_zone_metadata
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema):
result = self.amp_driver.get_supported_availability_zone_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(
exceptions.DriverError,
self.amp_driver.get_supported_availability_zone_metadata)
def test_validate_availability_zone(self):
ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'}
self.amp_driver.validate_availability_zone(ref_dict)
# Test bad availability zone metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_availability_zone,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_availability_zone,
'bogus')
```
#### File: api/drivers/test_driver_factory.py
```python
from unittest import mock
from octavia.api.drivers import driver_factory
from octavia.common import exceptions
import octavia.tests.unit.base as base
class TestDriverFactory(base.TestCase):
def setUp(self):
super(TestDriverFactory, self).setUp()
@mock.patch('stevedore.driver.DriverManager')
def test_driver_factory_no_provider(self, mock_drivermgr):
mock_mgr = mock.MagicMock()
mock_drivermgr.return_value = mock_mgr
driver = driver_factory.get_driver(None)
self.assertEqual(mock_mgr.driver, driver)
@mock.patch('stevedore.driver.DriverManager')
def test_driver_factory_failed_to_load_driver(self, mock_drivermgr):
mock_drivermgr.side_effect = Exception('boom')
self.assertRaises(exceptions.ProviderNotFound,
driver_factory.get_driver, None)
@mock.patch('stevedore.driver.DriverManager')
def test_driver_factory_not_enabled(self, mock_drivermgr):
self.assertRaises(exceptions.ProviderNotEnabled,
driver_factory.get_driver,
'dont-enable-this-fake-driver-name')
``` |
{
"source": "Johnson070/Graphics.py-to-Python-code",
"score": 3
} |
#### File: Johnson070/Graphics.py-to-Python-code/main_menu.py
```python
from graphics import *
def create_menu(win):
ln = Line(Point(1000, 0), Point(1000, 999))
ln.setOutline("black")
ln.draw(win)
``` |
{
"source": "Johnson145/RapidObjectDetectionUsingCascadedCNNs",
"score": 2
} |
#### File: RapidObjectDetectionUsingCascadedCNNs/app/abstract_inference_app.py
```python
import abc
import traceback
from typing import List
import numpy as np
from concurrent.futures.thread import ThreadPoolExecutor
from app.base_app import BaseApp
from data.image_info import ImageInfo
from data.rectangles import LabeledBoundingBox, Window
from utils import log
from utils.time_watcher import TimeWatcher
import config as cf
class AbstractInferenceApp(BaseApp):
"""This class is the common root of all custom and external inference apps."""
@abc.abstractmethod
def run_inference_on_windows(self, windows_info: List[Window], windows_raw) -> List[LabeledBoundingBox]:
"""Run inference on a set of sliding windows.
:param windows_info: The sliding windows to process. May belong to different images.
:param windows_raw: The sliding windows to process. May belong to different images.
:return: A list containing all bounding boxes belonging to the foreground classes.
"""
return
@abc.abstractmethod
def run_inference_on_image(self, image: ImageInfo) -> List[LabeledBoundingBox]:
"""Run inference on a single image.
:param image: The image to process. Not yet split into windows.
:return: A list containing all bounding boxes belonging to the foreground classes.
"""
return
def run_inference_on_images(self, images: List[ImageInfo], merge=True) -> List[List[LabeledBoundingBox]]:
"""Run inference on the given image list.
:param images: The images to process. Not yet split into windows.
:param merge: If True, performance will be optimized by processing all images at once. Otherwise,
performance may be worse, but you can get additional evaluations referring to a single image only.
:return: The outer list contains one inner list for each provided input image. Each of such inner lists contains
bounding boxes for all found foreground classes.
"""
# init TensorFlow before starting any timers
self._init_tf()
# The outer result list contains one inner list for each provided input image.
all_results = []
timer_multiple = TimeWatcher("inference_img_multiple: {} imgs".format(len(images)))
if merge:
# extract windows from all images first and merge them
timer_extracting = TimeWatcher("extract windows from all images and merge them")
# extract using multiple threads
# TODO most of the code's runtime is currently required for the "thread.acquire" method
log.log(" -> extract")
with ThreadPoolExecutor() as executor:
results_per_img = list(executor.map(lambda img: self._extract_windows(img, convert_raw_to_np=False),
images))
# merge
log.log(" -> merge")
windows_merged_info = [] # merge infos first
for _, window_infos_of_one_image in results_per_img:
windows_merged_info += window_infos_of_one_image
# merge raw data by directly creating a common numpy array
# (so no combined list in between)
windows_merged_raw = np.empty(
shape=[len(windows_merged_info), cf.get("img_width"), cf.get("img_height"), 3],
dtype=cf.get("img_dtype"))
raw_window_index = 0
for raw_windows_of_one_image, _ in results_per_img:
for raw_window in raw_windows_of_one_image:
windows_merged_raw[raw_window_index] = raw_window
raw_window_index += 1
# release memory
results_per_img = None
timer_extracting.stop()
# run inference using the merged windows
log.log("run inference using the merged windows (total: {}, avg per img: {:.0f})".format(
len(windows_merged_info),
len(windows_merged_info) / len(images)
))
merged_bboxes = self.run_inference_on_windows(windows_merged_info, windows_merged_raw)
# separate results: group them by the input images
all_results_dict = dict()
for img in images:
all_results_dict[img.path_original] = []
for bbox in merged_bboxes:
all_results_dict[bbox.image.path_original].append(bbox)
# transform dict to final result list
all_results = []
for img in images:
all_results.append(all_results_dict[img.path_original])
else:
# process image after image
# TODO implement multi-threading for the non-merging mode, too
for img in images:
img_results = []
try:
timer_single = TimeWatcher("inference_img_single")
img_results = self.run_inference_on_image(img)
timer_single.stop()
except FileNotFoundError:
log.log(" .. Skipped {}, because the file could not be found".format(
img.path_resized
))
except:
log.log(" .. Skipped {}, because of an unexpected error:\n{}".format(
img.path_resized,
traceback.format_exc()
))
all_results.append(img_results)
timer_multiple.stop()
if merge:
# runtime stats for inference only are available in merge mode only
runtime_total = timer_extracting.elapsed_seconds
runtime_avg = runtime_total / float(len(images))
log.log("Runtime window extraction: {} images in {} (avg: {}).".format(
len(images),
TimeWatcher.seconds_to_str(runtime_total),
TimeWatcher.seconds_to_str(runtime_avg)
))
runtime_total = timer_multiple.elapsed_seconds - timer_extracting.elapsed_seconds
runtime_avg = runtime_total / float(len(images))
log.log("Runtime inference only: {} images in {} (avg: {}).".format(
len(images),
TimeWatcher.seconds_to_str(runtime_total),
TimeWatcher.seconds_to_str(runtime_avg)
))
# log runtime stats: inference including extracting
runtime_total = timer_multiple.elapsed_seconds
runtime_avg = runtime_total / float(len(images))
log.log("Runtime inference including window extraction: {} images in {} (avg: {}).".format(
len(images),
TimeWatcher.seconds_to_str(runtime_total),
TimeWatcher.seconds_to_str(runtime_avg)
))
return all_results
def _extract_windows(self, img: ImageInfo, convert_raw_to_np=True):
"""Extract all sliding windows from the given img.
Essentially, this is a wrapper for Window.extract_windows(img) to allow additional steps required by subclasses.
Exceptions will be caught and replaced by an empty list along with an error message, because we don't want the
complete inference process to get stopped because of single images.
"""
try:
windows_raw, windows_info = Window.extract_windows(img, convert_raw_to_np)
if len(windows_raw) < 1:
raise ValueError("Could not extract any windows from the given image")
return windows_raw, windows_info
except FileNotFoundError:
log.log(" .. Skipped {}, because the file could not be found".format(
img.path_resized
))
return [], []
except:
log.log(" .. Skipped {}, because of an unexpected error:\n{}".format(
img.path_resized,
traceback.format_exc()
))
return [], []
@abc.abstractmethod
def _init_tf(self):
"""Initialize TensorFlow and load required resources.
If TensorFlow is already initialized, nothing will happen.
"""
return
```
#### File: RapidObjectDetectionUsingCascadedCNNs/app/base_app.py
```python
import abc
import os
import sys
from utils import log
from utils.time_watcher import TimeWatcher
import config as cf
from subprocess import call
class BaseApp(metaclass=abc.ABCMeta):
"""This class is the base class of all "apps" created in this project."""
def __init__(self, run_now=True):
"""Create a new BaseApp.
:param run_now: Whether this app should run right now.
"""
self._time_watcher = None
# overclock GPU
if cf.get("overclock_gpu_shell") is not None:
log.log("automatically overclocking the GPU by using the following shell script: {}".format(
cf.get("overclock_gpu_shell")
))
call(cf.get("overclock_gpu_shell"), shell=True)
if run_now:
self.run()
def run(self):
"""Run this app.
This method is wrapping the main method to introduce some additional events.
:return:
"""
self._time_watcher = TimeWatcher(os.path.basename(sys.argv[0]).replace(".py", ""))
try:
self._main()
except KeyboardInterrupt:
log.log("WARNING: User interrupted progress.")
self._on_cancel()
self._on_finished()
self._time_watcher.stop()
@abc.abstractmethod
def _main(self):
"""This method will be called on object initialization to run the actual programme."""
return
def _on_cancel(self):
"""This method will be called when the user interrupted the main method."""
return
def _on_finished(self):
"""This method will be called when the main method is done (either finished regularly or cancelled)."""
# save log files
log.log_set_name(self.__class__.__name__)
# we don't flush the log here, because other apps include each other
log.log_save(cf.get("log_dir"), flush=False)
return
```
#### File: RapidObjectDetectionUsingCascadedCNNs/app/tune_cascade_app.py
```python
from app.tune_single_app import TuneSingleApp
from app.train_cascade_app import TrainCascadeApp
class TuneCascadeApp(TuneSingleApp):
"""Tune a cascade instead of a single net."""
def _create_trainer(self) -> TrainCascadeApp:
return TrainCascadeApp(run_now=False)
```
#### File: RapidObjectDetectionUsingCascadedCNNs/data/cache.py
```python
import os
import pickle
from typing import Optional, Dict
import config as cf
import numpy as np
from data.datasets import Dataset
from data.preprocessor import Preprocessor
from utils import log
from utils.singleton import Singleton
class Cache(metaclass=Singleton):
"""This singleton class handles persisting and loading of user-defined data in order to reduce the need of
re-calculation.
"""
# this version will be added to each created cached file. if the version number stored in a loaded file is
# smaller than this, it will not be used
# -> increase it whenever old cache data is incompatible
_cache_version = 8
# the following keys will be used to access the cached data in a dictionary
# they will also be used as the name of the file they will be saved to
# Dataset keys
KEY_DATA_X = "x"
KEY_DATA_Y = "y"
KEY_CACHE_VERSION = "cache_version"
KEY_CONFIG = "config"
KEY_NEXT_NEW_IID = "next_new_iid"
KEY_LABEL_IDS = "label_ids"
KEY_LABEL_NAME_BY_ID = "label_name_by_id"
KEY_LABEL_ID_BY_NAME = "label_id_by_name"
KEY_PREPROCESSOR = "preprocessor"
# file list loader keys
CATEGORY_PREFIX_FILE_LIST_LOADER = "file_list_loader_"
KEY_FLL_IMG_INFOS_PER_DS = "image_infos_per_dataset"
KEY_FLL_IMG_INFOS = "image_infos"
KEY_FLL_IMG_INFOS_PER_IID = "image_infos_per_iid_label"
def __init__(self):
"""Create the singleton object."""
# ensure that the root cache path does exist
if not os.path.exists(self._ds_path("")):
os.makedirs(self._ds_path(""))
# inform the user about deprecated cache data
deprecated_cache_num = self._count_old_cache_version_folders()
if deprecated_cache_num > 0:
log.log("Found {} deprecated cache folders. Go ahead and delete them manually.".format(deprecated_cache_num))
def _ds_path(self, dataset_key: str, suffix=None, suffix_extension=".npy") -> str:
"""Get the file path to the specified dataset cache.
Note, datasets are cached in a slightly-different structure than other objects.
:param dataset_key: The key identifying the dataset which should be cached.
:param suffix: An additional suffix which will be appended to the base file name.
:param suffix_extension: The file extension. Either ".npy" or ".p".
:return:
"""
# each version gets its own subdirectory
path = os.path.join(self._base_path("dataset"), "{}x{}".format(
cf.get("img_width"),
cf.get("img_height")
))
# each dataset, too
path = os.path.join(path, dataset_key)
# suffix is used for the actual file name + .npy
if suffix is not None:
path = os.path.join(path, suffix + suffix_extension)
return path
def _base_path(self, category, suffix=None, suffix_extension=".npy") -> str:
"""Get the file path to the given non-dataset cache element.
:param category: Cache elements are grouped in categories.
:param suffix: This suffix should describe the individual element of the associated category.
:param suffix_extension: The file extension. Either ".npy" or ".p".
:return:
"""
# each version gets its own subdirectory
path = os.path.join(cf.get("cache_path_root"), "v{}".format(
self._cache_version,
))
# each dataset, too
path = os.path.join(path, category)
# suffix is used for the actual file name + .npy
if suffix is not None:
path = os.path.join(path, suffix + suffix_extension)
return path
def load_dataset(self, dataset_key: str) -> Optional[Dict]:
"""Return the requested dataset parts structured in a dictionary, or None if not available/valid.
:param dataset_key: The key identifying the dataset which should be loaded from cache.
:return:
"""
# if a cached file does exist
if os.path.isfile(self._ds_path(dataset_key, self.KEY_CACHE_VERSION)):
log.log("Found cached data")
log.log(".. loading cached dataset {}".format(dataset_key))
loaded_data = dict()
for file_name in os.listdir(self._ds_path(dataset_key)):
key = os.path.splitext(file_name)[0]
if file_name.endswith(".npy"):
loaded_data[key] = np.load(self._ds_path(dataset_key, key))
elif file_name.endswith(".p"):
with open(self._ds_path(dataset_key, key, ".p"), "rb") as input_file:
loaded_data[key] = pickle.load(input_file)
log.log(".. dataset has been loaded successfully from the cache")
# restore class attributes
# TODO do not use private vars, ensure that no conflict between different datasets can exist
log.log(".. loading global meta information about available labels")
Dataset._next_new_iid = loaded_data[self.KEY_NEXT_NEW_IID]
Dataset._label_internal_ids = loaded_data[self.KEY_LABEL_IDS]
Dataset._label_name_by_internal_id = loaded_data[self.KEY_LABEL_NAME_BY_ID]
Dataset._label_internal_id_by_name = loaded_data[self.KEY_LABEL_ID_BY_NAME]
return loaded_data
else:
log.log("Cache for dataset {} is empty".format(
dataset_key
))
return None
def load(self, category, data_keys=None) -> Optional[Dict]:
"""Return the requested non-dataset data from cache, or None if not available/valid.
:param category: The category which should be (partially) loaded.
:param data_keys: If None, all found files of that category will be loaded. Otherwise, only the specified ones.
:return:
"""
# if a cached category folder does exist
if not self.is_empty(category, data_keys):
if data_keys is None:
log.log("Loading everything from cached category {}".format(category))
else:
log.log("Loading {} from cached category {}".format(category, data_keys))
loaded_data = dict()
for file_name in os.listdir(self._base_path(category)):
key = os.path.splitext(file_name)[0]
if data_keys is None or key in data_keys:
if file_name.endswith(".npy"):
loaded_data[key] = np.load(self._base_path(category, key))
elif file_name.endswith(".p"):
with open(self._base_path(category, key, ".p"), "rb") as input_file:
loaded_data[key] = pickle.load(input_file)
log.log(".. category {} has been loaded successfully from the cache".format(category))
return loaded_data
else:
if data_keys is None:
log.log("Cache for category {} is completely empty".format(
category
))
else:
log.log("Cache for category {} and data keys {} is empty".format(
category,
data_keys
))
return None
def load_single(self, category: str, data_key: str):
"""Load only a single data file from the cached category.
:param category: The category which should be partially loaded.
:param data_key: A key describing the specific element of the given category.
:return: None, if no such element has been cached.
"""
result_list = self.load(category, [data_key])
if result_list is not None:
result_list = result_list[data_key]
return result_list
def is_empty(self, category: str, data_keys=None) -> bool:
"""Check whether any (specific) data for category has been cached.
:param category: The category which should be checked.
:param data_keys: If None, just any data needs to exist. Otherwise, at least one file specified by the data_keys.
:return:
"""
category_dir_exists = os.path.isdir(self._base_path(category))
if category_dir_exists and data_keys is not None:
is_empty = True
for file_name in os.listdir(self._base_path(category)):
key = os.path.splitext(file_name)[0]
if key in data_keys:
is_empty = False
break
else:
is_empty = not category_dir_exists
return is_empty
def save(self, category: str, data, suffix_extension=".npy"):
"""Save an arbitrary category to the cache.
Currently, all elements of data must use the same suffix_extension.
:param category: The category which should be saved.
:param data: The actual data that should be cached.
:param suffix_extension: The file extension. Either ".npy" or ".p".
:return:
"""
# create folder for this category
if not os.path.exists(self._base_path(category)):
os.makedirs(self._base_path(category))
# each element of the data dictionary to a separate file
for key, value in data.items():
log.log(" .. saving {}.{}".format(
category,
key
))
if suffix_extension == ".npy":
np.save(self._base_path(category, key, suffix_extension), value)
else:
with open(self._base_path(category, key, ".p"), "wb") as output_file:
pickle.dump(value, output_file)
# additional log message to signal the end of this category cache. but only, if there is more than one file
if len(data) > 1:
log.log(".. saved {}".format(category))
def save_single(self, category, data_key, data_value, suffix_extension=".npy"):
"""Save only a single data file of a category."""
self.save(category, {data_key: data_value}, suffix_extension)
def save_dataset(self, dataset_key: str, x: np.ndarray, y: np.ndarray, preprocessor: Preprocessor):
"""Cache the specified dataset.
        Does not work directly with a Dataset object, so that python lists of x and y can be saved before they are
        converted to numpy arrays. That conversion can not happen in place and might cause a memory error, whereas
        saving the lists to disk first converts them to numpy arrays automatically without the additional memory.
:param dataset_key: The key identifying the dataset which should be saved.
:param x: The raw data of the associated dataset.
:param y: The label data of the associated dataset.
:param preprocessor: The preprocessor of the associated dataset.
:return:
"""
# create folder for this dataset
if not os.path.exists(self._ds_path(dataset_key)):
os.makedirs(self._ds_path(dataset_key))
data_np = dict()
data_np[self.KEY_CACHE_VERSION] = self._cache_version
# do not save the complete dataset object, but X and Y.
# this way the calculated data will be restored, but parameters for splitting etc. can be refreshed
data_np[self.KEY_DATA_X] = x
data_np[self.KEY_DATA_Y] = y
        # add the complete current configuration to ensure that no information about the loaded dataset is missing
data_np[self.KEY_CONFIG] = cf._cf
# save each element of the dictionary to a separate file
for key, value in data_np.items():
np.save(self._ds_path(dataset_key, key), value)
# pickle instead of numpy
data_pickle = dict()
# store further dataset class attributes
# TODO do not use private vars
data_pickle[self.KEY_NEXT_NEW_IID] = Dataset._next_new_iid
data_pickle[self.KEY_LABEL_IDS] = Dataset._label_internal_ids
data_pickle[self.KEY_LABEL_NAME_BY_ID] = Dataset._label_name_by_internal_id
data_pickle[self.KEY_LABEL_ID_BY_NAME] = Dataset._label_internal_id_by_name
data_pickle[self.KEY_PREPROCESSOR] = preprocessor
for key, value in data_pickle.items():
with open(self._ds_path(dataset_key, key, ".p"), "wb") as output_file:
pickle.dump(value, output_file)
log.log("Cached dataset " + dataset_key)
# save copy of current log file, but don't flush
log.log_save(self._ds_path(dataset_key), flush=False)
def _count_old_cache_version_folders(self) -> int:
"""Get the number of deprecated cache versions that still exist on disk."""
# assuming that there is at least one folder for the current version. all others are old
return len(os.listdir(cf.get("cache_path_root"))) - 1
```
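A minimal usage sketch for the `Cache` singleton above, assuming the project's `config` module provides the cache paths and image dimensions the class reads; the category name `"demo"` and the key `"labels"` are made up for illustration.
```python
# Hypothetical round-trip through the Cache singleton defined above.
# Assumes config provides "cache_path_root", "img_width" and "img_height".
import numpy as np
from data.cache import Cache

cache = Cache()  # singleton: repeated instantiations return the same object

# save a small numpy array under a made-up category/key
cache.save_single("demo", "labels", np.arange(10))

# later (possibly in another run against the same cache version), load it back
if not cache.is_empty("demo", ["labels"]):
    labels = cache.load_single("demo", "labels")
    print(labels.shape)  # -> (10,)
```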
#### File: RapidObjectDetectionUsingCascadedCNNs/network/net_builder.py
```python
import tensorflow as tf
def max_pool(x, size=2, stride=2):
"""Create a max-pooling layer.
:param x: The input of the pooling layer.
:param size: The width and height of the kernel.
:param stride: The stride between consecutive kernels.
:return:
"""
return tf.nn.max_pool(x,
ksize=[1, size, size, 1],
strides=[1, stride, stride, 1],
padding="SAME")
def conv2d(x, n_output, k_h=5, k_w=5, stride_vertical=2, stride_horizontal=2, padding="SAME", name="conv2d"):
"""Create a 2D convolutional layer.
:param x: The input of the convolutional layer.
:param n_output: The number of filters / feature maps.
:param k_h: The kernel height (height of the input neighborhood).
:param k_w: The kernel width (width of the input neighborhood).
:param stride_vertical: Vertical stride between consecutive kernel duplicates.
:param stride_horizontal: Horizontal stride between consecutive kernel duplicates.
:param padding: Padding method.
:param name: The name of the convolutional layer.
:return:
"""
with tf.variable_scope(name, reuse=None):
# create new weight matrix
W = tf.get_variable(
name='W',
shape=[k_h, k_w, x.get_shape()[-1], n_output],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
# create the actual convolutional layer using the given weights
conv = tf.nn.conv2d(
name='conv',
input=x,
filter=W,
strides=[1, stride_vertical, stride_horizontal, 1],
padding=padding)
# create bias
b = tf.get_variable(
name='b',
shape=[n_output],
initializer=tf.constant_initializer(0.0))
# add bias to the convolutional layer
h = tf.nn.bias_add(
name='h',
value=conv,
bias=b)
return h, W
def fully_connected(x, n_output, name="fc", activation=None):
"""Create a fully-connected layer.
:param x: The input of the fully-connected layer.
:param n_output: The number of neurons used in the fully-connected layer.
:param name: The name of the fully-connected layer.
:param activation: An optional activation method which may be applied right after the fully-connected layer.
:return:
"""
if n_output < 1:
raise ValueError("Can not create a fully-connected layer with {} neurons.".format(n_output))
if len(x.get_shape()) != 2:
        x = flatten(x)
with tf.variable_scope(name, reuse=None):
# create weight matrix
n_input = x.get_shape().as_list()[1]
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
# create bias
b = tf.get_variable(
name='b',
shape=[n_output],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
# create the actual fully-connected layer using the weights and bias
h = tf.nn.bias_add(
name='h',
value=tf.matmul(x, W),
bias=b)
# maybe append the activation function
if activation:
h = activation(h)
return h, W, b
def flatten(x):
"""Flatten the tensor x to two dimensions."""
with tf.variable_scope("flatten"):
dims = x.get_shape().as_list()
if len(dims) == 4:
flattened = tf.reshape(
x,
shape=[-1, dims[1] * dims[2] * dims[3]])
elif len(dims) == 3:
flattened = tf.reshape(
x,
shape=[-1, dims[1] * dims[2]])
elif len(dims) == 2 or len(dims) == 1:
flattened = x
else:
raise ValueError("Expected n dimensions of 1, 2 or 4, but found: {}".format(len(dims)))
return flattened
```
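As a rough sketch of how these helpers compose, the snippet below wires them into a tiny TensorFlow 1.x graph; the input shape and layer sizes are illustrative only and are not taken from the repository's actual cascade configuration.
```python
# Illustrative only: compose the helpers above into a tiny TF 1.x graph.
import tensorflow as tf
from network.net_builder import conv2d, fully_connected, max_pool

x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="x")

h1, _ = conv2d(x, n_output=16, name="conv1")  # stride 2 + SAME -> [None, 16, 16, 16]
h1 = tf.nn.relu(h1)
h1 = max_pool(h1)                             # 2x2 pooling -> [None, 8, 8, 16]

# fully_connected() flattens its 4-D input internally before the matmul
logits, _, _ = fully_connected(h1, n_output=2, name="fc_out")
probabilities = tf.nn.softmax(logits)
```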
#### File: Johnson145/RapidObjectDetectionUsingCascadedCNNs/run_sampling.py
```python
import traceback
import cv2
from PIL import Image
import config as cf
import os
from data.db import label
from data.db.file_list_loader import FileListLoader
from data.db.dataset_loader import DatasetLoader
from data.db.label import Label
from data.cache import Cache
from utils import log
from data.rectangles import BoundingBox, RestrictedArea
import numpy as np
from utils.img_manipulation import random_img_patch, PotentialDeadlockError
class Sample:
"""Helper class to bundle information of new samples."""
def __init__(self, label: Label, img_raw):
self.label = label
self.img_raw = img_raw
# sampling is supported only once for the complete input
if os.path.isdir(cf.get("dataset_presampled_path_root")):
raise AttributeError("Can't create an augmented input, because there is already one on disk.")
# create missing base folder
os.makedirs(cf.get("dataset_presampled_path_root"))
# used base must be the native data
# (so this must be called before loading any image data)
cf.set("dataset_path_root", cf.get("dataset_native_path_root"))
# cache must be disabled, otherwise we may still try to load an already pre-sampled dataset
cf.set("cache_dataset", False)
# load native input
FileListLoader().image_infos
# total number of saved samples
i_samples_total = 0
# this existing classifier will be used to identify potential faces (that were not annotated).
# the following settings for cv2_scale_factor and cf.get("nms_opencv_min_neighbors") will produce quite a lot of false positives in favor
# of reducing false negatives. this would not be a useful configuration for a production environment, but we want to
# ensure that no faces make their way into the background sample pool.
if cf.get("foreground_equals_face"):
log.log("background patches which look like human faces will be removed automatically")
cv2_scale_factor = 1.1
face_cascade = cv2.CascadeClassifier(
os.path.join(cf.get("path_opencv_data"), 'haarcascade_frontalface_default.xml'))
# log some settings
log.log("number of additional background patches, which will be sampled from each original image: {}".format(
cf.get("sampling_multiplier")
))
log.log("maximum allowed IoU between a new background sample and any known foreground region: {0:.2f}%".format(
cf.get("sampling_background_max_iou_with_foreground") * 100
))
# process each native sample after another
i_imgs = 0
background_label = label.get_by_key(label.KEY_BACKGROUND)
log.log("begin processing one native image file after the other (this may take a while)")
for img_info in FileListLoader().image_infos:
try:
# collect new samples based on the current image
img_new_samples = []
restrictions = [] # ensure that no background patches intersect foreground information
# load the original image only once
# (there is no need to use the permanent internal cache though)
img_raw = Image.open(img_info.path_original).convert('RGB')
img_width, img_height = img_raw.size
# first of all, we want to ensure that all annotated regions are used
# (even if this implies that we get more samples than cf.get("sampling_multiplier"))
if img_info.annotations is not None:
for annotation in img_info.annotations:
if annotation.bbox_is_valid:
# crop
annotation_img = img_raw.crop((annotation.xmin, annotation.ymin, annotation.xmax, annotation.ymax))
# annotated regions must always contain the same label as the complete image
annotation_sample = Sample(img_info.label, annotation_img)
img_new_samples.append(annotation_sample)
# remember annotated foreground regions
if img_info.label.is_foreground:
restricted_area = RestrictedArea(annotation.bbox, img_width=img_width, img_height=img_height)
restrictions.append(restricted_area)
# some images do contain faces, although they are not annotated. this is true for background images as well
# as for foreground images coming from AFLW or ImageNet.
# so we will not only restrict known annotations, but potential faces detected by OpenCV, too
# (they won't be used as foreground samples though)
if cf.get("foreground_equals_face"):
img_raw_np = np.array(img_raw) # convert pil image to np array, which can be used by OpenCV
img_raw_gray = cv2.cvtColor(img_raw_np, cv2.COLOR_RGB2GRAY)
faces = face_cascade.detectMultiScale(img_raw_gray, cv2_scale_factor, cf.get("nms_opencv_min_neighbors"))
for (x, y, w, h) in faces:
# pay attention to the order of x and y!
face_bbox = BoundingBox(x, y, x+w, y+h)
restricted_area = RestrictedArea(face_bbox, img_width=img_width, img_height=img_height)
restrictions.append(restricted_area)
# produce the remaining new samples by using random background patches in different sizes
# for background images, we can use everything. foreground images can only be used, if they contain partially
# background
if img_info.label.is_background or len(restrictions) > 0:
while len(img_new_samples) < cf.get("sampling_multiplier"):
try:
background_raw = random_img_patch(img_raw, restrictions,
cf.get("sampling_background_max_iou_with_foreground"))
background_sample = Sample(background_label, background_raw)
img_new_samples.append(background_sample)
                except PotentialDeadlockError as e:
                    # log the error, then stop adding background patches after the first failure
                    log.log("{}".format(e))
                    break
# save new samples on disk
i_samples_img = 0 # number of saved samples belonging to the current native sample
for sample in img_new_samples:
# build the new file name
dst_file_name = "aug_{}_{}_{}".format(
i_samples_total,
i_samples_img,
img_info.basename
)
# build the new file path
# (do not just replace parts of the original one, as at least the label folder can be different now, too)
dst_folder = cf.get("dataset_presampled_path_root")
dst_folder = os.path.join(dst_folder, img_info.dataset_key)
dst_folder = os.path.join(dst_folder, "images")
dst_folder = os.path.join(dst_folder, "original") # unscaled original windows will be saved. this allows dynamic resizing in the "main" code
dst_folder = os.path.join(dst_folder, sample.label.key)
# create dirs
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
# concat folder and file name
dst = os.path.join(dst_folder, dst_file_name)
# the new file path must not exist yet
if os.path.exists(dst):
raise ValueError("The destination path {} points to an existing file.".format(
dst
))
# save augmented image sample on disk
sample.img_raw.save(dst)
i_samples_img += 1
i_samples_total += 1
    except Exception:
log.log("WARNING: Skipped {}, because of an unexpected error:\n{}".format(
img_info.full_key,
traceback.format_exc()
))
i_imgs += 1
if i_imgs % 100 == 0:
log.log("Processed {}/{} native files".format(
i_imgs,
len(FileListLoader().image_infos)
))
log.log("All augmented original files have been created.")
log.log("In order to use the new files, we need to recalculate the cached datasets")
# backup existing cache by renaming the folder
c = Cache()
old_ds_cache_path = c._base_path("dataset")
if os.path.exists(old_ds_cache_path):
new_path_suffix = 0
while True:
new_path = "{}_pre_sampling_backup_{}".format(old_ds_cache_path, new_path_suffix)
if os.path.exists(new_path):
new_path_suffix += 1 # this backup already exists, try again
else:
log.log("Moving existing dataset cache to {}".format(new_path))
os.rename(old_ds_cache_path, new_path)
break
# new data should be loaded based on the just created pre-sampled data
cf.set("dataset_path_root", cf.get("dataset_presampled_path_root"))
# furthermore, we need to reset the already loaded file lists
FileListLoader().reset()
# now, we can try to load the dataset again
# this will start resizing of the pre-sampled data as well as caching afterwards
loader = DatasetLoader()
loader.dataset()
log.log("Done. Don't forget to set _cf[\"dataset_path_root\"] = _cf[\"dataset_presampled_path_root\"]")
```
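The script above rejects background patches that overlap annotated foreground regions too strongly; the real `BoundingBox`/`RestrictedArea`/`random_img_patch` helpers live elsewhere in the repository, so the following is only a stand-alone sketch of the IoU test such a rejection rule needs.
```python
# Stand-alone sketch of the overlap check behind the background-sampling rule.
# The hypothetical helpers below only illustrate the idea; they are not the
# repository's BoundingBox/RestrictedArea implementation.
def iou(box_a, box_b):
    """Intersection over union of two (xmin, ymin, xmax, ymax) boxes."""
    ax0, ay0, ax1, ay1 = box_a
    bx0, by0, bx1, by1 = box_b
    inter_w = max(0, min(ax1, bx1) - max(ax0, bx0))
    inter_h = max(0, min(ay1, by1) - max(ay0, by0))
    inter = inter_w * inter_h
    union = (ax1 - ax0) * (ay1 - ay0) + (bx1 - bx0) * (by1 - by0) - inter
    return inter / union if union > 0 else 0.0

def patch_is_background(candidate, foreground_boxes, max_iou=0.3):
    """Accept a candidate patch only if it barely overlaps every foreground box."""
    return all(iou(candidate, fg) <= max_iou for fg in foreground_boxes)

# a patch covering half of a face region is rejected at max_iou=0.3
print(patch_is_background((0, 0, 10, 10), [(5, 0, 15, 10)], max_iou=0.3))  # False
```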
#### File: RapidObjectDetectionUsingCascadedCNNs/utils/file_handler.py
```python
import os
import platform
import socket
import subprocess
from random import choice
from typing import List
from urllib.request import Request, urlopen
#######################################################################
# allow downloading stuff with a random user agent
# see http://wolfprojects.altervista.org/articles/change-urllib-user-agent/
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246',
'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1'
]
# set default time out
timeout_seconds = 5
socket.setdefaulttimeout(timeout_seconds)
def read_txt_lines(file_path: str) -> List[str]:
"""Read the txt file at the given path and return its lines in a list."""
with open(file_path) as f:
lines = f.readlines()
# remove \n symbols at the end of each line
lines = [x.strip() for x in lines]
return lines
def _open_url(url: str):
req = Request(url, headers={'User-Agent': choice(user_agents)})
    # note, we are using a really large timeout of 3000 seconds, because the ImageNet API is really slow.
result = urlopen(req, timeout=3000)
return result
def read_url(url: str):
"""Anonymously read the file at the given url."""
result = _open_url(url).read()
return result
def read_txt_url_to_str(url: str) -> str:
"""Read the entire text file at the given url into a single string."""
result_str = read_url(url).decode('utf-8')
return result_str
def read_txt_url_lines_to_list(url: str) -> List[str]:
"""Read the text file at the given url into a list containing all lines as separate elements."""
result_str = read_txt_url_to_str(url)
lines = result_str.split("\n")
lines = [x.strip() for x in lines]
return lines
def open_file(path):
"""Open a given file or folder in the explorer/nautilus/..
See https://stackoverflow.com/a/16204023/1665966
"""
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
subprocess.Popen(["open", path])
else:
subprocess.Popen(["xdg-open", path])
```
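A short usage sketch for the helpers above; the URL is a placeholder and the call assumes network access.
```python
# Usage sketch; the URL is a placeholder and requires network access.
from utils.file_handler import open_file, read_txt_url_lines_to_list

# fetch a remote text file with a random user agent and split it into lines
lines = read_txt_url_lines_to_list("http://example.com/image_urls.txt")
print("{} lines downloaded".format(len(lines)))

# open the current working directory in the platform's file browser
open_file(".")
```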
#### File: RapidObjectDetectionUsingCascadedCNNs/utils/log.py
```python
import time
import os
log_cache = []
log_name = "log"
console_output_enabled = True
def log_set_name(name: str):
"""Set the global name of the current log.
The name will be included in saved files and so it makes it easier to find a specific log file.
:param name: The new name.
:return:
"""
global log_name
log_name = name
def log(msg: str, log_to_console=True, log_to_file=True):
"""Add a single log line.
:param msg: The message of the new log line.
:param log_to_console: Whether the new log line should be shown in the console / shell.
    :param log_to_file: Whether the new log line should be appended to the log cache which will be used for saving log
files.
:return:
"""
global log_cache
msg = time.strftime('%X') + ": {}".format(msg) # prepend the current time
if log_to_console and console_output_enabled:
print(msg)
if log_to_file:
log_cache.append(time.strftime('%x') + ' ' + msg)
def log_save(directory, flush=True):
"""Save a log file with all currently-cached log lines.
:param directory: The file path to the dir which should be used to save the new log file.
:param flush: Whether to clear the current log cache after persisting it.
:return:
"""
global log_cache, log_name
# create destination dir, if it does not exist yet
if not os.path.exists(directory):
os.makedirs(directory)
import config as cf
prefix = cf.get("session_key")
# write to file
f = open(directory + '/' + prefix + '-' + log_name + '.txt', 'w')
f.write('\n'.join(log_cache))
f.close()
# optionally, clear the current cache
if flush:
log_clear()
def log_clear():
"""Clear / flush the current log cache."""
global log_cache
log_cache = []
def disable_console_output():
"""Do not print any further log lines to the console / shell."""
global console_output_enabled
console_output_enabled = False
``` |
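A brief usage sketch for the log module above; note that `log_save` additionally assumes the project's `config` module provides a `session_key`, as the code requires. The output directory is a placeholder.
```python
# Usage sketch for the logging helpers above; the directory is a placeholder.
from utils import log

log.log_set_name("training")                    # include "training" in the saved file name
log.log("starting epoch 1")                     # printed and cached for the log file
log.log("debug detail", log_to_console=False)   # file-only line

# persist everything collected so far and clear the in-memory cache
log.log_save("/tmp/logs")
```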
{
"source": "johnson2427/ape",
"score": 3
} |
#### File: ape/managers/chain.py
```python
import time
from pathlib import Path
from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union
from ethpm_types import ContractType
from ape.api import Address, BlockAPI, ReceiptAPI
from ape.api.address import BaseAddress
from ape.api.networks import LOCAL_NETWORK_NAME, NetworkAPI, ProxyInfoAPI
from ape.api.query import BlockQuery
from ape.exceptions import ChainError, UnknownSnapshotError
from ape.logging import logger
from ape.managers.base import BaseManager
from ape.types import AddressType, BlockID, SnapshotID
from ape.utils import cached_property
class BlockContainer(BaseManager):
"""
A list of blocks on the chain.
    Usage example::
from ape import chain
latest_block = chain.blocks[-1]
"""
@property
def head(self) -> BlockAPI:
"""
The latest block.
"""
return self._get_block("latest")
@property
def height(self) -> int:
"""
The latest block number.
"""
if self.head.number is None:
raise ChainError("Latest block has no number.")
return self.head.number
@property
def network_confirmations(self) -> int:
return self.provider.network.required_confirmations
def __getitem__(self, block_number: int) -> BlockAPI:
"""
Get a block by number. Negative numbers start at the chain head and
move backwards. For example, ``-1`` would be the latest block and
``-2`` would be the block prior to that one, and so on.
Args:
block_number (int): The number of the block to get.
Returns:
:class:`~ape.api.providers.BlockAPI`
"""
if block_number < 0:
block_number = len(self) + block_number
return self._get_block(block_number)
def __len__(self) -> int:
"""
The number of blocks in the chain.
Returns:
int
"""
return self.height + 1
def __iter__(self) -> Iterator[BlockAPI]:
"""
Iterate over all the current blocks.
Returns:
Iterator[:class:`~ape.api.providers.BlockAPI`]
"""
return self.range(len(self))
def query(
self,
*columns: List[str],
start_block: int = 0,
stop_block: Optional[int] = None,
step: int = 1,
engine_to_use: Optional[str] = None,
) -> Iterator:
"""
A method for querying blocks and returning an Iterator. If you
do not provide a starting block, the 0 block is assumed. If you do not
provide a stopping block, the last block is assumed. You can pass
``engine_to_use`` to short-circuit engine selection.
Raises:
:class:`~ape.exceptions.ChainError`: When ``stop_block`` is greater
than the chain length.
Args:
columns (List[str]): columns in the DataFrame to return
start_block (int): The first block, by number, to include in the
query. Defaults to 0.
stop_block (Optional[int]): The last block, by number, to include
in the query. Defaults to the latest block.
step (int): The number of blocks to iterate between block numbers.
Defaults to ``1``.
engine_to_use (Optional[str]): query engine to use, bypasses query
engine selection algorithm.
Returns:
Iterator
"""
if stop_block is None:
stop_block = self.height
elif stop_block > self.height:
raise ChainError(
f"'stop_block={stop_block}' cannot be greater than the chain length ({len(self)}). "
f"Use '{self.poll_blocks.__name__}()' to wait for future blocks."
)
query = BlockQuery(
columns=columns,
start_block=start_block,
stop_block=stop_block,
step=step,
engine_to_use=engine_to_use,
)
return self.query_manager.query(query)
def range(
self, start_or_stop: int, stop: Optional[int] = None, step: int = 1
) -> Iterator[BlockAPI]:
"""
Iterate over blocks. Works similarly to python ``range()``.
Raises:
:class:`~ape.exceptions.ChainError`: When ``stop`` is greater
than the chain length.
:class:`~ape.exceptions.ChainError`: When ``stop`` is less
than ``start_block``.
:class:`~ape.exceptions.ChainError`: When ``stop`` is less
than 0.
:class:`~ape.exceptions.ChainError`: When ``start`` is less
than 0.
Args:
start_or_stop (int): When given just a single value, it is the stop.
Otherwise, it is the start. This mimics the behavior of ``range``
built-in Python function.
stop (Optional[int]): The block number to stop before. Also the total
number of blocks to get. If not setting a start value, is set by
the first argument.
            step (Optional[int]): The value to increment by. Defaults to ``1``.
Returns:
Iterator[:class:`~ape.api.providers.BlockAPI`]
"""
if stop is None:
stop = start_or_stop
start = 0
else:
start = start_or_stop
if stop > len(self):
raise ChainError(
f"'stop={stop}' cannot be greater than the chain length ({len(self)}). "
f"Use '{self.poll_blocks.__name__}()' to wait for future blocks."
)
elif stop < start:
raise ValueError(f"stop '{stop}' cannot be less than start '{start}'.")
        elif stop < 0:
            raise ValueError(f"stop '{stop}' cannot be negative.")
        elif start_or_stop < 0:
            raise ValueError(f"start '{start_or_stop}' cannot be negative.")
# Note: the range `stop_block` is a non-inclusive stop, while the
# `.query` method uses an inclusive stop, so we must adjust downwards.
results = self.query("*", start_block=start, stop_block=stop - 1, step=step) # type: ignore
for _ in results:
yield _
def poll_blocks(
self,
start: Optional[int] = None,
stop: Optional[int] = None,
required_confirmations: Optional[int] = None,
) -> Iterator[BlockAPI]:
"""
Poll new blocks. Optionally set a start block to include historical blocks.
        **NOTE**: This is a daemon method; it does not terminate unless an exception occurs
or a ``stop`` is given.
Usage example::
from ape import chain
for new_block in chain.blocks.poll_blocks():
print(f"New block found: number={new_block.number}")
Args:
start (Optional[int]): The block number to start with. Defaults to the pending
block number.
stop (Optional[int]): Optionally set a future block number to stop at.
Defaults to never-ending.
required_confirmations (Optional[int]): The amount of confirmations to wait
before yielding the block. The more confirmations, the less likely a reorg will occur.
Defaults to the network's configured required confirmations.
Returns:
Iterator[:class:`~ape.api.providers.BlockAPI`]
"""
if required_confirmations is None:
required_confirmations = self.network_confirmations
if stop is not None and stop <= self.chain_manager.blocks.height:
raise ValueError("'stop' argument must be in the future.")
# Get number of last block with the necessary amount of confirmations.
latest_confirmed_block_number = self.height - required_confirmations
has_yielded = False
if start is not None:
# Front-load historically confirmed blocks.
yield from self.range(start, latest_confirmed_block_number + 1)
has_yielded = True
time.sleep(self.provider.network.block_time)
while True:
confirmable_block_number = self.height - required_confirmations
if confirmable_block_number < latest_confirmed_block_number and has_yielded:
logger.error(
"Chain has reorganized since returning the last block. "
"Try adjusting the required network confirmations."
)
elif confirmable_block_number > latest_confirmed_block_number:
# Yield all missed confirmable blocks
new_blocks_count = confirmable_block_number - latest_confirmed_block_number
for i in range(new_blocks_count):
block_num = latest_confirmed_block_number + i
block = self._get_block(block_num)
yield block
if stop and block.number == stop:
return
has_yielded = True
latest_confirmed_block_number = confirmable_block_number
time.sleep(self.provider.network.block_time)
def _get_block(self, block_id: BlockID) -> BlockAPI:
return self.provider.get_block(block_id)
class AccountHistory(BaseManager):
"""
    A container mapping account addresses to the transactions from the active session.
"""
_map: Dict[AddressType, List[ReceiptAPI]] = {}
@cached_property
def _convert(self) -> Callable:
return self.conversion_manager.convert
def __getitem__(self, address: Union[BaseAddress, AddressType, str]) -> List[ReceiptAPI]:
"""
Get the list of transactions from the active session for the given address.
Args:
address (``AddressType``): The sender of the desired transactions.
Returns:
List[:class:`~ape.api.transactions.TransactionAPI`]: The list of transactions. If there
are no recorded transactions, returns an empty list.
"""
address_key: AddressType = self._convert(address, AddressType)
explorer = self.provider.network.explorer
explorer_receipts = (
[r for r in explorer.get_account_transactions(address_key)] if explorer else []
)
for receipt in explorer_receipts:
if receipt.txn_hash not in [r.txn_hash for r in self._map.get(address_key, [])]:
self.append(receipt)
return self._map.get(address_key, [])
def __iter__(self) -> Iterator[AddressType]:
"""
Iterate through the accounts listed in the history map.
Returns:
List[str]
"""
yield from self._map
def items(self) -> Iterator[Tuple[AddressType, List[ReceiptAPI]]]:
"""
Iterate through the list of address-types to list of transaction receipts.
Returns:
Iterator[Tuple[``AddressType``, :class:`~ape.api.transactions.ReceiptAPI`]]
"""
yield from self._map.items()
def append(self, txn_receipt: ReceiptAPI):
"""
Add a transaction to the stored list for the given account address.
Raises:
:class:`~ape.exceptions.ChainError`: When trying to append a transaction
receipt that is already in the list.
Args:
txn_receipt (:class:`~ape.api.transactions.ReceiptAPI`): The transaction receipt.
**NOTE**: The receipt is accessible in the list returned from
:meth:`~ape.managers.chain.AccountHistory.__getitem__`.
"""
address = self._convert(txn_receipt.sender, AddressType)
if address not in self._map:
self._map[address] = [txn_receipt]
return
if txn_receipt.txn_hash in [r.txn_hash for r in self._map[address]]:
raise ChainError(f"Transaction '{txn_receipt.txn_hash}' already known.")
self._map[address].append(txn_receipt)
def revert_to_block(self, block_number: int):
"""
Remove all receipts past the given block number.
Args:
block_number (int): The block number to revert to.
"""
self._map = {
a: [r for r in receipts if r.block_number <= block_number]
for a, receipts in self.items()
}
class ContractCache(BaseManager):
"""
A collection of cached contracts. Contracts can be cached in two ways:
1. An in-memory cache of locally deployed contracts
2. A cache of contracts per network (only permanent networks are stored this way)
When retrieving a contract, if a :class:`~ape.api.explorers.ExplorerAPI` is used,
it will be cached to disk for faster look-up next time.
"""
_local_contracts: Dict[AddressType, ContractType] = {}
_local_proxies: Dict[AddressType, ProxyInfoAPI] = {}
@property
def _network(self) -> NetworkAPI:
return self.provider.network
@property
def _is_live_network(self) -> bool:
return self._network.name != LOCAL_NETWORK_NAME and not self._network.name.endswith("-fork")
@property
def _contract_types_cache(self) -> Path:
network_name = self._network.name.replace("-fork", "")
return self._network.ecosystem.data_folder / network_name / "contract_types"
@property
def _proxy_info_cache(self) -> Path:
network_name = self._network.name.replace("-fork", "")
return self._network.ecosystem.data_folder / network_name / "proxy_info"
def __setitem__(self, address: AddressType, contract_type: ContractType):
"""
Cache the given contract type. Contracts are cached in memory per session.
In live networks, contracts also get cached to disk at
``.ape/{ecosystem_name}/{network_name}/contract_types/{address}.json``
for faster look-up next time.
Args:
address (AddressType): The on-chain address of the contract.
contract_type (ContractType): The contract's type.
"""
if self.get(address) and self._is_live_network:
return
self._local_contracts[address] = contract_type
if self._is_live_network:
# NOTE: We don't cache forked network contracts in this method to avoid caching
# deployments from a fork. However, if you retrieve a contract from an explorer
# when using a forked network, it will still get cached to disk.
self._cache_contract_to_disk(address, contract_type)
def __getitem__(self, address: AddressType) -> ContractType:
contract_type = self.get(address)
if not contract_type:
raise IndexError(f"No contract type found at address '{address}'.")
return contract_type
def get(
self, address: AddressType, default: Optional[ContractType] = None
) -> Optional[ContractType]:
"""
Get a contract type by address.
If the contract is cached, it will return the contract from the cache.
Otherwise, if on a live network, it fetches it from the
:class:`~ape.api.explorers.ExplorerAPI`.
Args:
address (AddressType): The address of the contract.
default (Optional[ContractType]): A default contract when none is found.
Defaults to ``None``.
Returns:
Optional[ContractType]: The contract type if it was able to get one,
otherwise the default parameter.
"""
contract_type = self._local_contracts.get(address)
if contract_type:
return contract_type
if self._network.name == LOCAL_NETWORK_NAME:
# Don't check disk-cache or explorer when using local
return default
contract_type = self._get_contract_type_from_disk(address)
if not contract_type:
# Contract could be a minimal proxy
proxy_info = self._local_proxies.get(address) or self._get_proxy_info_from_disk(address)
if not proxy_info:
proxy_info = self.provider.network.ecosystem.get_proxy_info(address)
if proxy_info and self._is_live_network:
self._cache_proxy_info_to_disk(address, proxy_info)
if proxy_info:
self._local_proxies[address] = proxy_info
return self.get(proxy_info.target)
# Also gets cached to disk for faster lookup next time.
contract_type = self._get_contract_type_from_explorer(address)
# Cache locally for faster in-session look-up.
if contract_type:
self._local_contracts[address] = contract_type
return contract_type or default
def instance_at(
self, address: "AddressType", contract_type: Optional[ContractType] = None
) -> BaseAddress:
contract_type = contract_type or self.get(address)
if contract_type:
return self.create_contract(address, contract_type)
return Address(address)
def _get_contract_type_from_disk(self, address: AddressType) -> Optional[ContractType]:
address_file = self._contract_types_cache / f"{address}.json"
if not address_file.is_file():
return None
return ContractType.parse_raw(address_file.read_text())
def _get_proxy_info_from_disk(self, address: AddressType) -> Optional[ProxyInfoAPI]:
address_file = self._proxy_info_cache / f"{address}.json"
if not address_file.is_file():
return None
return ProxyInfoAPI.parse_raw(address_file.read_text())
def _get_contract_type_from_explorer(self, address: AddressType) -> Optional[ContractType]:
if not self._network.explorer:
return None
try:
contract_type = self._network.explorer.get_contract_type(address)
except Exception as err:
logger.error(f"Unable to fetch contract type at '{address}' from explorer.\n{err}")
return None
if contract_type:
            # Cache contract for faster look-up next time.
self._cache_contract_to_disk(address, contract_type)
return contract_type
def _cache_contract_to_disk(self, address: AddressType, contract_type: ContractType):
self._contract_types_cache.mkdir(exist_ok=True, parents=True)
address_file = self._contract_types_cache / f"{address}.json"
address_file.write_text(contract_type.json())
def _cache_proxy_info_to_disk(self, address: AddressType, proxy_info: ProxyInfoAPI):
self._proxy_info_cache.mkdir(exist_ok=True, parents=True)
address_file = self._proxy_info_cache / f"{address}.json"
address_file.write_text(proxy_info.json())
class ChainManager(BaseManager):
"""
A class for managing the state of the active blockchain.
Also handy for querying data about the chain and managing local caches.
Access the chain manager singleton from the root ``ape`` namespace.
Usage example::
from ape import chain
"""
_snapshots: List[SnapshotID] = []
_chain_id_map: Dict[str, int] = {}
_block_container_map: Dict[int, BlockContainer] = {}
_account_history_map: Dict[int, AccountHistory] = {}
contracts: ContractCache = ContractCache()
@property
def blocks(self) -> BlockContainer:
"""
The list of blocks on the chain.
"""
if self.chain_id not in self._block_container_map:
blocks = BlockContainer()
self._block_container_map[self.chain_id] = blocks
return self._block_container_map[self.chain_id]
@property
def account_history(self) -> AccountHistory:
"""
A mapping of transactions from the active session to the account responsible.
"""
if self.chain_id not in self._account_history_map:
history = AccountHistory()
self._account_history_map[self.chain_id] = history
return self._account_history_map[self.chain_id]
@property
def chain_id(self) -> int:
"""
The blockchain ID.
See `ChainList <https://chainlist.org/>`__ for a comprehensive list of IDs.
"""
network_name = self.provider.network.name
if network_name not in self._chain_id_map:
self._chain_id_map[network_name] = self.provider.chain_id
return self._chain_id_map[network_name]
@property
def gas_price(self) -> int:
"""
The price for what it costs to transact.
"""
return self.provider.gas_price
@property
def base_fee(self) -> int:
"""
The minimum value required to get your transaction included on the next block.
Only providers that implement `EIP-1559 <https://eips.ethereum.org/EIPS/eip-1559>`__
will use this property.
Raises:
NotImplementedError: When this provider does not implement
`EIP-1559 <https://eips.ethereum.org/EIPS/eip-1559>`__.
"""
return self.provider.base_fee
@property
def pending_timestamp(self) -> int:
"""
The current epoch time of the chain, as an ``int``.
You can also set the timestamp for development purposes.
Usage example::
from ape import chain
chain.pending_timestamp += 3600
"""
return self.provider.get_block("pending").timestamp
@pending_timestamp.setter
def pending_timestamp(self, new_value: str):
self.provider.set_timestamp(self.conversion_manager.convert(value=new_value, type=int))
def __repr__(self) -> str:
props = f"id={self.chain_id}" if self.network_manager.active_provider else "disconnected"
return f"<{self.__class__.__name__} ({props})>"
def snapshot(self) -> SnapshotID:
"""
Record the current state of the blockchain with intent to later
call the method :meth:`~ape.managers.chain.ChainManager.revert`
to go back to this point. This method is for local networks only.
Raises:
NotImplementedError: When the active provider does not support
snapshotting.
Returns:
:class:`~ape.types.SnapshotID`: The snapshot ID.
"""
snapshot_id = self.provider.snapshot()
if snapshot_id not in self._snapshots:
self._snapshots.append(snapshot_id)
return snapshot_id
def restore(self, snapshot_id: Optional[SnapshotID] = None):
"""
Regress the current call using the given snapshot ID.
Allows developers to go back to a previous state.
Raises:
NotImplementedError: When the active provider does not support
snapshotting.
:class:`~ape.exceptions.UnknownSnapshotError`: When the snapshot ID is not cached.
:class:`~ape.exceptions.ChainError`: When there are no snapshot IDs to select from.
Args:
snapshot_id (Optional[:class:`~ape.types.SnapshotID`]): The snapshot ID. Defaults
to the most recent snapshot ID.
"""
if not self._snapshots:
raise ChainError("There are no snapshots to revert to.")
elif snapshot_id is None:
snapshot_id = self._snapshots.pop()
elif snapshot_id not in self._snapshots:
raise UnknownSnapshotError(snapshot_id)
else:
snapshot_index = self._snapshots.index(snapshot_id)
self._snapshots = self._snapshots[:snapshot_index]
self.provider.revert(snapshot_id)
self.account_history.revert_to_block(self.blocks.height)
def mine(
self,
num_blocks: int = 1,
timestamp: Optional[int] = None,
deltatime: Optional[int] = None,
) -> None:
"""
Mine any given number of blocks.
Raises:
ValueError: When a timestamp AND a deltatime argument are both passed
Args:
num_blocks (int): Choose the number of blocks to mine.
Defaults to 1 block.
timestamp (Optional[int]): Designate a time (in seconds) to begin mining.
Defaults to None.
deltatime (Optional[int]): Designate a change in time (in seconds) to begin mining.
Defaults to None
"""
if timestamp and deltatime:
raise ValueError("Cannot give both `timestamp` and `deltatime` arguments together.")
if timestamp:
self.pending_timestamp = timestamp
elif deltatime:
self.pending_timestamp += deltatime
self.provider.mine(num_blocks)
```
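A condensed usage sketch of the manager classes above, based on their docstrings; it assumes a local provider is already connected (for example inside `ape console` or an ape script), since `snapshot`, `mine`, and `restore` only work on development networks.
```python
# Sketch of the ChainManager / BlockContainer API documented above.
from ape import chain

print(chain.chain_id)            # cached per network name
print(chain.blocks.height)       # latest block number
latest = chain.blocks[-1]        # negative indices count back from the head

# local-network helpers: snapshot, mutate state, then roll back
snap = chain.snapshot()
chain.mine(5, deltatime=3600)    # mine 5 blocks, one hour into the future
for block in chain.blocks.range(0, 3):   # stop is exclusive, like range()
    print(block.number, block.timestamp)
chain.restore(snap)
```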
#### File: ape/pytest/runners.py
```python
from pathlib import Path
import click
import pytest
from _pytest.config import Config as PytestConfig
import ape
from ape.logging import logger
from ape.utils import ManagerAccessMixin
from ape_console._cli import console
from .contextmanagers import RevertsContextManager
class PytestApeRunner(ManagerAccessMixin):
def __init__(
self,
pytest_config: PytestConfig,
):
self.pytest_config = pytest_config
self._provider_is_connected = False
ape.reverts = RevertsContextManager # type: ignore
@property
def _network_choice(self) -> str:
        # The option the user provides via --network (or the default).
return self.pytest_config.getoption("network")
def pytest_exception_interact(self, report, call):
"""
A ``-I`` option triggers when an exception is raised which can be interactively handled.
Outputs the full ``repr`` of the failed test and opens an interactive shell using the
same console as the ``ape console`` command.
"""
if self.pytest_config.getoption("interactive") and report.failed:
capman = self.pytest_config.pluginmanager.get_plugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
# find the last traceback frame within the active project
traceback = call.excinfo.traceback[-1]
for tb_frame in call.excinfo.traceback[::-1]:
try:
Path(tb_frame.path).relative_to(self.project_manager.path)
traceback = tb_frame
click.echo()
click.echo(f"Traceback:{traceback}")
break
except ValueError as err:
click.echo()
logger.warn_from_exception(err, tb_frame)
pass
# get global namespace
globals_dict = traceback.frame.f_globals
# filter python internals and pytest internals
globals_dict = {
k: v
for k, v in globals_dict.items()
if not k.startswith("__") and not k.startswith("@")
}
# filter fixtures
globals_dict = {
k: v for k, v in globals_dict.items() if not hasattr(v, "_pytestfixturefunction")
}
# get local namespace
locals_dict = traceback.locals
locals_dict = {k: v for k, v in locals_dict.items() if not k.startswith("@")}
click.echo("Starting interactive mode. Type `exit` fail and halt current test.")
namespace = {"_callinfo": call, **globals_dict, **locals_dict}
console(extra_locals=namespace, project=self.project_manager)
# launch ipdb instead of console
if capman:
capman.resume_global_capture()
def pytest_runtest_setup(self, item):
"""
By default insert isolation fixtures into each test cases list of fixtures
prior to actually executing the test case.
https://docs.pytest.org/en/6.2.x/reference.html#pytest.hookspec.pytest_runtest_setup
"""
if (
self.pytest_config.getoption("disable_isolation") is True
or "_function_isolation" in item.fixturenames # prevent double injection
):
# isolation is disabled via cmdline option
return
fixture_map = item.session._fixturemanager._arg2fixturedefs
scopes = [
definition.scope
for name, definitions in fixture_map.items()
if name in item.fixturenames
for definition in definitions
]
for scope in ["session", "package", "module", "class"]:
# iterate through scope levels and insert the isolation fixture
# prior to the first fixture with that scope
try:
idx = scopes.index(scope) # will raise ValueError if `scope` not found
item.fixturenames.insert(idx, f"_{scope}_isolation")
scopes.insert(idx, scope)
except ValueError:
# intermediate scope isolations aren't filled in
continue
# insert function isolation by default
try:
item.fixturenames.insert(scopes.index("function"), "_function_isolation")
except ValueError:
# no fixtures with function scope, so append function isolation
item.fixturenames.append("_function_isolation")
def pytest_sessionstart(self):
"""
Called after the `Session` object has been created and before performing
collection and entering the run test loop.
Removes `PytestAssertRewriteWarning` warnings from the terminalreporter.
This prevents warnings that "the `ape` library was already imported and
so related assertions cannot be rewritten". The warning is not relevant
for end users who are performing tests with ape.
"""
reporter = self.pytest_config.pluginmanager.get_plugin("terminalreporter")
warnings = reporter.stats.pop("warnings", [])
warnings = [i for i in warnings if "PytestAssertRewriteWarning" not in i.message]
if warnings and not self.pytest_config.getoption("--disable-warnings"):
reporter.stats["warnings"] = warnings
@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_collection_finish(self, session):
"""
Called after collection has been performed and modified.
"""
outcome = yield
# Only start provider if collected tests.
if not outcome.get_result() and session.items and not self.network_manager.active_provider:
self.network_manager.active_provider = self.network_manager.get_provider_from_choice(
self._network_choice
)
self.network_manager.active_provider.connect()
self._provider_is_connected = True
def pytest_sessionfinish(self):
"""
Called after whole test run finished, right before returning the exit
status to the system.
**NOTE**: This hook fires even when exceptions occur, so we cannot
assume the provider successfully connected.
"""
if self._provider_is_connected:
self.chain_manager.provider.disconnect()
self._provider_is_connected = False
```
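The scope-insertion logic in `pytest_runtest_setup` is easier to see outside of pytest; the snippet below replays the same list surgery on plain Python data with made-up fixture names.
```python
# Plain-Python replay of the fixture-insertion logic in pytest_runtest_setup.
fixturenames = ["project", "accounts", "my_contract"]   # made-up fixtures
scopes = ["session", "module", "function"]              # their scopes, in order

for scope in ["session", "package", "module", "class"]:
    try:
        idx = scopes.index(scope)
        fixturenames.insert(idx, f"_{scope}_isolation")
        scopes.insert(idx, scope)
    except ValueError:
        continue  # no fixture with this scope -> no isolation fixture inserted

try:
    fixturenames.insert(scopes.index("function"), "_function_isolation")
except ValueError:
    fixturenames.append("_function_isolation")

print(fixturenames)
# ['_session_isolation', 'project', '_module_isolation', 'accounts',
#  '_function_isolation', 'my_contract']
```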
#### File: ape/utils/misc.py
```python
import json
import sys
from functools import lru_cache
from itertools import tee
from pathlib import Path
from typing import Any, Dict, Iterator, List, Mapping, Optional
import requests
import yaml
from hexbytes import HexBytes
from importlib_metadata import PackageNotFoundError, packages_distributions
from importlib_metadata import version as version_metadata
from tqdm.auto import tqdm # type: ignore
from ape.exceptions import APINotImplementedError
from ape.logging import logger
from ape.utils.os import expand_environment_variables
try:
from functools import cached_property # type: ignore
except ImportError:
from backports.cached_property import cached_property # type: ignore
try:
from functools import singledispatchmethod # type: ignore
except ImportError:
from singledispatchmethod import singledispatchmethod # type: ignore
EMPTY_BYTES32 = HexBytes("0x0000000000000000000000000000000000000000000000000000000000000000")
ZERO_ADDRESS = "0x0000000000000000000000000000000000000000"
_python_version = (
f"{sys.version_info.major}.{sys.version_info.minor}"
f".{sys.version_info.micro} {sys.version_info.releaselevel}"
)
@lru_cache(maxsize=None)
def get_distributions():
"""
Get a mapping of top-level packages to their distributions.
"""
return packages_distributions()
def get_package_version(obj: Any) -> str:
"""
Get the version of a single package.
Args:
obj: object to search inside for ``__version__``.
Returns:
str: version string.
"""
# If value is already cached/static
if hasattr(obj, "__version__"):
return obj.__version__
    # NOTE: In case we don't pass a module name
if not isinstance(obj, str):
obj = obj.__name__
# Reduce module string to base package
# NOTE: Assumed that string input is module name e.g. ``__name__``
pkg_name = obj.split(".")[0]
# NOTE: In case the distribution and package name differ
dists = get_distributions()
if pkg_name in dists:
# NOTE: Shouldn't really be more than 1, but never know
if len(dists[pkg_name]) != 1:
logger.warning(f"duplicate pkg_name '{pkg_name}'")
pkg_name = dists[pkg_name][0]
try:
return str(version_metadata(pkg_name))
except PackageNotFoundError:
# NOTE: Must handle empty string result here
return ""
__version__ = get_package_version(__name__)
USER_AGENT = f"Ape/{__version__} (Python/{_python_version})"
def load_config(path: Path, expand_envars=True, must_exist=False) -> Dict:
"""
Load a configuration file into memory.
    If ``must_exist`` is ``True``, a file at the given path must exist or else it will throw ``OSError``.
The configuration file must be a `.json` or `.yaml` or else it will throw ``TypeError``.
Args:
        path (Path): The path to the configuration file.
        expand_envars (bool): If ``True``, expand environment variables found in the file contents.
        must_exist (bool): If ``True``, raise ``OSError`` when the configuration file does not exist.
Returns:
Dict (dict): Configured settings parsed from a config file.
"""
if path.exists():
contents = path.read_text()
if expand_envars:
contents = expand_environment_variables(contents)
if path.suffix in (".json",):
config = json.loads(contents)
elif path.suffix in (".yml", ".yaml"):
config = yaml.safe_load(contents)
else:
raise TypeError(f"Cannot parse '{path.suffix}' files!")
return config or {}
elif must_exist:
raise OSError(f"{path} does not exist!")
else:
return {}
def gas_estimation_error_message(tx_error: Exception) -> str:
"""
Get an error message containing the given error and an explanation of how the
gas estimation failed, as in :class:`ape.api.providers.ProviderAPI` implementations.
Args:
tx_error (Exception): The error that occurred when trying to estimate gas.
Returns:
str: An error message explaining that the gas failed and that the transaction
will likely revert.
"""
return (
f"Gas estimation failed: '{tx_error}'. This transaction will likely revert. "
"If you wish to broadcast, you must set the gas limit manually."
)
def extract_nested_value(root: Mapping, *args: str) -> Optional[Dict]:
"""
Dig through a nested ``dict`` using the given keys and return the
last-found object.
Usage example::
>>> extract_nested_value({"foo": {"bar": {"test": "VALUE"}}}, "foo", "bar", "test")
'VALUE'
Args:
        root (Mapping): The nested mapping to dig through.
        *args (str): The keys to follow, in order.
Returns:
dict, optional: The final value if it exists
else ``None`` if the tree ends at any point.
"""
current_value: Any = root
for arg in args:
if not hasattr(current_value, "get"):
return None
current_value = current_value.get(arg)
return current_value
def add_padding_to_strings(
str_list: List[str],
extra_spaces: int = 0,
space_character: str = " ",
) -> List[str]:
"""
Append spacing to each string in a list of strings such that
they all have the same length.
Args:
str_list (List[str]): The list of strings that need padding.
extra_spaces (int): Optionally append extra spacing. Defaults to ``0``.
space_character (str): The character to use in the padding. Defaults to ``" "``.
Returns:
List[str]: A list of equal-length strings with padded spaces.
"""
longest_item = len(max(str_list, key=len))
spaced_items = []
for value in str_list:
spacing = (longest_item - len(value) + extra_spaces) * space_character
spaced_items.append(f"{value}{spacing}")
return spaced_items
def stream_response(download_url: str, progress_bar_description: str = "Downloading") -> bytes:
"""
Download HTTP content by streaming and returning the bytes.
Progress bar will be displayed in the CLI.
Args:
        download_url (str): The URL of the content to download.
        progress_bar_description (str): The label displayed next to the progress bar.
    Returns:
        bytes: The downloaded content.
"""
response = requests.get(download_url, stream=True)
response.raise_for_status()
total_size = int(response.headers.get("content-length", 0))
progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True, leave=False)
progress_bar.set_description(progress_bar_description)
content = bytes()
for data in response.iter_content(1024, decode_unicode=True):
progress_bar.update(len(data))
content += data
progress_bar.close()
return content
def raises_not_implemented(fn):
"""
Decorator for raising helpful not implemented error.
"""
def inner(*args, **kwargs):
raise APINotImplementedError(
f"Attempted to call method '{fn.__qualname__}', method not supported."
)
return inner
class cached_iterator(property):
"""
A cached iterator decorator. Use it as you would ``@cached_property``.
The intent is to prevent repeated fetches of data.
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.cache: Optional[Iterator] = None
super().__init__(fget=fget, fset=fset, fdel=fdel, doc=doc)
def __get__(self, obj, objtype=None):
iterator, self.cache = tee(self.cache if self.cache else self.fget(obj))
return iterator
__all__ = [
"cached_property",
"expand_environment_variables",
"extract_nested_value",
"gas_estimation_error_message",
"get_package_version",
"load_config",
"raises_not_implemented",
"singledispatchmethod",
"stream_response",
"USER_AGENT",
]
```
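A few of the helpers above in action; the config dictionary and strings are made up for illustration and assume `ape` is importable.
```python
# Usage sketch for some of the utilities defined above.
from ape.utils.misc import add_padding_to_strings, extract_nested_value

rows = add_padding_to_strings(["name", "gas_limit"], extra_spaces=2)
print([len(r) for r in rows])  # every entry padded to the same length -> [11, 11]

config = {"ethereum": {"mainnet": {"default_provider": "geth"}}}
print(extract_nested_value(config, "ethereum", "mainnet", "default_provider"))  # 'geth'
print(extract_nested_value(config, "ethereum", "ropsten"))  # missing key -> None
```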
#### File: functional/api/test_query.py
```python
import pytest
from pydantic import ValidationError
from ape import chain
from ape.api.query import AccountTransactionQuery, BlockQuery, BlockTransactionQuery
def test_basic_query(eth_tester_provider):
chain.mine(3)
assert [i.number for i in chain.blocks.query("*")] == [0, 1, 2, 3]
x = [i for i in chain.blocks.query("number", "timestamp")]
assert len(x) == 4
assert x[3].timestamp > x[2].timestamp >= x[1].timestamp >= x[0].timestamp
columns = list(x[0].dict().keys())
assert columns == [
"num_transactions",
"hash",
"number",
"parentHash",
"size",
"timestamp",
"gasLimit",
"gasUsed",
"baseFeePerGas",
"difficulty",
"totalDifficulty",
]
def test_block_transaction_query_api():
query = BlockTransactionQuery(columns=["*"], block_id=0)
assert query.columns == [
"chain_id",
"receiver",
"sender",
"gas_limit",
"nonce",
"value",
"data",
"type",
"max_fee",
"max_priority_fee",
"required_confirmations",
"signature",
]
def test_block_transaction_query(eth_tester_provider, sender, receiver):
sender.transfer(receiver, 100)
query = chain.blocks[-1].transactions
assert len(query) == 1
assert query[0].value == 100
assert query[0].chain_id == 61
def test_block_query(eth_tester_provider):
chain.mine(3)
with pytest.raises(ValidationError) as err:
BlockQuery(columns=["numbr"], start_block=0, stop_block=2)
assert "Unrecognized field 'numbr'" in str(err.value)
with pytest.raises(ValidationError) as err:
BlockQuery(columns=["number", "timestamp", "number"], start_block=0, stop_block=2)
assert "Duplicate fields in ['number', 'timestamp', 'number']" in str(err.value)
def test_account_query(eth_tester_provider):
chain.mine(3)
query_kwargs = dict(
account="0x0000000000000000000000000000000000000000", start_nonce=0, stop_nonce=2
)
with pytest.raises(ValidationError) as err:
AccountTransactionQuery(columns=["none"], **query_kwargs)
assert "Unrecognized field 'none'" in str(err.value)
with pytest.raises(ValidationError) as err:
AccountTransactionQuery(columns=["nonce", "chain_id", "nonce"], **query_kwargs)
assert "Duplicate fields in ['nonce', 'chain_id', 'nonce']" in str(err.value)
```
#### File: tests/functional/test_networks.py
```python
import pytest
from eth_typing import HexStr
from ape.exceptions import NetworkError
@pytest.mark.parametrize("block_id", ("latest", 0, "0", "0x0", HexStr("0x0")))
def test_get_block(eth_tester_provider, block_id):
latest_block = eth_tester_provider.get_block(block_id)
# Each parameter is the same as requesting the first block.
assert latest_block.number == 0
assert latest_block.base_fee == 1000000000
assert latest_block.gas_used == 0
def test_get_network_choices_filter_ecosystem(networks):
actual = {c for c in networks.get_network_choices(ecosystem_filter="ethereum")}
expected = {c for c in networks.get_network_choices()}
assert len(actual) == 27
assert actual == expected
def test_get_network_choices_filter_network(networks):
actual = {c for c in networks.get_network_choices(network_filter="mainnet-fork")}
assert actual == set()
def test_get_network_choices_filter_provider(networks):
actual = {c for c in networks.get_network_choices(provider_filter="test")}
expected = {"::test", ":local", "ethereum:local", "ethereum:local:test", "ethereum"}
assert actual == expected
def test_get_provider_when_no_default(networks):
ethereum = networks.get_ecosystem("ethereum")
network = ethereum.get_network("rinkeby-fork")
with pytest.raises(NetworkError) as err:
        # No provider is installed out-of-the-box for the rinkeby-fork network
network.get_provider()
assert "No default provider for network 'rinkeby-fork'" in str(err.value)
def test_get_provider_when_not_found(networks):
ethereum = networks.get_ecosystem("ethereum")
network = ethereum.get_network("rinkeby-fork")
with pytest.raises(NetworkError) as err:
network.get_provider("test")
assert "'test' is not a valid provider for network 'rinkeby-fork'" in str(err.value)
def test_repr(networks_connected_to_tester):
assert (
repr(networks_connected_to_tester) == "<NetworkManager active_provider=<test chain_id=61>>"
)
# Check individual network
assert repr(networks_connected_to_tester.provider.network) == "<local chain_id=61>"
```
#### File: tests/functional/test_receipt.py
```python
import pytest
@pytest.fixture
def invoke_receipt(solidity_contract_instance, owner):
return solidity_contract_instance.setNumber(1, sender=owner)
def test_show_trace(invoke_receipt):
# For better tests, see a provider plugin that supports this RPC,
# such as ape-hardhat.
with pytest.raises(NotImplementedError):
invoke_receipt.show_trace()
```
#### File: functional/utils/test_trace.py
```python
from dataclasses import dataclass
from pathlib import Path
import pytest
from ethpm_types import ContractType
from evm_trace import CallTreeNode
from rich import print as rich_print
from ape.contracts import ContractContainer
from ape.utils.trace import CallTraceParser
from ape_ethereum.transactions import Receipt
from tests.functional.data.python import (
LOCAL_CALL_TREE_DICT,
MAINNET_CALL_TREE_DICT,
MAINNET_RECEIPT_DICT,
)
from tests.functional.utils.expected_traces import LOCAL_TRACE, MAINNET_TRACE
FAILED_TXN_HASH = "0x053cba5c12172654d894f66d5670bab6215517a94189a9ffc09bc40a589ec04d"
INTERNAL_TRANSFERS_TXN_HASH_0 = "0xb7d7f1d5ce7743e821d3026647df486f517946ef1342a1ae93c96e4a8016eab7"
INTERNAL_TRANSFERS_TXN_HASH_1 = "0x0537316f37627655b7fe5e50e23f71cd835b377d1cde4226443c94723d036e32"
BASE_CONTRACTS_PATH = Path(__file__).parent.parent / "data" / "contracts" / "ethereum"
@pytest.fixture(scope="module")
def local_contracts(owner, networks_connected_to_tester):
containers = {}
for char in ("a", "b", "c"):
contract_data = BASE_CONTRACTS_PATH / "local" / f"contract_{char}.json"
contract_type = ContractType.parse_raw(contract_data.read_text())
container = ContractContainer(contract_type)
containers[char] = container
contract_c = owner.deploy(containers["c"])
contract_b = owner.deploy(containers["b"], contract_c.address)
contract_a = owner.deploy(containers["a"], contract_b.address, contract_c.address)
return contract_a, contract_b, contract_c
@pytest.fixture(autouse=True, scope="module")
def full_contracts_cache(chain):
# Copy mainnet contract types into local cache to make them accessible for look-up
mainnet_contracts_dir = BASE_CONTRACTS_PATH / "mainnet"
for contract_type_file in mainnet_contracts_dir.iterdir():
address = contract_type_file.stem
contract_type = ContractType.parse_raw(contract_type_file.read_text())
chain.contracts._local_contracts[address] = contract_type
@pytest.fixture(scope="module")
def local_receipt(local_contracts, owner):
return local_contracts[0].methodWithoutArguments(sender=owner, value=123)
@pytest.fixture(scope="module")
def mainnet_receipt():
return Receipt.parse_obj(MAINNET_RECEIPT_DICT)
@pytest.fixture(scope="module")
def local_call_tree(local_contracts):
def set_address(d):
if d["address"] == "b":
d["address"] = local_contracts[1].address
elif d["address"] == "c":
d["address"] = local_contracts[2].address
new_dict = dict(LOCAL_CALL_TREE_DICT)
new_dict["address"] = local_contracts[0].address
def set_all_addresses(d):
set_address(d)
for call in d["calls"]:
set_all_addresses(call)
set_all_addresses(new_dict)
return CallTreeNode.parse_obj(new_dict)
@pytest.fixture(scope="module")
def mainnet_call_tree():
return CallTreeNode.parse_obj(MAINNET_CALL_TREE_DICT)
@pytest.fixture(params=("local", "mainnet"))
def case(request, local_receipt, mainnet_receipt, local_call_tree, mainnet_call_tree):
@dataclass
class TraceTestCase:
receipt: Receipt
expected: str
call_tree: CallTreeNode
name: str = request.param
# Add more test trace cases here
if request.param == "local":
return TraceTestCase(local_receipt, LOCAL_TRACE, local_call_tree)
elif request.param == "mainnet":
return TraceTestCase(mainnet_receipt, MAINNET_TRACE, mainnet_call_tree)
@pytest.fixture
def assert_trace(capsys):
def assert_trace(actual: str):
output, _ = capsys.readouterr()
trace = [s.strip() for s in output.split("\n")]
for line in trace:
parts = line.split(" ")
for part in [p.strip() for p in parts if p.strip()]:
part = part.strip()
assert part in actual, f"Could not find '{part}' in expected"
return assert_trace
def test_trace(case, assert_trace):
parser = CallTraceParser(case.receipt)
actual = parser.parse_as_tree(case.call_tree)
rich_print(actual)
assert_trace(case.expected)
``` |
{
"source": "johnson2427/pytest_workshop",
"score": 3
} |
#### File: pytest_workshop/tests/test_calc.py
```python
from pytest_workshop.calc import Calc
def test_add_two_numbers():
c = Calc()
res = c.add(4, 5)
assert res == 9
def test_add_three_numbers():
c = Calc()
res = c.add(4, 5, 6)
assert res == 15
def test_add_many_numbers():
c = Calc()
s = range(100)
res = c.add(*s)
assert res == 4950
def test_sub_two_numbers():
c = Calc()
res = c.sub(10, 3)
assert res == 7
def test_multiply_two_numbers():
c = Calc()
res = c.mul(6, 4)
assert res == 24
def test_multiply_many_numbers():
s = range(1, 10)
assert Calc().mul(*s) == 362880
def test_divide_two_numbers():
assert Calc().div(22, 2) == 11
def test_divide_returns_floats():
assert Calc().div(11, 2) == 5.5
def test_divide_by_zero_returns_inf():
assert Calc().div(5, 0) == "inf"
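# A minimal sketch of an implementation consistent with the tests above (hypothetical; the real
# pytest_workshop.calc.Calc may differ), named differently so the imported class is not shadowed:
class _CalcSketch:
    def add(self, *args):
        return sum(args)
    def sub(self, a, b):
        return a - b
    def mul(self, *args):
        result = 1
        for factor in args:
            result *= factor
        return result
    def div(self, a, b):
        try:
            return a / b
        except ZeroDivisionError:
            return "inf"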
``` |
{
"source": "johnson7788/detectron2",
"score": 2
} |
#### File: johnson7788/detectron2/detectron2_tutorial.py
```python
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
from google.colab.patches import cv2_imshow
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
"""# 运行预训练模型使用detectron2
**我们**首先从COCO数据集中下载图像:
"""
!wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O input.jpg
im = cv2.imread("./input.jpg")
cv2_imshow(im)
"""然后,我们创建一个detectron2配置和一个detectron2`DefaultPredictor`来对该图像进行推理。"""
cfg = get_cfg()
# Add project-specific config (e.g., for TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
# look at the outputs. See https://detectron2.readthedocs.io/tutorials/models.html#model-output-format for specification
print(outputs["instances"].pred_classes)
print(outputs["instances"].pred_boxes)
# We can use `Visualizer` to draw the predictions on the image.
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2_imshow(out.get_image()[:, :, ::-1])
"""# 在定制数据集上训练
在本节中,我们将展示如何在自定义数据集上以新格式训练现有的detectron2模型。
We use [the balloon segmentation dataset](https://github.com/matterport/Mask_RCNN/tree/master/samples/balloon)
which only has one class: balloon.
只有一类:气球。
我们将从在COCO数据集上预先训练的现有模型中训练气球分割模型,该模型可在detectron2的模型动物园中获得。
请注意,COCO数据集没有“气球”类别。 几分钟后,我们将能够认识到这个新类别。
## 准备数据集
"""
# download, decompress the data
!wget https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip
!unzip balloon_dataset.zip > /dev/null
"""*Register* the balloon dataset to detectron2, following the [detectron2 custom dataset tutorial](https://detectron2.readthedocs.io/tutorials/datasets.html).
Here, the dataset is in its own custom format, so we write a function to parse it and prepare it into detectron2's standard format. See the tutorial for more details.
"""
# If your dataset is in COCO format, this cell can be replaced by the following three lines:
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")
from detectron2.structures import BoxMode
def get_balloon_dicts(img_dir):
json_file = os.path.join(img_dir, "via_region_data.json")
with open(json_file) as f:
imgs_anns = json.load(f)
dataset_dicts = []
for idx, v in enumerate(imgs_anns.values()):
record = {}
filename = os.path.join(img_dir, v["filename"])
height, width = cv2.imread(filename).shape[:2]
record["file_name"] = filename
record["image_id"] = idx
record["height"] = height
record["width"] = width
annos = v["regions"]
objs = []
for _, anno in annos.items():
assert not anno["region_attributes"]
anno = anno["shape_attributes"]
px = anno["all_points_x"]
py = anno["all_points_y"]
poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
poly = [p for x in poly for p in x]
obj = {
"bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": [poly],
"category_id": 0,
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
for d in ["train", "val"]:
DatasetCatalog.register("balloon_" + d, lambda d=d: get_balloon_dicts("balloon/" + d))
MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
balloon_metadata = MetadataCatalog.get("balloon_train")
"""为了验证数据加载是否正确,让我们可视化训练集中随机选择的样本的注释:"""
dataset_dicts = get_balloon_dicts("balloon/train")
for d in random.sample(dataset_dicts, 3):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=balloon_metadata, scale=0.5)
out = visualizer.draw_dataset_dict(d)
cv2_imshow(out.get_image()[:, :, ::-1])
"""## 训练!
现在,让我们在气球数据集上微调COCO预训练的R50-FPN遮罩R-CNN模型。 在Colab的K80 GPU上训练300次迭代大约需要6分钟,而在P100 **GPU上训练大约2分钟。**
"""
from detectron2.engine import DefaultTrainer
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("balloon_train",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025 # pick a good LR
cfg.SOLVER.MAX_ITER = 300 # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon)
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
# Commented out IPython magic to ensure Python compatibility.
# Look at training curves in tensorboard:
# %load_ext tensorboard
# %tensorboard --logdir output
"""##使用经过训练的模型进行推断和评估
现在,让我们对气球验证数据集上的训练模型进行推断。 首先,让我们使用刚刚训练的模型创建一个预测变量:
"""
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold for this model
cfg.DATASETS.TEST = ("balloon_val", )
predictor = DefaultPredictor(cfg)
"""然后,我们随机选择几个样本以可视化预测结果。"""
from detectron2.utils.visualizer import ColorMode
dataset_dicts = get_balloon_dicts("balloon/val")
for d in random.sample(dataset_dicts, 3):
im = cv2.imread(d["file_name"])
outputs = predictor(im)
v = Visualizer(im[:, :, ::-1],
metadata=balloon_metadata,
scale=0.5,
instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models
)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2_imshow(out.get_image()[:, :, ::-1])
"""我们还可以使用COCO API中实现的AP指标评估其性能。 这样得出的AP(平均精度)约为70。"""
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("balloon_val", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "balloon_val")
print(inference_on_dataset(trainer.model, val_loader, evaluator))
# another equivalent way is to use trainer.test
"""# 其他类型的内置模型"""
# Inference with a keypoint detection model
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
v = Visualizer(im[:,:,::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2_imshow(out.get_image()[:, :, ::-1])
# Inference with a panoptic segmentation model
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
predictor = DefaultPredictor(cfg)
panoptic_seg, segments_info = predictor(im)["panoptic_seg"]
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"), segments_info)
cv2_imshow(out.get_image()[:, :, ::-1])
"""# 在视频上运行全景分割"""
# This is the video we're going to process
from IPython.display import YouTubeVideo, display
video = YouTubeVideo("ll8TgCZ0plk", width=500)
display(video)
# Install dependencies, download the video, and crop 5 seconds for processing
!pip install youtube-dl
!pip uninstall -y opencv-python-headless opencv-contrib-python
!apt install python3-opencv # the one pre-installed have some issues
!youtube-dl https://www.youtube.com/watch?v=ll8TgCZ0plk -f 22 -o video.mp4
!ffmpeg -i video.mp4 -t 00:00:06 -c:v copy video-clip.mp4
# Run frame-by-frame inference demo on this video (takes 3-4 minutes) with the "demo.py" tool we provided in the repo.
!git clone https://github.com/facebookresearch/detectron2
!python detectron2/demo/demo.py --config-file detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml --video-input video-clip.mp4 --confidence-threshold 0.6 --output video-output.mkv \
--opts MODEL.WEIGHTS detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_101_3x/139514519/model_final_cafdb1.pkl
# Download the results
from google.colab import files
files.download('video-output.mkv')
``` |
{
"source": "johnson7788/dygiepp",
"score": 2
} |
#### File: data/dataset_readers/dygie.py
```python
import logging
from typing import Any, Dict, List, Optional, Tuple, DefaultDict, Set, Union
import json
import pickle as pkl
import warnings
# from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import (ListField, TextField, SpanField, MetadataField,
SequenceLabelField, AdjacencyField, LabelField)
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.dataset_readers.dataset_utils import enumerate_spans
from dygie.data.fields.adjacency_field_assym import AdjacencyFieldAssym
from dygie.data.dataset_readers.document import Document, Sentence
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class DyGIEDataException(Exception):
pass
@DatasetReader.register("dygie")
class DyGIEReader(DatasetReader):
"""
    Reads a single JSON-formatted file. This is the same file format as used in
    SciERC, but preprocessed.
"""
def __init__(self,
max_span_width: int,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs) -> None:
super().__init__(**kwargs)
self._max_span_width = max_span_width
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
# @overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as f:
lines = f.readlines()
for line in lines:
# Loop over the documents.
doc_text = json.loads(line)
instance = self.text_to_instance(doc_text)
yield instance
def _too_long(self, span):
return span[1] - span[0] + 1 > self._max_span_width
def _process_ner(self, span_tuples, sent):
ner_labels = [""] * len(span_tuples)
for span, label in sent.ner_dict.items():
if self._too_long(span):
continue
ix = span_tuples.index(span)
ner_labels[ix] = label
return ner_labels
def _process_coref(self, span_tuples, sent):
coref_labels = [-1] * len(span_tuples)
for span, label in sent.cluster_dict.items():
if self._too_long(span):
continue
ix = span_tuples.index(span)
coref_labels[ix] = label
return coref_labels
def _process_relations(self, span_tuples, sent):
relations = []
relation_indices = []
# Loop over the gold spans. Look up their indices in the list of span tuples and store
# values.
for (span1, span2), label in sent.relation_dict.items():
# If either span is beyond the max span width, skip it.
if self._too_long(span1) or self._too_long(span2):
continue
ix1 = span_tuples.index(span1)
ix2 = span_tuples.index(span2)
relation_indices.append((ix1, ix2))
relations.append(label)
return relations, relation_indices
def _process_events(self, span_tuples, sent):
n_tokens = len(sent.text)
trigger_labels = [""] * n_tokens
for tok_ix, trig_label in sent.events.trigger_dict.items():
trigger_labels[tok_ix] = trig_label
arguments = []
argument_indices = []
for (trig_ix, arg_span), arg_label in sent.events.argument_dict.items():
if self._too_long(arg_span):
continue
arg_span_ix = span_tuples.index(arg_span)
argument_indices.append((trig_ix, arg_span_ix))
arguments.append(arg_label)
return trigger_labels, arguments, argument_indices
def _process_sentence(self, sent: Sentence, dataset: str):
"""
        Get the sentence text and define the "text_field".
"""
        # dataset must not be empty; the fallback handling here is problematic
if dataset is None:
dataset = "scierc"
assert dataset, "dataset字段没有传过来,不能为None,请检查"
sentence_text = [self._normalize_word(word) for word in sent.text]
text_field = TextField([Token(word) for word in sentence_text], self._token_indexers)
# Enumerate spans.
spans = []
for start, end in enumerate_spans(sentence_text, max_span_width=self._max_span_width):
spans.append(SpanField(start, end, text_field))
span_field = ListField(spans)
span_tuples = [(span.span_start, span.span_end) for span in spans]
# Convert data to fields.
# NOTE: The `ner_labels` and `coref_labels` would ideally have type
# `ListField[SequenceLabelField]`, where the sequence labels are over the `SpanField` of
# `spans`. But calling `as_tensor_dict()` fails on this specific data type. Matt G
        # recognized that this is an AllenNLP API issue and suggested representing these as
# `ListField[ListField[LabelField]]` instead.
fields = {}
fields["text"] = text_field
fields["spans"] = span_field
if sent.ner is not None:
ner_labels = self._process_ner(span_tuples, sent)
fields["ner_labels"] = ListField(
[LabelField(entry, label_namespace=f"{dataset}__ner_labels")
for entry in ner_labels])
if sent.cluster_dict is not None:
# Skip indexing for coref labels, which are ints.
coref_labels = self._process_coref(span_tuples, sent)
fields["coref_labels"] = ListField(
[LabelField(entry, label_namespace="coref_labels", skip_indexing=True)
for entry in coref_labels])
if sent.relations is not None:
relation_labels, relation_indices = self._process_relations(span_tuples, sent)
fields["relation_labels"] = AdjacencyField(
indices=relation_indices, sequence_field=span_field, labels=relation_labels,
label_namespace=f"{dataset}__relation_labels")
if sent.events is not None:
trigger_labels, argument_labels, argument_indices = self._process_events(span_tuples, sent)
fields["trigger_labels"] = SequenceLabelField(
trigger_labels, text_field, label_namespace=f"{dataset}__trigger_labels")
fields["argument_labels"] = AdjacencyFieldAssym(
indices=argument_indices, row_field=text_field, col_field=span_field,
labels=argument_labels, label_namespace=f"{dataset}__argument_labels")
return fields
def _process_sentence_fields(self, doc: Document):
# Process each sentence.
sentence_fields = [self._process_sentence(sent, doc.dataset) for sent in doc.sentences]
# Make sure that all sentences have the same set of keys.
first_keys = set(sentence_fields[0].keys())
for entry in sentence_fields:
if set(entry.keys()) != first_keys:
raise DyGIEDataException(
f"Keys do not match across sentences for document {doc.doc_key}.")
# For each field, store the data from all sentences together in a ListField.
fields = {}
keys = sentence_fields[0].keys()
for key in keys:
this_field = ListField([sent[key] for sent in sentence_fields])
fields[key] = this_field
return fields
# @overrides
def text_to_instance(self, doc_text: Dict[str, Any]):
"""
Convert a Document object into an instance.
"""
doc = Document.from_json(doc_text)
# Make sure there are no single-token sentences; these break things.
sent_lengths = [len(x) for x in doc.sentences]
if min(sent_lengths) < 2:
msg = (f"Document {doc.doc_key} has a sentence with a single token or no tokens. "
"This may break the modeling code.")
warnings.warn(msg)
fields = self._process_sentence_fields(doc)
fields["metadata"] = MetadataField(doc)
return Instance(fields)
# @overrides
def _instances_from_cache_file(self, cache_filename):
with open(cache_filename, "rb") as f:
for entry in pkl.load(f):
yield entry
# @overrides
def _instances_to_cache_file(self, cache_filename, instances):
with open(cache_filename, "wb") as f:
pkl.dump(instances, f, protocol=pkl.HIGHEST_PROTOCOL)
@staticmethod
def _normalize_word(word):
if word == "/." or word == "/?":
return word[1:]
else:
return word
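# A minimal usage sketch (the data path is hypothetical; `read` is inherited from the AllenNLP
# DatasetReader base class):
def _dygie_reader_example():  # pragma: no cover
    reader = DyGIEReader(max_span_width=8)
    return list(reader.read("data/processed/example.jsonl"))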
```
#### File: dygie/training/ner_metrics.py
```python
from overrides import overrides
from typing import Optional
import torch
from allennlp.training.metrics.metric import Metric
from dygie.training.f1 import compute_f1
# TODO(dwadden) Need to use the decoded predictions so that we catch the gold examples longer than
# the span boundary.
class NERMetrics(Metric):
"""
    Computes precision, recall, and macro-averaged F1 from lists of predicted and gold labels.
"""
def __init__(self, number_of_classes: int, none_label: int=0):
self.number_of_classes = number_of_classes
self.none_label = none_label
self.reset()
@overrides
def __call__(self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.Tensor] = None):
predictions = predictions.cpu()
gold_labels = gold_labels.cpu()
mask = mask.cpu()
for i in range(self.number_of_classes):
if i == self.none_label:
continue
self._true_positives += ((predictions==i)*(gold_labels==i)*mask.bool()).sum().item()
self._false_positives += ((predictions==i)*(gold_labels!=i)*mask.bool()).sum().item()
self._true_negatives += ((predictions!=i)*(gold_labels!=i)*mask.bool()).sum().item()
self._false_negatives += ((predictions!=i)*(gold_labels==i)*mask.bool()).sum().item()
@overrides
def get_metric(self, reset=False):
"""
Returns
-------
A tuple of the following metrics based on the accumulated count statistics:
precision : float
recall : float
f1-measure : float
"""
predicted = self._true_positives + self._false_positives
gold = self._true_positives + self._false_negatives
matched = self._true_positives
precision, recall, f1_measure = compute_f1(predicted, gold, matched)
# Reset counts if at end of epoch.
if reset:
self.reset()
return precision, recall, f1_measure
@overrides
def reset(self):
self._true_positives = 0
self._false_positives = 0
self._true_negatives = 0
self._false_negatives = 0
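# A minimal usage sketch with toy tensors (not part of the original module); class 0 is the
# "none" label and is excluded from the counts above.
def _ner_metrics_example():  # pragma: no cover
    metric = NERMetrics(number_of_classes=3, none_label=0)
    predictions = torch.tensor([1, 2, 0, 1])
    gold_labels = torch.tensor([1, 0, 0, 2])
    mask = torch.tensor([1, 1, 1, 1])
    metric(predictions, gold_labels, mask)
    return metric.get_metric(reset=True)  # (precision, recall, f1)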
``` |
{
"source": "johnson7788/EasyTransfer",
"score": 2
} |
#### File: easytransfer/app_zoo/serialization.py
```python
from easytransfer.app_zoo.app_utils import get_reader_fn, get_writer_fn, log_duration_time
from easytransfer.app_zoo.base import ApplicationModel
import easytransfer.preprocessors as preprocessors
from easytransfer.engines import distribution
class SerializationModel(ApplicationModel):
def __init__(self, **kwargs):
""" Bert Serialization model, convert raw text to BERT format
"""
super(SerializationModel, self).__init__(**kwargs)
self.queue_size = 256
self.thread_num = 16
@log_duration_time
def run(self):
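        # The job is a three-stage pipeline (reader -> preprocessor -> writer) whose stages are
        # connected through the process executor's queues; each worker handles its own slice of
        # the input, determined by its task index and the number of worker hosts.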
self.proc_executor = distribution.ProcessExecutor(self.queue_size)
worker_id = self.config.task_index
num_workers = len(self.config.worker_hosts.split(","))
proc_executor = distribution.ProcessExecutor(self.queue_size)
reader = get_reader_fn(self.config.preprocess_input_fp)(input_glob=self.config.preprocess_input_fp,
input_schema=self.config.input_schema,
is_training=False,
batch_size=self.config.preprocess_batch_size,
slice_id=worker_id,
slice_count=num_workers,
output_queue=proc_executor.get_output_queue())
proc_executor.add(reader)
preprocessor = preprocessors.get_preprocessor(
self.config.tokenizer_name_or_path,
thread_num=self.thread_num,
input_queue=proc_executor.get_input_queue(),
output_queue=proc_executor.get_output_queue(),
preprocess_batch_size=self.config.preprocess_batch_size,
user_defined_config=self.config,
app_model_name=self.config.app_model_name)
proc_executor.add(preprocessor)
writer = get_writer_fn(self.config.preprocess_output_fp)(output_glob=self.config.preprocess_output_fp,
output_schema=self.config.output_schema,
slice_id=worker_id,
input_queue=proc_executor.get_input_queue())
proc_executor.add(writer)
proc_executor.run()
proc_executor.wait()
```
#### File: easytransfer/evaluators/labeling_evaluator.py
```python
import numpy as np
import tensorflow as tf
from .evaluator import Evaluator
class SequenceLablingEvaluator(Evaluator):
def __init__(self):
# declare metric names this evaluator will return
metric_names = [
'accuracy'
]
# pass metric names to base class
super(SequenceLablingEvaluator, self).__init__(metric_names)
def clear(self):
'''
clear internal storage
'''
self.predictions = []
self.labels = []
def add_batch_info(self, predictions, labels):
'''
        Store predictions and labels in an internal list.
        Args:
            predictions: batched prediction results, a numpy array of shape N
            labels: batched labels, a numpy array of shape N
'''
for pred, label in zip(predictions, labels):
self.predictions.append(pred)
self.labels.append(label.astype(np.int32))
def evaluate(self, labels):
'''
        Python evaluation code which is run after all
        batched test data have been predicted.
'''
if len(self.predictions) == 0 or len(self.labels) == 0:
tf.logging.info('empty data to evaluate')
return {'accuracy': 0.0}
cnt = 0
hit = 0
for smp_preds, smp_labels in zip(self.predictions, self.labels):
for token_pred, token_label in zip(smp_preds, smp_labels):
if token_label == -1:
continue
cnt += 1
hit += (token_pred == token_label)
accuracy = 1.0 * hit / cnt
return {'accuracy': accuracy}
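# A minimal usage sketch with toy numpy arrays (not part of the original module); tokens labeled
# -1 are ignored, mirroring the padding convention used in `evaluate` above.
def _sequence_labeling_evaluator_example():  # pragma: no cover
    evaluator = SequenceLablingEvaluator()
    evaluator.clear()  # make sure the internal buffers exist before adding batches
    evaluator.add_batch_info(predictions=np.array([[1, 2, 0]]), labels=np.array([[1, 2, -1]]))
    return evaluator.evaluate(labels=None)  # {'accuracy': 1.0}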
def sequence_labeling_eval_metrics(logits, labels, num_labels):
""" Building evaluation metrics while evaluating
Args:
logits (`Tensor`): shape of [None, seq_length, num_labels]
labels (`Tensor`): shape of [None, seq_length]
Returns:
ret_dict (`dict`): A dict with (`py_accuracy`, `py_micro_f1`, `py_macro_f1`) tf.metrics op
"""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
evaluator = SequenceLablingEvaluator()
info_dict = {
"predictions": predictions,
"labels": labels,
}
label_ids = [i for i in range(num_labels)]
metric_dict = evaluator.get_metric_ops(info_dict, label_ids)
tf.logging.info(metric_dict)
ret_metrics = evaluator.evaluate(label_ids)
tf.logging.info(ret_metrics)
for key, val in ret_metrics.items():
tf.summary.scalar("eval_" + key, val)
return metric_dict
```
#### File: easytransfer/losses/pretrain_loss.py
```python
import tensorflow as tf
def masked_language_model_loss(lm_logits, masked_lm_ids, masked_lm_weights, vocab_size):
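    # Shape assumptions (not stated here): `lm_logits` is [batch_size * masked_positions, vocab_size],
    # while `masked_lm_ids` and `masked_lm_weights` are flattened below, so the result is a weighted
    # average of the per-position cross-entropy over the masked tokens.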
log_probs = tf.nn.log_softmax(lm_logits, axis=-1)
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
label_weights = tf.reshape(masked_lm_weights, [-1])
one_hot_labels = tf.one_hot(masked_lm_ids, depth=vocab_size, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
masked_lm_loss = numerator / denominator
tf.summary.scalar("masked_lm_loss", masked_lm_loss)
return masked_lm_loss
def next_sentence_prediction_loss(nsp_logits, nx_sent_labels):
log_probs = tf.nn.log_softmax(nsp_logits, axis=-1)
next_sentence_labels = tf.reshape(nx_sent_labels, [-1])
one_hot_labels = tf.one_hot(next_sentence_labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
next_sentence_prediction_loss = tf.reduce_mean(per_example_loss)
tf.summary.scalar("next_sentence_prediction_loss", next_sentence_prediction_loss)
return next_sentence_prediction_loss
def image_reconstruction_mse_loss(mpm_logits, target_raw_patch_features,
masked_image_token_num, patch_feature_size):
image_pred_probs = tf.nn.log_softmax(mpm_logits)
image_pred_probs = tf.reshape(image_pred_probs,
(-1, masked_image_token_num,
patch_feature_size))
image_target_probs = tf.reshape(tf.nn.log_softmax(target_raw_patch_features),
(
-1, masked_image_token_num, patch_feature_size))
image_loss = tf.keras.losses.mean_squared_error(image_target_probs, image_pred_probs)
image_loss = tf.reduce_mean(image_loss)
tf.summary.scalar("image_reconstruction_mse_loss", image_loss)
return image_loss
def image_reconstruction_kld_loss(mpm_logits, target_raw_patch_features,
masked_image_token_num, patch_feature_size):
image_pred_probs = tf.nn.softmax(mpm_logits)
image_pred_probs = tf.reshape(image_pred_probs,
(-1, masked_image_token_num, patch_feature_size))
image_target_probs = tf.reshape(tf.nn.softmax(target_raw_patch_features),
(-1, masked_image_token_num,
patch_feature_size))
image_loss = tf.keras.losses.KLD(image_target_probs, image_pred_probs)
image_loss = tf.reduce_mean(image_loss)
tf.summary.scalar("image_reconstruction_kld_loss", image_loss)
return image_loss
```
#### File: easytransfer/optimizers/adam_weight_decay_optimizer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.eager import context
from tensorflow.python.training import optimizer
class AdamWeightDecayOptimizer(optimizer.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
self.learning_rate_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
def _get_beta_accumulators(self):
with ops.init_scope():
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return (self._get_non_slot_variable("beta1_power", graph=graph),
self._get_non_slot_variable("beta2_power", graph=graph))
def _prepare(self):
self.learning_rate_t = ops.convert_to_tensor(
self.learning_rate, name='learning_rate')
self.weight_decay_rate_t = ops.convert_to_tensor(
self.weight_decay_rate, name='weight_decay_rate')
self.beta_1_t = ops.convert_to_tensor(self.beta_1, name='beta_1')
self.beta_2_t = ops.convert_to_tensor(self.beta_2, name='beta_2')
self.epsilon_t = ops.convert_to_tensor(self.epsilon, name='epsilon')
def _create_slots(self, var_list):
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(initial_value=self.beta_1,
name="beta1_power",
colocate_with=first_var)
self._create_non_slot_variable(initial_value=self.beta_2,
name="beta2_power",
colocate_with=first_var)
for v in var_list:
self._zeros_slot(v, 'm', self._name)
self._zeros_slot(v, 'v', self._name)
def _apply_dense(self, grad, var):
learning_rate_t = math_ops.cast(
self.learning_rate_t, var.dtype.base_dtype)
beta_1_t = math_ops.cast(self.beta_1_t, var.dtype.base_dtype)
beta_2_t = math_ops.cast(self.beta_2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self.epsilon_t, var.dtype.base_dtype)
weight_decay_rate_t = math_ops.cast(
self.weight_decay_rate_t, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
learning_rate_t = math_ops.cast(self.learning_rate_t, var.dtype.base_dtype)
learning_rate_t = (learning_rate_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# Standard Adam update.
next_m = (
tf.multiply(beta_1_t, m) +
tf.multiply(1.0 - beta_1_t, grad))
next_v = (
tf.multiply(beta_2_t, v) + tf.multiply(1.0 - beta_2_t,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + epsilon_t)
if self._do_use_weight_decay(var.name):
update += weight_decay_rate_t * var
update_with_lr = learning_rate_t * update
next_param = var - update_with_lr
return control_flow_ops.group(*[var.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
def _resource_apply_dense(self, grad, var):
learning_rate_t = math_ops.cast(
self.learning_rate_t, var.dtype.base_dtype)
beta_1_t = math_ops.cast(self.beta_1_t, var.dtype.base_dtype)
beta_2_t = math_ops.cast(self.beta_2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self.epsilon_t, var.dtype.base_dtype)
weight_decay_rate_t = math_ops.cast(
self.weight_decay_rate_t, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
learning_rate_t = math_ops.cast(self.learning_rate_t, var.dtype.base_dtype)
learning_rate_t = (learning_rate_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# Standard Adam update.
next_m = (
tf.multiply(beta_1_t, m) +
tf.multiply(1.0 - beta_1_t, grad))
next_v = (
tf.multiply(beta_2_t, v) + tf.multiply(1.0 - beta_2_t,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + epsilon_t)
if self._do_use_weight_decay(var.name):
update += weight_decay_rate_t * var
update_with_lr = learning_rate_t * update
next_param = var - update_with_lr
return control_flow_ops.group(*[var.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
learning_rate_t = math_ops.cast(
self.learning_rate_t, var.dtype.base_dtype)
beta_1_t = math_ops.cast(self.beta_1_t, var.dtype.base_dtype)
beta_2_t = math_ops.cast(self.beta_2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self.epsilon_t, var.dtype.base_dtype)
weight_decay_rate_t = math_ops.cast(
self.weight_decay_rate_t, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
learning_rate_t = math_ops.cast(self.learning_rate_t, var.dtype.base_dtype)
learning_rate_t = (learning_rate_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
m_t = state_ops.assign(m, m * beta_1_t,
use_locking=self._use_locking)
m_scaled_g_values = grad * (1 - beta_1_t)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
update = m_t / (math_ops.sqrt(v_t) + epsilon_t)
if self._do_use_weight_decay(var.name):
update += weight_decay_rate_t * var
update_with_lr = learning_rate_t * update
var_update = state_ops.assign_sub(var,
update_with_lr,
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values, var, grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x, i, v, use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(
x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(
grad, var, indices, self._resource_scatter_add)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies(update_ops):
beta1_power, beta2_power = self._get_beta_accumulators()
with ops.colocate_with(beta1_power):
update_beta1 = beta1_power.assign(
beta1_power * self.beta_1_t, use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self.beta_2_t, use_locking=self._use_locking)
return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
name=name_scope)
```
#### File: scripts/fashion_bert/fashionbert_utils.py
```python
import numpy as np
from collections import namedtuple
from sklearn import metrics
import tensorflow as tf
from easytransfer import FLAGS, Config
_app_flags = tf.app.flags
_app_flags.DEFINE_string("input_dir", default=None, help='')
_app_flags.DEFINE_string("output_dir", default=None, help='')
_app_flags.DEFINE_integer("num_threads", default=None, help='')
_app_flags.DEFINE_string("data_format", default=None, help='')
_app_flags.DEFINE_string("tokenizer", default="wordpiece", help='')
_app_flags.DEFINE_string("spm_model_fp", None, "The model file for sentence piece tokenization.")
_app_flags.DEFINE_string("vocab_fp", None, "The model file for word piece tokenization.")
_app_flags.DEFINE_bool("do_whole_word_mask", True,
"Whether to use whole word masking rather than per-WordPiece masking.")
_app_flags.DEFINE_bool("do_chinese_whole_word_mask", False,
"Whether to use whole word masking rather than per-WordPiece masking.")
_app_flags.DEFINE_bool("random_next_sentence", False, "")
_app_flags.DEFINE_integer("dupe_factor", 40, "Number of times to duplicate the input data (with different masks).")
_app_flags.DEFINE_integer("max_seq_length", 512, "Maximum sequence length.")
_app_flags.DEFINE_integer("max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence.")
_app_flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
_app_flags.DEFINE_float("short_seq_prob", 0.1,
"Probability of creating sequences which are shorter than the maximum length.")
_app_flags.DEFINE_string("loss", None, "loss")
_app_flags.DEFINE_string("model_type", None, "model_type")
_app_flags.DEFINE_string("input_schema", default=None, help='input_schema')
_app_flags.DEFINE_string("pretrain_model_name_or_path", default=None, help='pretrain_model_name_or_path')
_app_flags.DEFINE_string("train_input_fp", default=None, help='train_input_fp')
_app_flags.DEFINE_string("eval_input_fp", default=None, help='eval_input_fp')
_app_flags.DEFINE_string("predict_input_fp", default=None, help='predict_input_fp')
_app_flags.DEFINE_string("predict_checkpoint_path", default=None, help='predict_checkpoint_path')
_app_flags.DEFINE_integer("train_batch_size", default=128, help='train_batch_size')
_app_flags.DEFINE_integer("predict_batch_size", default=128, help='predict_batch_size')
_app_flags.DEFINE_integer("num_epochs", default=1, help='num_epochs')
_app_flags.DEFINE_string("model_dir", default='', help='model_dir')
_app_flags.DEFINE_float("learning_rate", 1e-4, "learning_rate")
_app_flags.DEFINE_integer("hidden_size", default=None, help='')
_app_flags.DEFINE_integer("intermediate_size", default=None, help='')
_app_flags.DEFINE_integer("num_hidden_layers", default=None, help='')
_app_flags.DEFINE_integer("num_attention_heads", default=None, help='')
_APP_FLAGS = _app_flags.FLAGS
class PretrainConfig(Config):
def __init__(self):
if _APP_FLAGS.pretrain_model_name_or_path is not None:
pretrain_model_name_or_path = _APP_FLAGS.pretrain_model_name_or_path
else:
pretrain_model_name_or_path = _APP_FLAGS.model_dir + "/model.ckpt"
config_json = {
"preprocess_config": {
"input_schema": _APP_FLAGS.input_schema,
"output_schema": None
},
'model_config': {
'pretrain_model_name_or_path': pretrain_model_name_or_path
},
'train_config': {
"train_input_fp": _APP_FLAGS.train_input_fp,
"train_batch_size": _APP_FLAGS.train_batch_size,
"num_epochs": _APP_FLAGS.num_epochs,
"model_dir": _APP_FLAGS.model_dir,
"save_steps": None,
"optimizer_config": {
"optimizer": "adam",
"weight_decay_ratio": 0,
"warmup_ratio": 0.1,
"learning_rate": _APP_FLAGS.learning_rate,
},
"distribution_config": {
"distribution_strategy": "MirroredStrategy"
}
},
"evaluate_config": {
"eval_input_fp": _APP_FLAGS.eval_input_fp,
"eval_batch_size": 16,
"num_eval_steps": 1000
},
"predict_config": {
"predict_input_fp": _APP_FLAGS.predict_input_fp,
"predict_output_fp": None,
"predict_checkpoint_path":_APP_FLAGS.predict_checkpoint_path,
"predict_batch_size": _APP_FLAGS.predict_batch_size,
"output_schema":None
},
}
config_json["worker_hosts"] = FLAGS.worker_hosts
config_json["task_index"] = FLAGS.task_index
config_json["job_name"] = FLAGS.job_name
config_json["num_gpus"] = FLAGS.workerGPU
config_json["num_workers"] = FLAGS.workerCount
super(PretrainConfig, self).__init__(mode=FLAGS.mode, config_json=config_json)
Doc = namedtuple('Doc', ['id', 'score', 'label'])
def read_batch_result_file(filename, type):
# for accuracy
example_cnt = 0
true_pred_cnt = 0
# for rank@k
query_dict = {}
# for AUC
y_preds = []
y_trues = []
with open(filename, 'r') as fin:
while True:
line = fin.readline()
if not line:
break
items = line.split('\001')
if len(items) != 5:
print("Line errors {}".format(line))
            # text_prod_ids, image_prod_ids, prod_img_ids, labels, nsp_logits
text_prod_ids = items[0].replace('[', '').replace(']', '').split(',')
image_prod_ids = items[1].replace('[', '').replace(']', '').split(',')
prod_img_ids = items[2].replace('[', '').replace(']', '').split(',')
labels = np.array([int(x) for x in items[3].replace('[', '').replace(']', '').split(',')], dtype=np.int32)
predictions = np.array([float(x) for x in items[4].replace('[', '').replace(']', '').split(',')], dtype=np.float32).reshape((-1,2))
# print(predictions.shape, len(text_prod_ids))
            assert len(text_prod_ids) == len(image_prod_ids)
            assert len(text_prod_ids) == len(prod_img_ids)
            assert len(text_prod_ids) == labels.shape[0]
            assert len(text_prod_ids) == predictions.shape[0]
example_cnt = example_cnt + len(text_prod_ids)
# step 1: accuracy
pred_labels = np.argmax(predictions, axis=1)
true_pred_cnt += np.sum(pred_labels == labels)
# step 2: rank@K
for idx in range(len(text_prod_ids)):
query_id = text_prod_ids[idx] if type == 'txt2img' else image_prod_ids[idx]
doc_id = image_prod_ids[idx] if type == 'txt2img' else text_prod_ids[idx]
dscore = predictions[idx, 1]
dlabel = labels[idx]
doc = Doc(id=doc_id, score = dscore, label=dlabel)
if query_id in query_dict:
query_dict[query_id].append(doc)
else:
docs = []
docs.append(doc)
query_dict[query_id] = docs
# step 3: AUC
for idx in range(len(text_prod_ids)):
y_preds.append(predictions[idx, 1])
y_trues.append(labels[idx])
return example_cnt, true_pred_cnt, query_dict, y_preds, y_trues
def prediction_summary(results):
if results is None:
return None
example_cnt, true_pred_cnt, query_dict, y_preds, y_trues = results
# step 1: accuracy
print("Accuracy: ", float(true_pred_cnt) / (float(example_cnt) + 1e-5))
# step 2: rank @ K
query_sorted_dict = {}
for query_id in query_dict.keys():
query_dict[query_id].sort(key=lambda x: x.score, reverse=True)
# for query_id in query_dict.keys():
# print("Query_id, ", query_id, " after sort: ", query_dict[query_id])
Ks = [1, 5, 10, 100]
for k in Ks:
print("========== Rank @ {} evaluation ============".format(k))
fount_at_top_k = 0
for query_id in query_dict.keys():
query_sorted_docs = query_dict[query_id]
tmp_range = k if k < len(query_sorted_docs) else len(query_sorted_docs)
for idx in range(tmp_range):
if query_sorted_docs[idx].label:
fount_at_top_k += 1
break
print("========== Rank @ {} is {} ============".format(k, float(fount_at_top_k)/float(len(query_dict.keys()) + 1e-5)))
# step 3: AUC
    test_auc = metrics.roc_auc_score(y_trues, y_preds)  # AUC on the evaluation set
print("==== AUC {} ====".format(test_auc))
'''
filename: path to the batched prediction result file
type: "img2txt" or "txt2img"
'''
def prediction_analysis(filename, type='img2txt'):
    print("Analyzing file: ", filename)
results = read_batch_result_file(filename, type)
# print(results)
prediction_summary(results)
def append_to_file(filename, content):
with open(filename, 'a') as fout:
fout.write(content)
def delete_exists_file(filename):
if tf.gfile.Exists(filename):
print("file {} found, and deleted".format(filename))
tf.gfile.remove(filename)
```
#### File: scripts/fashion_bert/pretrain_main.py
```python
import tensorflow as tf
import numpy as np
import time
import os
from easytransfer import base_model, FLAGS
from easytransfer import model_zoo
from easytransfer import preprocessors
from easytransfer.datasets import BundleCSVReader
from easytransfer.losses import masked_language_model_loss, next_sentence_prediction_loss, image_reconstruction_kld_loss
from easytransfer.evaluators import masked_language_model_eval_metrics, next_sentence_prediction_eval_metrics
from fashionbert_utils import prediction_analysis, PretrainConfig, append_to_file, delete_exists_file
_app_flags = tf.app.flags
_app_flags.DEFINE_string("type", default=None, help='')
_app_flags.DEFINE_integer("input_sequence_length", default=None, help='')
_app_flags.DEFINE_integer("vocab_size", default=30522, help='')
_app_flags.DEFINE_integer("image_feature_size", default=None, help='')
_APP_FLAGS = _app_flags.FLAGS
class ImageBertPretrain(base_model):
def __init__(self, **kwargs):
super(ImageBertPretrain, self).__init__(**kwargs)
self.user_defined_config = kwargs["user_defined_config"]
def build_logits(self, features, mode=None):
preprocessor = preprocessors.get_preprocessor(self.pretrain_model_name_or_path,
app_model_name="pretrain_language_model",
feature_type="pretrain_multimodel",
user_defined_config=self.user_defined_config)
self.model = model_zoo.get_pretrained_model(self.pretrain_model_name_or_path,
input_sequence_length=_APP_FLAGS.input_sequence_length)
if mode == tf.estimator.ModeKeys.PREDICT:
image_feature, image_mask, input_ids, input_mask, segment_ids,\
nx_sent_labels, prod_desc, text_prod_id, image_prod_id, prod_img_id = preprocessor(features)
            # TODO: These features are not needed in predict mode, but are kept for compatibility with the data format.
masked_patch_positions = tf.constant(np.random.randint(0, self.config.predict_batch_size, (self.model.config.masked_image_token_num,)))
masked_lm_positions = tf.constant(np.random.randint(0, self.config.predict_batch_size, (self.model.config.masked_text_token_num,)))
masked_lm_ids = tf.constant(np.random.randint(0, self.config.predict_batch_size, (self.model.config.masked_text_token_num, 1,)))
            masked_lm_weights = tf.ones((self.config.predict_batch_size, self.model.config.masked_text_token_num))
else:
image_feature, image_mask, masked_patch_positions, input_ids, input_mask, segment_ids,\
masked_lm_positions, masked_lm_ids, masked_lm_weights, nx_sent_labels = preprocessor(features)
mlm_logits, nsp_logits, mpm_logits, target_raw_patch_features, pooled_output = \
self.model(input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
masked_lm_positions=masked_lm_positions,
image_feature=image_feature,
image_mask=image_mask,
masked_patch_positions=masked_patch_positions,
output_features=False,
mode=mode,
image_feature_size=_APP_FLAGS.image_feature_size)
logits = (mlm_logits, nsp_logits, mpm_logits)
labels = (masked_lm_ids, masked_lm_weights, nx_sent_labels, target_raw_patch_features)
return logits, labels
def build_loss(self, logits, labels):
mlm_logits, nsp_logits, mpm_logits = logits
masked_lm_ids, masked_lm_weights, nx_sent_labels, target_raw_patch_features = labels
masked_lm_loss = masked_language_model_loss(mlm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
next_sentence_loss = next_sentence_prediction_loss(nsp_logits, nx_sent_labels)
image_loss = image_reconstruction_kld_loss(mpm_logits, target_raw_patch_features,
self.model.config.masked_image_token_num,
self.model.config.patch_feature_size)
G = tf.reshape(tf.stack([masked_lm_loss, next_sentence_loss, image_loss]), shape=[3])
w0 = 1.0
w1 = 1.0
w2 = 1.0
isAdaptive = True
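        # Adaptive weighting, as implemented below: the three losses are stacked into G, passed
        # through a softmax and squared (nG), and each weight is the normalized product of the
        # other two (alpha * K - nG_i) factors, so the three weights always sum to 1.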
if isAdaptive:
nG = tf.math.square(tf.nn.softmax(G))
alpha = 1.0
K = 3.0
denominator = (alpha * K - nG[0]) * (alpha * K - nG[1]) + \
(alpha * K - nG[1]) * (alpha * K - nG[2]) + \
(alpha * K - nG[2]) * (alpha * K - nG[0])
w0 = (alpha * K - nG[1]) * (alpha * K - nG[2]) / denominator
w1 = (alpha * K - nG[2]) * (alpha * K - nG[0]) / denominator
w2 = (alpha * K - nG[0]) * (alpha * K - nG[1]) / denominator
adaptive_loss = w0 * masked_lm_loss + w1 * next_sentence_loss + w2 * image_loss
return adaptive_loss
def build_eval_metrics(self, logits, labels):
mlm_logits, nsp_logits, _ = logits
masked_lm_ids, masked_lm_weights, next_sentence_labels, _ = labels
mlm_metrics = masked_language_model_eval_metrics(mlm_logits, masked_lm_ids, masked_lm_weights,
self.model.config.vocab_size)
nsp_metrics = next_sentence_prediction_eval_metrics(nsp_logits, next_sentence_labels)
        mlm_metrics.update(nsp_metrics)
        return mlm_metrics
def build_predictions(self, output):
logits, _ = output
mlm_logits, nsp_logits, mpm_logits = logits
return {"nsp_logits": nsp_logits}
def main():
config = PretrainConfig()
app = ImageBertPretrain(user_defined_config=config)
if FLAGS.mode == "train_and_evaluate":
train_reader = BundleCSVReader(input_glob=app.train_input_fp,
is_training=True,
shuffle_buffer_size=4096,
input_schema=app.input_schema,
batch_size=app.train_batch_size,
worker_hosts=app.config.worker_hosts,
task_index=app.config.task_index
)
eval_reader = BundleCSVReader(input_glob=app.eval_input_fp,
input_schema=app.input_schema,
is_training=False,
batch_size=app.eval_batch_size,
worker_hosts=app.config.worker_hosts,
task_index=app.config.task_index)
app.run_train_and_evaluate(train_reader=train_reader, eval_reader=eval_reader)
elif FLAGS.mode == "predict":
predict_reader = BundleCSVReader(input_glob=app.predict_input_fp,
input_schema=app.input_schema,
batch_size=app.predict_batch_size,
worker_hosts=app.config.worker_hosts,
task_index=app.config.task_index)
localtime = time.strftime("%Y%m%d-%H%M-", time.localtime())
if _APP_FLAGS.type == "img2txt":
print("************ predict task img2txt ********")
result_filename = "eval_img2txt_results.txt"
analysis_type = "img2txt"
else:
print("************ predict task txt2img ********")
result_filename = "eval_txt2img_results.txt"
analysis_type = "txt2img"
if not tf.gfile.Exists(_APP_FLAGS.output_dir):
tf.gfile.MkDir(_APP_FLAGS.output_dir)
result_fp_path = os.path.join(_APP_FLAGS.output_dir, str(localtime) + result_filename)
print("result_fp_path: ", result_fp_path)
delete_exists_file(result_fp_path)
for result in app.run_predict(reader=predict_reader,
checkpoint_path=app.config.predict_checkpoint_path,
yield_single_examples=False):
nsp_logits = result["nsp_logits"]
labels = result["nx_sent_labels"]
text_prod_id = result["text_prod_id"]
image_prod_id = result["image_prod_id"]
prod_img_id = result["prod_img_id"]
batch_pred_result = str(text_prod_id.tolist()) + "\001" \
+ str(image_prod_id.tolist()) + "\001" \
+ str(prod_img_id.tolist()) + "\001" \
+ str(labels.tolist()) + "\001" \
+ str(np.reshape(nsp_logits, [-1]).tolist()) + "\n"
append_to_file(result_fp_path, batch_pred_result)
prediction_analysis(result_fp_path, type=analysis_type)
if __name__ == "__main__":
main()
```
#### File: knowledge_distillation/adabert/main_adabert.py
```python
from collections import Counter
import os
import numpy as np
import tensorflow as tf
from sklearn.metrics import f1_score
from easytransfer.datasets import CSVReader
from easytransfer.model_zoo import AdaBERTStudent
from utils import get_assignment_map_from_checkpoint, load_npy, load_arch, SearchResultsSaver
flags = tf.app.flags
flags.DEFINE_string('open_ess', None, 'open_ess')
flags.DEFINE_string('distribution_strategy', "MirroredStrategy", "ds")
# Model
flags.DEFINE_integer(
"embed_size", default=128, help="word/position embedding dimensions")
flags.DEFINE_integer(
"num_token", default=30522, help="Number of distinct tokens")
flags.DEFINE_integer(
"is_pair_task", default=0, help="single sentence or paired sentences.")
flags.DEFINE_integer(
"num_classes", default=2, help="Number of categories to be discriminated")
flags.DEFINE_integer("seq_length", default=128, help="sequence length")
# Training
flags.DEFINE_integer(
"temp_decay_steps",
default=18000,
help="Number of steps for annealing temperature.")
flags.DEFINE_float(
"model_opt_lr",
default=5e-4,
help="learning rate for updating model parameters")
flags.DEFINE_float(
"arch_opt_lr",
default=1e-4,
help="learning rate for updating arch parameters")
flags.DEFINE_float(
"model_l2_reg",
default=3e-4,
help="coefficient for the l2regularization of model parameters")
flags.DEFINE_float(
"arch_l2_reg",
default=1e-3,
help="coefficient for the l2regularization of arch parameters")
flags.DEFINE_float("loss_gamma", default=0.8, help="loss weight gamma")
flags.DEFINE_float("loss_beta", default=4.0, help="loss weight beta")
flags.DEFINE_string(
"emb_pathes", default=None, help="given embeddings")
flags.DEFINE_string(
"arch_path", default=None, help="given architectures")
flags.DEFINE_string(
"model_dir",
default="./model_dir",
help="Directory for saving the finetuned model.")
flags.DEFINE_string(
"searched_model", default=None, help="searched_model_ckpt")
flags.DEFINE_string("train_file", default="", help="train file.")
# mirror ds actual bs = num_core_per_host * train_batch_size
flags.DEFINE_integer(
"num_core_per_host", default=1, help="the number of GPUs used.")
flags.DEFINE_integer(
"train_batch_size", default=32, help="batch size for training")
flags.DEFINE_integer(
"train_steps", default=20000, help="Number of training steps")
flags.DEFINE_integer(
"save_steps", default=2000, help="If None, not to save any model.")
flags.DEFINE_integer(
"max_save", default=1, help="Max number of checkpoints to save. ")
# these parameters are reserved for PAI
flags.DEFINE_boolean("is_training", default=True, help="training or not.")
flags.DEFINE_string("checkpointDir", default='', help="checkpoint Dir")
FLAGS = flags.FLAGS
def get_run_config():
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
intra_op_parallelism_threads=64,
inter_op_parallelism_threads=64,
gpu_options=tf.GPUOptions(
allow_growth=True,
force_gpu_compatible=True,
per_process_gpu_memory_fraction=1.0))
#session_config.graph_options.optimizer_options.opt_level = -1
from tensorflow.core.protobuf import rewriter_config_pb2
session_config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
if FLAGS.distribution_strategy == "ExascaleStrategy":
tf.logging.info(
"*****************Using ExascaleStrategy*********************")
import pai
worker_hosts = FLAGS.worker_hosts.split(',')
if len(worker_hosts) > 1:
pai.distribute.set_tf_config(FLAGS.job_name, FLAGS.task_index,
worker_hosts)
strategy = pai.distribute.ExascaleStrategy(
optimize_clip_by_global_norm=True)
elif FLAGS.distribution_strategy == "MirroredStrategy":
tf.logging.info(
"*****************Using MirroredStrategy*********************")
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
cross_tower_ops = cross_tower_ops_lib.AllReduceCrossTowerOps('nccl')
strategy = tf.contrib.distribute.MirroredStrategy(
num_gpus=FLAGS.num_core_per_host)
elif FLAGS.distribution_strategy == "None":
strategy = None
else:
raise ValueError(
"Set correct distribution strategy, ExascaleStrategy | MirroredStrategy | None"
)
#model_dir set in tf.estimator.Estimator
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
session_config=session_config,
keep_checkpoint_max=FLAGS.max_save,
save_checkpoints_secs=None,
save_checkpoints_steps=FLAGS.save_steps,
#log_step_count_steps=50,
train_distribute=strategy,
)
return run_config
def get_model_fn():
def model_fn(features, labels, mode, params):
inputs = []
if FLAGS.is_training:
for feature_name in ["ids", "mask", "seg_ids", "prob_logits", "labels"]:
inputs.append(features[feature_name])
else:
for feature_name in ["ids", "mask", "seg_ids", "labels"]:
inputs.append(features[feature_name])
if FLAGS.emb_pathes and FLAGS.is_training:
pathes = FLAGS.emb_pathes.split(',')
pretrained_word_embeddings = load_npy(pathes[0])
pretrained_pos_embeddings = load_npy(pathes[1])
else:
pretrained_word_embeddings, pretrained_pos_embeddings = None, None
Kmax = 8
given_arch = None
if FLAGS.arch_path:
Kmax, given_arch = load_arch(FLAGS.arch_path)
model = AdaBERTStudent(
inputs, (mode == tf.estimator.ModeKeys.TRAIN),
vocab_size=FLAGS.num_token,
is_pair_task=bool(FLAGS.is_pair_task),
num_classes=FLAGS.num_classes,
Kmax=Kmax,
emb_size=FLAGS.embed_size,
seq_len=FLAGS.seq_length,
keep_prob=0.9 if mode == tf.estimator.ModeKeys.TRAIN else 1.0,
temp_decay_steps=FLAGS.temp_decay_steps,
model_opt_lr=FLAGS.model_opt_lr,
arch_opt_lr=FLAGS.arch_opt_lr,
model_l2_reg=FLAGS.model_l2_reg,
arch_l2_reg=FLAGS.arch_l2_reg,
loss_gamma=FLAGS.loss_gamma,
loss_beta=FLAGS.loss_beta,
pretrained_word_embeddings=pretrained_word_embeddings,
pretrained_pos_embeddings=pretrained_pos_embeddings,
given_arch=given_arch)
if mode == tf.estimator.ModeKeys.TRAIN:
logging_tensors = dict(
[(var.name, var) for var in model.arch_params])
logging_tensors['step'] = model.global_step
logging_tensors['loss'] = model.loss
logging_hook = tf.train.LoggingTensorHook(
logging_tensors, every_n_iter=50)
chief_only_hooks = [logging_hook]
if given_arch is None:
search_result_hook = SearchResultsSaver(
model.global_step, model.arch_params, model.ld_embs, FLAGS.model_dir, FLAGS.save_steps)
chief_only_hooks.append(search_result_hook)
# handle the save/restore related issues
if FLAGS.searched_model:
# has pretrained
tvars = tf.trainable_variables()
initialized_variable_names = {}
init_checkpoint = os.path.join(FLAGS.searched_model)
tf.logging.info("Init from %s" % init_checkpoint)
(assignment_map, initialized_variable_names
) = get_assignment_map_from_checkpoint(tvars, init_checkpoint, ["wemb", "pemb"])
tf.train.init_from_checkpoint(init_checkpoint,
assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name,
var.shape, init_string)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=model.loss,
train_op=model.update,
training_chief_hooks=chief_only_hooks)
elif mode == tf.estimator.ModeKeys.EVAL:
# Define the metrics:
metrics_dict = {
'Acc': model.acc,
#'AUC': tf.metrics.auc(train_labels, probabilities, num_thresholds=2000)
}
return tf.estimator.EstimatorSpec(
mode, loss=model.loss, eval_metric_ops=metrics_dict)
else:
if FLAGS.is_training:
predictions = dict()
predictions["predicted"] = model.predictions
predictions["labels"] = features["labels"]
else:
predictions = features.copy()
predictions["logits"] = model.logits
predictions["predicted"] = model.predictions
return tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
print("\nParameters:")
for attr, _ in sorted(FLAGS.__flags.items()):
print(' {}={}'.format(attr, FLAGS[attr].value))
print("")
feature_schema = "labels:int:1,ids:int:{},mask:int:{},seg_ids:int:{},prob_logits:float:26".format(
FLAGS.seq_length, FLAGS.seq_length, FLAGS.seq_length)
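    # The input schema follows the CSVReader "name:type:length" convention; here
    # prob_logits carries 26 floats per example, presumably the teacher's per-layer
    # probe logits that the AdaBERT student distills from during training.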
estimator = tf.estimator.Estimator(
model_fn=get_model_fn(), config=get_run_config(), params=None)
reader_fn = CSVReader
reader = reader_fn(
input_glob=FLAGS.train_file.split(',')[0],
input_schema=feature_schema,
is_training=FLAGS.is_training,
batch_size=FLAGS.train_batch_size,
num_parallel_batches=8,
shuffle_buffer_size=1024,
prefatch_buffer_size=1024)
if FLAGS.is_training:
valid_reader = reader_fn(
input_glob=FLAGS.train_file.split(',')[1],
input_schema=feature_schema,
is_training=False,
batch_size=FLAGS.train_batch_size,
num_parallel_batches=8,
shuffle_buffer_size=1024,
prefatch_buffer_size=1024)
train_spec = tf.estimator.TrainSpec(
input_fn=reader.get_input_fn(), max_steps=FLAGS.train_steps)
eval_spec = tf.estimator.EvalSpec(
input_fn=valid_reader.get_input_fn(), steps=None, throttle_secs=0)
tf.estimator.train_and_evaluate(
estimator, train_spec=train_spec, eval_spec=eval_spec)
else:
# assume there is just one given output table
# fout = tf.python_io.TableWriter(FLAGS.outputs)
fout = open(FLAGS.outputs, "w")
num_samples, num_correct = 0, 0
all_predictions = list()
all_labels = list()
for batch_idx, result in enumerate(estimator.predict(
input_fn=reader.get_input_fn(), yield_single_examples=False)):
predicted = result["predicted"]
labels = result["labels"].squeeze(1)
all_predictions.extend(predicted)
all_labels.extend(labels)
num_samples += len(labels)
num_correct += np.sum(
np.asarray(labels == predicted, dtype=np.int32))
to_be_wrote = list()
for i in range(labels.shape[0]):
row = list()
row.append(
','.join([str(v) for v in result["ids"][i]]))
row.append(
','.join([str(v) for v in result["mask"][i]]))
row.append(
','.join([str(v) for v in result["seg_ids"][i]]))
row.append(labels[i])
row.append(
','.join([str(v) for v in result["logits"][i]]))
row.append(int(result["predicted"][i]))
to_be_wrote.append(tuple(row))
# fout.write(to_be_wrote, (0, 1, 2, 3, 4, 5))
for items in to_be_wrote:
fout.write("\t".join([str(t) for t in items]) + "\n")
if batch_idx % 50 == 0:
print("======> predicted for {} instances".format(num_samples))
print(Counter(all_predictions))
print("Accuracy={} / {} = {}".format(num_correct, num_samples, float(num_correct)/float(num_samples)))
print("f1={}".format(f1_score(all_labels, all_predictions)))
if __name__ == "__main__":
tf.app.run()
```
#### File: scripts/knowledge_distillation/main_teacher.py
```python
import datetime
import tensorflow as tf
import easytransfer.layers as layers
from easytransfer import preprocessors, model_zoo, FLAGS, base_model
from easytransfer.losses import softmax_cross_entropy
from easytransfer.evaluators import classification_eval_metrics, teacher_probes_eval_metrics
from easytransfer.app_zoo.app_utils import get_reader_fn, get_writer_fn
class TeacherNetwork(base_model):
def __init__(self, **kwargs):
""" Teacher Network for KD """
super(TeacherNetwork, self).__init__(**kwargs)
def build_logits(self, features, mode=None):
""" Building graph of KD Teacher
Args:
features (`OrderedDict`): A dict mapping raw input to tensors
            mode: a `tf.estimator.ModeKeys` value telling the model whether it is training
Returns:
logits (`list`): logits for all the layers, list of shape of [None, num_labels]
label_ids (`Tensor`): label_ids, shape of [None]
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
preprocessor = preprocessors.get_preprocessor(self.config.pretrain_model_name_or_path,
user_defined_config=self.config)
bert_backbone = model_zoo.get_pretrained_model(self.config.pretrain_model_name_or_path)
# Serialize raw text to get input tensors
input_ids, input_mask, segment_ids, label_id = preprocessor(features)
if self.config.train_probes:
# Get BERT all hidden states
bert_model = bert_backbone.bert
embedding_output = bert_model.embeddings([input_ids, segment_ids], training=is_training)
attention_mask = layers.get_attn_mask_bert(input_ids, input_mask)
all_hidden_outputs, all_att_outputs = bert_model.encoder(
[embedding_output, attention_mask], training=is_training)
# Get teacher Probes
logits = layers.HiddenLayerProbes(self.config.num_labels,
kernel_initializer=layers.get_initializer(0.02),
name="probes")([embedding_output, all_hidden_outputs])
self.tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
"probes/")
else:
_, pooled_output = bert_backbone([input_ids, input_mask, segment_ids], mode=mode)
pooled_output = tf.layers.dropout(
pooled_output, rate=self.config.dropout_rate, training=is_training)
logits = layers.Dense(self.config.num_labels,
kernel_initializer=layers.get_initializer(0.02),
name='app/ez_dense')(pooled_output)
logits = [logits]
if mode == tf.estimator.ModeKeys.PREDICT:
return {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"label_id": label_id,
"logits": tf.concat(logits, axis=-1)
}
else:
return logits, label_id
def build_loss(self, logits, labels):
""" Building loss for KD Teacher
"""
loss = 0.0
for layer_logits in logits:
loss += softmax_cross_entropy(labels, self.config.num_labels, layer_logits)
return loss
def build_eval_metrics(self, logits, labels):
""" Building evaluation metrics while evaluating
Args:
logits (`Tensor`): list of tensors shape of [None, num_labels]
labels (`Tensor`): shape of [None]
Returns:
ret_dict (`dict`): A dict of each layer accuracy tf.metrics op
"""
if self.config.train_probes:
return teacher_probes_eval_metrics(logits, labels, self.config.num_labels)
else:
return classification_eval_metrics(logits[0], labels, self.config.num_labels)
def build_predictions(self, predict_output):
""" Building prediction dict of KD Teacher
Args:
predict_output (`dict`): return value of build_logits
Returns:
predict_output (`dict`): A dict for the output to the file
"""
return predict_output
def train_and_evaluate_on_the_fly():
app = TeacherNetwork()
train_reader = get_reader_fn()(input_glob=app.config.train_input_fp,
input_schema=app.config.input_schema,
is_training=True,
batch_size=app.config.train_batch_size)
eval_reader = get_reader_fn()(input_glob=app.config.eval_input_fp,
input_schema=app.config.input_schema,
is_training=False,
batch_size=app.config.eval_batch_size)
app.run_train_and_evaluate(train_reader=train_reader, eval_reader=eval_reader)
tf.logging.info("Finished training")
def evaluate_on_the_fly():
app = TeacherNetwork()
eval_reader = get_reader_fn()(input_glob=app.config.eval_input_fp,
input_schema=app.config.input_schema,
is_training=False,
batch_size=app.config.eval_batch_size)
app.run_evaluate(reader=eval_reader, checkpoint_path=app.config.eval_ckpt_path)
tf.logging.info("Finished training")
def predict_on_the_fly():
app = TeacherNetwork()
reader = get_reader_fn()(input_glob=app.config.predict_input_fp,
input_schema=app.config.input_schema,
is_training=False,
batch_size=app.config.predict_batch_size)
writer = get_writer_fn()(output_glob=app.config.predict_output_fp,
output_schema=app.config.output_schema)
app.run_predict(reader=reader, writer=writer, checkpoint_path=app.config.predict_checkpoint_path)
if __name__ == "__main__":
starttime = datetime.datetime.now()
if FLAGS.mode == "train_and_evaluate_on_the_fly":
train_and_evaluate_on_the_fly()
elif FLAGS.mode == "evaluate_on_the_fly":
evaluate_on_the_fly()
elif FLAGS.mode == "predict_on_the_fly":
predict_on_the_fly()
else:
raise RuntimeError("invalid mode")
endtime = datetime.datetime.now()
tf.logging.info("Finished in {} seconds".format((endtime - starttime).seconds))
```
#### File: scripts/pretraining_language_model/pretrain_main.py
```python
import json
import multiprocessing
import random
import shutil
from glob import glob
import os
import tensorflow as tf
from tqdm import tqdm
from easytransfer import base_model, FLAGS
from easytransfer import model_zoo
from easytransfer import layers
from easytransfer.losses import softmax_cross_entropy
from easytransfer import preprocessors
from easytransfer.datasets import BundleTFRecordReader, OdpsTableReader
from easytransfer.evaluators import masked_language_model_eval_metrics, next_sentence_prediction_eval_metrics
from easytransfer.losses import masked_language_model_loss, next_sentence_prediction_loss
from easytransfer.preprocessors.tokenization import FullTokenizer
from pretrain_utils import create_training_instances, write_instance_to_file, PretrainConfig, _APP_FLAGS
class Pretrain(base_model):
def __init__(self, **kwargs):
super(Pretrain, self).__init__(**kwargs)
self.user_defined_config = kwargs.get("user_defined_config", None)
def build_logits(self, features, mode=None):
bert_preprocessor = preprocessors.get_preprocessor(self.pretrain_model_name_or_path,
app_model_name="pretrain_language_model",
user_defined_config=self.user_defined_config)
if _APP_FLAGS.distribution_strategy == "WhaleStrategy" or \
self.config.distribution_strategy == "WhaleStrategy":
tf.logging.info("*********Calling Whale Encoder***********")
model = model_zoo.get_pretrained_model(self.pretrain_model_name_or_path, enable_whale=True,
input_sequence_length=_APP_FLAGS.input_sequence_length)
else:
model = model_zoo.get_pretrained_model(self.pretrain_model_name_or_path,
input_sequence_length=_APP_FLAGS.input_sequence_length)
if _APP_FLAGS.loss == "mlm+nsp" or _APP_FLAGS.loss == "mlm+sop":
input_ids, input_mask, segment_ids, masked_lm_positions, \
masked_lm_ids, masked_lm_weights, next_sentence_labels = bert_preprocessor(features)
lm_logits, nsp_logits, _ = model([input_ids, input_mask, segment_ids],
masked_lm_positions=masked_lm_positions,
output_features=False,
mode=mode)
return (lm_logits, nsp_logits), (masked_lm_ids, masked_lm_weights, next_sentence_labels)
elif _APP_FLAGS.loss == "mlm":
input_ids, input_mask, segment_ids, masked_lm_positions, \
masked_lm_ids, masked_lm_weights = bert_preprocessor(features)
lm_logits, _, _ = model([input_ids, input_mask, segment_ids],
masked_lm_positions=masked_lm_positions,
output_features=False,
mode=mode)
return lm_logits, (masked_lm_ids, masked_lm_weights)
def build_loss(self, logits, labels):
if _APP_FLAGS.loss == "mlm":
lm_logits = logits
masked_lm_ids, masked_lm_weights = labels
masked_lm_loss = masked_language_model_loss(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
return masked_lm_loss
elif _APP_FLAGS.loss == "mlm+nsp" or _APP_FLAGS.loss == "mlm+sop":
lm_logits, nsp_logits = logits
masked_lm_ids, masked_lm_weights, nx_sent_labels = labels
masked_lm_loss = masked_language_model_loss(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
nsp_loss = next_sentence_prediction_loss(nsp_logits, nx_sent_labels)
return masked_lm_loss + nsp_loss
def build_eval_metrics(self, logits, labels):
if _APP_FLAGS.loss == "mlm+nsp" or _APP_FLAGS.loss == "mlm+sop":
lm_logits, nsp_logits = logits
masked_lm_ids, masked_lm_weights, next_sentence_labels = labels
mlm_metrics = masked_language_model_eval_metrics(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
nsp_metrics = next_sentence_prediction_eval_metrics(nsp_logits, next_sentence_labels)
            # dict.update() returns None, so merge the metric dicts and return the result
            mlm_metrics.update(nsp_metrics)
            return mlm_metrics
elif _APP_FLAGS.loss == "mlm":
lm_logits = logits
masked_lm_ids, masked_lm_weights = labels
mlm_metrics = masked_language_model_eval_metrics(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
return mlm_metrics
class PretrainMultitask(base_model):
def __init__(self, **kwargs):
super(PretrainMultitask, self).__init__(**kwargs)
self.user_defined_config = kwargs.get("user_defined_config", None)
def build_logits(self, features, mode=None):
bert_preprocessor = preprocessors.get_preprocessor(self.pretrain_model_name_or_path,
app_model_name="pretrain_language_model",
user_defined_config=self.user_defined_config)
if _APP_FLAGS.distribution_strategy == "WhaleStrategy" or \
self.config.distribution_strategy == "WhaleStrategy":
tf.logging.info("*********Calling Whale Encoder***********")
model = model_zoo.get_pretrained_model(self.pretrain_model_name_or_path, enable_whale=True,
input_sequence_length=_APP_FLAGS.input_sequence_length)
else:
model = model_zoo.get_pretrained_model(self.pretrain_model_name_or_path,
input_sequence_length=_APP_FLAGS.input_sequence_length)
if _APP_FLAGS.loss == "mlm+nsp" or _APP_FLAGS.loss == "mlm+sop":
input_ids, input_mask, segment_ids, masked_lm_positions, \
masked_lm_ids, masked_lm_weights, next_sentence_labels = bert_preprocessor(features)
lm_logits, nsp_logits, _ = model([input_ids, input_mask, segment_ids],
masked_lm_positions=masked_lm_positions,
output_features=False,
mode=mode)
return (lm_logits, nsp_logits), (masked_lm_ids, masked_lm_weights, next_sentence_labels)
elif _APP_FLAGS.loss == "mlm":
task_1_dense = layers.Dense(2,
kernel_initializer=layers.get_initializer(0.02),
name='task_1_dense')
input_ids, input_mask, segment_ids, masked_lm_positions, \
masked_lm_ids, masked_lm_weights, task_1_label = bert_preprocessor(features)
lm_logits, _, pooled_output = model([input_ids, input_mask, segment_ids],
masked_lm_positions=masked_lm_positions,
output_features=False,
mode=mode)
task_1_logits = task_1_dense(pooled_output)
return (lm_logits, task_1_logits), (masked_lm_ids, masked_lm_weights, task_1_label)
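            # Note: in the "mlm" branch this multitask variant adds a 2-way sentence-level
            # classification head (task_1_dense) on the pooled output alongside the MLM head.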
def build_loss(self, logits, labels):
if _APP_FLAGS.loss == "mlm":
lm_logits, task_1_logits = logits
masked_lm_ids, masked_lm_weights, task_1_label = labels
masked_lm_loss = masked_language_model_loss(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
task_1_loss = softmax_cross_entropy(task_1_label, 2, task_1_logits)
return masked_lm_loss + task_1_loss
elif _APP_FLAGS.loss == "mlm+nsp" or _APP_FLAGS.loss == "mlm+sop":
lm_logits, nsp_logits = logits
masked_lm_ids, masked_lm_weights, nx_sent_labels = labels
masked_lm_loss = masked_language_model_loss(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
nsp_loss = next_sentence_prediction_loss(nsp_logits, nx_sent_labels)
return masked_lm_loss + nsp_loss
def build_eval_metrics(self, logits, labels):
if _APP_FLAGS.loss == "mlm+nsp" or _APP_FLAGS.loss == "mlm+sop":
lm_logits, nsp_logits = logits
masked_lm_ids, masked_lm_weights, next_sentence_labels = labels
mlm_metrics = masked_language_model_eval_metrics(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
nsp_metrics = next_sentence_prediction_eval_metrics(nsp_logits, next_sentence_labels)
            # dict.update() returns None, so merge the metric dicts and return the result
            mlm_metrics.update(nsp_metrics)
            return mlm_metrics
elif _APP_FLAGS.loss == "mlm":
            # build_logits returns (lm_logits, task_1_logits) and a 3-tuple of labels here
            lm_logits, _ = logits
            masked_lm_ids, masked_lm_weights, _ = labels
mlm_metrics = masked_language_model_eval_metrics(lm_logits, masked_lm_ids, masked_lm_weights,
_APP_FLAGS.vocab_size)
return mlm_metrics
def run(mode):
if FLAGS.config is None:
config_json = {
"model_type": _APP_FLAGS.model_type,
"vocab_size": _APP_FLAGS.vocab_size,
"hidden_size": _APP_FLAGS.hidden_size,
"intermediate_size": _APP_FLAGS.intermediate_size,
"num_hidden_layers": _APP_FLAGS.num_hidden_layers,
"max_position_embeddings": 512,
"num_attention_heads": _APP_FLAGS.num_attention_heads,
"type_vocab_size": 2
}
if not tf.gfile.Exists(_APP_FLAGS.model_dir):
tf.gfile.MkDir(_APP_FLAGS.model_dir)
# Pretrain from scratch
if _APP_FLAGS.pretrain_model_name_or_path is None:
if not tf.gfile.Exists(_APP_FLAGS.model_dir + "/config.json"):
with tf.gfile.GFile(_APP_FLAGS.model_dir + "/config.json", mode='w') as f:
json.dump(config_json, f)
shutil.copy2(_APP_FLAGS.vocab_fp, _APP_FLAGS.model_dir)
if _APP_FLAGS.spm_model_fp is not None:
shutil.copy2(_APP_FLAGS.spm_model_fp, _APP_FLAGS.model_dir)
config = PretrainConfig()
if _APP_FLAGS.do_multitaks_pretrain:
app = PretrainMultitask(user_defined_config=config)
else:
app = Pretrain(user_defined_config=config)
else:
if _APP_FLAGS.do_multitaks_pretrain:
app = PretrainMultitask()
else:
app = Pretrain()
if "train" in mode:
if _APP_FLAGS.data_reader == 'tfrecord':
train_reader = BundleTFRecordReader(input_glob=app.train_input_fp,
is_training=True,
shuffle_buffer_size=1024,
worker_hosts=FLAGS.worker_hosts,
task_index=FLAGS.task_index,
input_schema=app.input_schema,
batch_size=app.train_batch_size)
elif _APP_FLAGS.data_reader == 'odps':
tf.logging.info("***********Reading Odps Table *************")
worker_id = FLAGS.task_index
num_workers = len(FLAGS.worker_hosts.split(","))
train_reader = OdpsTableReader(input_glob=app.train_input_fp,
is_training=True,
shuffle_buffer_size=1024,
input_schema=app.input_schema,
slice_id=worker_id,
slice_count=num_workers,
batch_size=app.train_batch_size)
if mode == "train_and_evaluate":
if _APP_FLAGS.data_reader == 'tfrecord':
eval_reader = BundleTFRecordReader(input_glob=app.eval_input_fp,
is_training=False,
shuffle_buffer_size=1024,
worker_hosts=FLAGS.worker_hosts,
task_index=FLAGS.task_index,
input_schema=app.input_schema,
batch_size=app.eval_batch_size)
elif _APP_FLAGS.data_reader == 'odps':
eval_reader = OdpsTableReader(input_glob=app.train_input_fp,
is_training=False,
shuffle_buffer_size=1024,
input_schema=app.input_schema,
slice_id=worker_id,
slice_count=num_workers,
batch_size=app.train_batch_size)
app.run_train_and_evaluate(train_reader=train_reader, eval_reader=eval_reader)
elif mode == "train":
app.run_train(reader=train_reader)
elif mode == "evaluate":
if _APP_FLAGS.data_reader == 'tfrecord':
eval_reader = BundleTFRecordReader(input_glob=app.eval_input_fp,
is_training=False,
shuffle_buffer_size=1024,
worker_hosts=FLAGS.worker_hosts,
task_index=FLAGS.task_index,
input_schema=app.input_schema,
batch_size=app.eval_batch_size)
elif _APP_FLAGS.data_reader == 'odps':
eval_reader = OdpsTableReader(input_glob=app.train_input_fp,
is_training=False,
shuffle_buffer_size=1024,
input_schema=app.input_schema,
slice_id=worker_id,
slice_count=num_workers,
batch_size=app.train_batch_size)
ckpts = set()
with tf.gfile.GFile(os.path.join(app.config.model_dir, "checkpoint"), mode='r') as reader:
for line in reader:
line = line.strip()
line = line.replace("oss://", "")
ckpts.add(int(line.split(":")[1].strip().replace("\"", "").split("/")[-1].replace("model.ckpt-", "")))
        ckpts.discard(0)  # skip the untrained step-0 checkpoint if it is present
writer = tf.summary.FileWriter(os.path.join(app.config.model_dir, "eval_output"))
for ckpt in sorted(ckpts):
checkpoint_path = os.path.join(app.config.model_dir, "model.ckpt-" + str(ckpt))
tf.logging.info("checkpoint_path is {}".format(checkpoint_path))
ret_metrics = app.run_evaluate(reader=eval_reader,
checkpoint_path=checkpoint_path)
global_step = ret_metrics['global_step']
eval_masked_lm_accuracy = tf.Summary()
eval_masked_lm_accuracy.value.add(tag='masked_lm_valid_accuracy', simple_value=ret_metrics['eval_masked_lm_accuracy'])
eval_masked_lm_loss = tf.Summary()
eval_masked_lm_loss.value.add(tag='masked_lm_valid_loss', simple_value=ret_metrics['eval_masked_lm_loss'])
writer.add_summary(eval_masked_lm_accuracy, global_step)
writer.add_summary(eval_masked_lm_loss, global_step)
writer.close()
def run_preprocess(input_file, output_file):
rng = random.Random(12345)
if _APP_FLAGS.tokenizer == "wordpiece":
tokenizer = FullTokenizer(vocab_file=_APP_FLAGS.vocab_fp)
elif _APP_FLAGS.tokenizer == "sentencepiece":
tokenizer = FullTokenizer(spm_model_file=_APP_FLAGS.spm_model_fp)
instances = create_training_instances(
input_file, tokenizer, _APP_FLAGS.max_seq_length, _APP_FLAGS.dupe_factor,
_APP_FLAGS.short_seq_prob, _APP_FLAGS.masked_lm_prob, _APP_FLAGS.max_predictions_per_seq,
_APP_FLAGS.do_whole_word_mask,
rng)
    # use the app-level flags consistently with create_training_instances above
    write_instance_to_file(instances, tokenizer, _APP_FLAGS.max_seq_length,
                           _APP_FLAGS.max_predictions_per_seq, output_file)
def preprocess():
po = multiprocessing.Pool(_APP_FLAGS.num_threads)
if not os.path.exists(_APP_FLAGS.output_dir):
os.makedirs(_APP_FLAGS.output_dir)
for input_file in tqdm(glob(_APP_FLAGS.input_dir + "/*.txt")):
file_name = input_file.split("/")[-1].replace(".txt", ".tfrecord")
output_file = os.path.join(_APP_FLAGS.output_dir, file_name)
po.apply_async(func=run_preprocess, args=(input_file, output_file))
po.close()
po.join()
def main():
if FLAGS.mode == "train_and_evaluate" or FLAGS.mode == "train" or FLAGS.mode == "evaluate":
run(FLAGS.mode)
elif FLAGS.mode == "preprocess":
preprocess()
if __name__ == "__main__":
main()
```
#### File: unit_tests/appzoo_tests/test_ez_conversion.py
```python
import os
import subprocess
import unittest
import shutil
class TestEzConversion(unittest.TestCase):
def test_conversion(self):
argvs = ['easy_transfer_app',
'--mode', 'export',
'--checkpointPath', './pai-bert-base-zh/model.ckpt',
'--exportType', 'convert_bert_to_google',
'--exportDirBase', 'ez_conversion/',
]
print(' '.join(argvs))
try:
res = subprocess.check_output(' '.join(argvs), stderr=subprocess.STDOUT, shell=True)
print(res)
except subprocess.CalledProcessError as e:
print(e.output)
raise RuntimeError
shutil.rmtree('ez_conversion/', ignore_errors=True)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "johnson7788/fastNLP",
"score": 3
} |
#### File: fastNLP/mytest/cosmetics_NER_StaticEmbedding_BiLSTM.py
```python
def load_data():
from fastNLP.io import WeiboNERPipe
data_bundle = WeiboNERPipe().process_from_file(paths="data")
print(data_bundle.get_dataset('train')[:2])
return data_bundle
def build_model_metric(data_bundle):
"""
    BiLSTMCRF overfits very easily and is not particularly recommended; its results are mediocre.
    :param data_bundle:
    :return:
    """
    # ## Model construction
    #
    # First choose the Embedding type to use. For details on Embeddings, see the fastNLP tutorial
    # "Using the Embedding module to turn text into vectors". Here we use Chinese character
    # embeddings pre-trained with word2vec.
from fastNLP.embeddings import StaticEmbedding
from fastNLP import SpanFPreRecMetric
embed = StaticEmbedding(vocab=data_bundle.get_vocab('chars'), model_dir_or_name='cn-char-fastnlp-100d')
# 选择好Embedding之后,我们可以使用fastNLP中自带的 fastNLP.models.BiLSTMCRF 作为模型。
from fastNLP.models import BiLSTMCRF
    # The forward function of BiLSTMCRF expects a field named 'words' rather than 'chars', so rename that column.
data_bundle.rename_field('chars', 'words')
model = BiLSTMCRF(embed=embed, num_classes=len(data_bundle.get_vocab('target')), num_layers=1, hidden_size=200,
dropout=0.5,
target_vocab=data_bundle.get_vocab('target'))
metric = SpanFPreRecMetric(tag_vocab=data_bundle.get_vocab('target'))
return model, metric
def do_train(data_bundle, model, metric):
    # ## Training
    # Below we choose the metric used to evaluate the model and the optimizer used for training.
from torch.optim import Adam
from fastNLP import LossInForward
optimizer = Adam(model.parameters(), lr=1e-2)
loss = LossInForward()
    # Train with the Trainer; you can select the GPU by changing the value of device.
from fastNLP import Trainer
import torch
device = 0 if torch.cuda.is_available() else 'cpu'
trainer = Trainer(data_bundle.get_dataset('train'), model, loss=loss, optimizer=optimizer,
dev_data=data_bundle.get_dataset('dev'), metrics=metric, device=device, save_path="output", n_epochs=50)
trainer.train()
def do_test(data_bundle, metric, model_path, save_excel="test.xlsx"):
    # ## Testing
    # After training finishes, the Tester can be used to measure performance on the test set.
from fastNLP import Tester
from fastNLP.io import ModelLoader
import os
    # If a directory is given, use the single model file inside it.
if os.path.isdir(model_path):
models_file = os.listdir(model_path)
if len(models_file) != 1:
print("模型文件不仅一个,请手动给定")
import sys
sys.exit(1)
else:
model_path = os.path.join(model_path,models_file[0])
model = ModelLoader.load_pytorch_model(model_path)
tester = Tester(data_bundle.get_dataset('test'), model, metrics=metric)
eval_results = tester.test()
id2labels = data_bundle.vocabs['target'].idx2word
test_contents = data_bundle.get_dataset('test').get_field("raw_chars").content
true_labels = data_bundle.get_dataset('test').get_field("target").content
predict_ids = eval_results['predict_results']
results = []
for content, true_id, predict_id in zip(test_contents, true_labels, predict_ids):
label = list(map(lambda x: id2labels[x], true_id))
predict = list(map(lambda x: id2labels[x], predict_id))
if len(content) != len(label):
print("句子内容和真实label长度不匹配,错误")
print(content)
print(label)
break
predict = predict[:len(label)]
con = " ".join(content)
la = " ".join(label)
pre = " ".join(predict)
print("句子:", con)
print("真实标签:", la)
print("预测标签:", pre)
words = []
word = ""
for idx, p in enumerate(predict):
if p.startswith('B-'):
if word != "":
                    # The previous word is complete: add it to the word list, then reset.
words.append(word)
word = ""
word += content[idx]
elif p.startswith('I-'):
word += content[idx]
else:
                # If a word has been accumulated, add it to the word list.
if word:
words.append(word)
word = ""
print("真实的词:", words)
results.append({'content':con, "words":words, "predict":pre})
if save_excel:
import pandas as pd
df = pd.DataFrame(results)
writer = pd.ExcelWriter(save_excel)
df.to_excel(writer)
writer.save()
if __name__ == '__main__':
data_bundle = load_data()
model, metric = build_model_metric(data_bundle)
# do_train(data_bundle, model, metric)
do_test(data_bundle, metric, model_path="output/best_BiLSTMCRF_f_2020-12-04-16-55-05-725458")
``` |
{
"source": "johnson7788/football",
"score": 3
} |
#### File: johnson7788/football/my_A2C.py
```python
import math
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
import torch
import torch.nn as nn
import torch.nn.functional as F
import gfootball.env as football_env
from gfootball.env import observation_preprocessing
import collections
torch.manual_seed(0)
np.random.seed(0)
class Actor(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=8, stride=4, padding=0, bias=True)
self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=4, stride=2, padding=0, bias=True)
self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=0, bias=True)
self.fc1 = nn.Linear(1280 + 4, 256, bias=True)
self.fc2 = nn.Linear(256, 19)
self.relu = nn.ReLU()
self.b_1 = nn.BatchNorm2d(4)
self.b_2 = nn.BatchNorm2d(8)
self.b_3 = nn.BatchNorm2d(16)
self.b_4 = nn.BatchNorm1d(1280 + 4)
self.b_5 = nn.BatchNorm1d(256)
def forward(self, x, scalar):
        x = torch.tensor(x).float()  # convert the observation to a float tensor
x = x.permute(0, 3, 1, 2).contiguous() # 1 x channels x height x width
x = self.b_1(x)
x = self.relu(self.conv1(x))
x = self.b_2(x)
x = self.relu(self.conv2(x))
x = self.b_3(x)
x = self.relu(self.conv3(x))
x = x.reshape(x.shape[0], -1) # flatten
x = self.b_4(torch.cat([x, scalar], 1))
x = self.relu(self.fc1(x))
x = self.b_5(x)
x = self.fc2(x)
return F.softmax(x, dim=-1)
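# The Actor consumes a 4-channel 72x96 spatial observation plus 4 scalar features
# (see create_obs below): three conv layers flatten to 1280 features, which are
# concatenated with the scalars and passed through two fully connected layers to
# produce a softmax distribution over the 19 discrete football actions.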
class Critic(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=8, stride=4, padding=0, bias=True)
self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=4, stride=2, padding=0, bias=True)
self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=0, bias=True)
self.fc1 = nn.Linear(1280 + 4, 256, bias=True)
self.fc2 = nn.Linear(256, 1)
self.relu = nn.ReLU()
self.b_1 = nn.BatchNorm2d(4)
self.b_2 = nn.BatchNorm2d(8)
self.b_3 = nn.BatchNorm2d(16)
self.b_4 = nn.BatchNorm1d(1280 + 4)
self.b_5 = nn.BatchNorm1d(256)
def forward(self, x, scalar):
        x = torch.tensor(x).float()  # convert the observation to a float tensor
x = x.permute(0, 3, 1, 2).contiguous() # 1 x channels x height x width
x = self.b_1(x)
x = self.relu(self.conv1(x))
x = self.b_2(x)
x = self.relu(self.conv2(x))
x = self.b_3(x)
x = self.relu(self.conv3(x))
x = x.reshape(x.shape[0], -1) # flatten
x = self.b_4(torch.cat([x, scalar], 1))
x = self.relu(self.fc1(x))
x = self.b_5(x)
x = self.fc2(x)
return x
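# The Critic mirrors the Actor's convolutional trunk but ends in a single linear unit
# that estimates the state value used for the advantage computation during training.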
directions = [
[Action.TopLeft, Action.Top, Action.TopRight],
[Action.Left, Action.Idle, Action.Right],
[Action.BottomLeft, Action.Bottom, Action.BottomRight]]
dirsign = lambda x: 1 if abs(x) < 0.01 else (0 if x < 0 else 2)
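# dirsign maps a coordinate delta to a grid index: 0 for negative, 1 for (near) zero,
# 2 for positive, so directions[dirsign(dy)][dirsign(dx)] picks the movement action
# heading toward a target, e.g. directions[dirsign(ty - y)][dirsign(tx - x)].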
enemyGoal = [1, 0]
perfectRange = [[0.61, 1], [-0.2, 0.2]]
def inside(pos, area):
return area[0][0] <= pos[0] <= area[0][1] and area[1][0] <= pos[1] <= area[1][1]
def get_distance(pos1, pos2):
return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5
def player_direction(obs):
controlled_player_pos = obs['left_team'][obs['active']]
controlled_player_dir = obs['left_team_direction'][obs['active']]
x = controlled_player_pos[0]
y = controlled_player_pos[1]
dx = controlled_player_dir[0]
dy = controlled_player_dir[1]
if x <= dx:
return 0
if x > dx:
return 1
def run_pass(left_team, right_team, x, y):
###Are there defenders dead ahead?
defenders = 0
for i in range(len(right_team)):
if right_team[i][0] > x and y + .01 >= right_team[i][1] and right_team[i][1] >= y - .01:
if abs(right_team[i][0] - x) < .01:
defenders = defenders + 1
if defenders == 0:
return Action.Right
teammateL = 0
teammateR = 0
for i in range(len(left_team)):
# is there a teamate close to left
if left_team[i][0] >= x:
if left_team[i][1] < y:
if abs(left_team[i][1] - x) < .05:
teammateL = teammateL + 1
# is there a teamate to right
if left_team[i][0] >= x:
if left_team[i][1] > y:
if abs(left_team[i][1] - x) < .05:
teammateR = teammateR + 1
# pass only close to goal
if x > .75:
if teammateL > 0 or teammateR > 0:
return Action.ShortPass
if defenders > 0 and y >= 0:
return Action.TopRight
if defenders > 0 and y < 0:
return Action.BottomRight
def agent(obs):
controlled_player_pos = obs['left_team'][obs['active']]
# special plays
if obs["game_mode"] == GameMode.Penalty:
return Action.Shot
if obs["game_mode"] == GameMode.Corner:
if controlled_player_pos[0] > 0:
return Action.Shot
if obs["game_mode"] == GameMode.FreeKick:
return Action.Shot
# Make sure player is running.
if 0 < controlled_player_pos[0] < 0.6 and Action.Sprint not in obs['sticky_actions']:
return Action.Sprint
elif 0.6 < controlled_player_pos[0] and Action.Sprint in obs['sticky_actions']:
return Action.ReleaseSprint
# Does the player we control have the ball?
if obs['ball_owned_player'] == obs['active'] and obs['ball_owned_team'] == 0:
goalkeeper = 0
# if in the zone near goal shoot
if inside(controlled_player_pos, perfectRange) and controlled_player_pos[0] < obs['ball'][0]:
return Action.Shot
# if the goalie is coming out on player near goal shoot
elif abs(obs['right_team'][goalkeeper][0] - 1) > 0.2 and controlled_player_pos[0] > 0.4 and abs(
controlled_player_pos[1]) < 0.2:
return Action.Shot
# if close to goal and too wide for shot pass the ball
if controlled_player_pos[0] > .75 and controlled_player_pos[1] > .20 or controlled_player_pos[0] > .75 and \
controlled_player_pos[1] < -.20:
return Action.ShortPass
# if near our goal and moving away long pass to get out of our zone
if player_direction(obs) == 1 and controlled_player_pos[0] < -.3:
return Action.LongPass
# which way should we run or pass
else:
return run_pass(obs['left_team'], obs['right_team'], controlled_player_pos[0], controlled_player_pos[1])
else:
# vector where ball is going
ball_targetx = obs['ball'][0] + obs['ball_direction'][0]
ball_targety = obs['ball'][1] + obs['ball_direction'][1]
# euclidian distance to the ball so we head off movement until very close
e_dist = get_distance(obs['left_team'][obs['active']], obs['ball'])
# if not close to ball move to where it is going
if e_dist > .005:
# Run where ball will be
xdir = dirsign(ball_targetx - controlled_player_pos[0])
ydir = dirsign(ball_targety - controlled_player_pos[1])
return directions[ydir][xdir]
# if close to ball go to ball
else:
# Run towards the ball.
xdir = dirsign(obs['ball'][0] - controlled_player_pos[0])
ydir = dirsign(obs['ball'][1] - controlled_player_pos[1])
return directions[ydir][xdir]
def get_coordinates(arr):
w_step = 2 / 96.0
h_step = 0.84 / 72
x, y = arr
x_i = 0
y_i = 0
for i in range(1, 96):
if x < -1 or x > 1:
if x < -1:
x_i = 0
else:
x_i = 95
else:
if -1 + (i - 1) * w_step <= x <= -1 + i * w_step:
x_i = i
break
for i in range(1, 72):
if y < -0.42 or y > 0.42:
if y < -0.42:
y_i = 0
else:
y_i = 71
else:
if -0.42 + (i - 1) * h_step <= y <= -0.42 + i * h_step:
y_i = i
break
return [y_i, x_i]
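# get_coordinates maps continuous pitch coordinates (x in [-1, 1], y in [-0.42, 0.42])
# onto the 72x96 minimap grid and returns [row, col]; e.g. get_coordinates([0.0, 0.0])
# lands at about [36, 48], the centre of the grid. Out-of-range values are clamped.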
def get_team_coordinates(team_arr):
answ = []
for j in range(len(team_arr)):
answ.append(get_coordinates(team_arr[j]))
return answ
def angle(src, tgt):
dx = tgt[0] - src[0]
dy = tgt[1] - src[1]
theta = round(math.atan2(dx, -dy) * 180 / math.pi, 2)
while theta < 0:
theta += 360
return theta
def direction(src, tgt):
actions = [3, 4, 5,
6, 7,
8, 1, 2]
theta = angle(src, tgt)
index = int(((theta + 45 / 2) % 360) / 45)
return actions[index]
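# direction buckets the angle from src to tgt into one of eight 45-degree sectors and
# returns the corresponding discrete direction code used in the scalar features below.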
def create_obs(obs):
ball_coord = get_coordinates(obs['ball'][:-1])
left_team_coord = get_team_coordinates(obs['left_team'])
right_team_coord = get_team_coordinates(obs['right_team'])
player_coord = get_coordinates(obs['left_team'][obs['active']])
obs_1 = np.zeros(shape=(1, 72, 96, 4))
obs_1[0, ball_coord[0], ball_coord[1], 0] = 1
obs_1[0, player_coord[0], player_coord[1], 0] = 1
for i, l in enumerate(left_team_coord):
obs_1[0, l[0], l[1], 2] = 1
for i, r in enumerate(right_team_coord):
obs_1[0, r[0], r[1], 3] = 1
ball_next_coord = get_coordinates(
[obs['ball'][0] + obs['ball_direction'][0], obs['ball'][1] + obs['ball_direction'][1]])
left_team_next_coord = []
for i in range(len(obs['left_team'])):
left_team_next_coord.append([obs['left_team'][i][0] + obs['left_team_direction'][i][0],
obs['left_team'][i][1] + obs['left_team_direction'][i][1]])
right_team_next_coord = []
for i in range(len(obs['right_team'])):
right_team_next_coord.append([obs['right_team'][i][0] + obs['right_team_direction'][i][0],
obs['right_team'][i][1] + obs['right_team_direction'][i][1]])
scalar = np.zeros(shape=(1, 4))
scalar[0, 0] = obs['ball_owned_team']
scalar[0, 1] = obs['game_mode']
scalar[0, 2] = direction(obs['ball'][:-1], obs['ball_direction'][:-1])
scalar[0, 3] = direction(obs['left_team'][obs['active']], obs['left_team_direction'][obs['active']])
return obs_1, scalar
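# create_obs builds the network input: a 1x72x96x4 spatial tensor (channel 0 marks both
# the ball and the active player, channels 2/3 mark the left/right team positions, and
# channel 1 is left unused here) plus a 1x4 scalar vector holding ball ownership, game
# mode, and the discretised ball and player movement directions.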
def reward_plot(rewards_for_plot):
plt.figure(figsize=(10, 6))
plt.plot(rewards_for_plot)
plt.title('Train graphic', fontsize=22)
plt.xlabel('epoch', fontsize=18)
plt.ylabel('mean_reward', fontsize=18)
def train():
env = football_env.create_environment(
env_name="11_vs_11_kaggle",
representation='raw',
stacked=False,
logdir='.',
write_goal_dumps=False,
write_full_episode_dumps=False,
render=False,
number_of_left_players_agent_controls=1,
dump_frequency=0)
obs = env.reset()
created_obs = create_obs(obs[0])
actor = Actor()
critic = Critic()
# actor.load_state_dict(torch.load('actor.pth'))
# critic.load_state_dict(torch.load('critic.pth'))
env = football_env.create_environment(
env_name="11_vs_11_kaggle",
representation='raw',
stacked=False,
logdir='.',
write_goal_dumps=False,
write_full_episode_dumps=False,
render=False,
number_of_left_players_agent_controls=1,
number_of_right_players_agent_controls=1,
dump_frequency=0)
obs = env.reset()
adam_actor = torch.optim.Adam(actor.parameters(), lr=1e-3)
adam_critic = torch.optim.Adam(critic.parameters(), lr=1e-3)
gamma = 0.99
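    # discount factor used for the bootstrapped returns computed after each episode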
step_done = 0
rewards_for_plot = []
for steps_done in range(64):
states = []
actions = []
rewards = []
next_states = []
dones = []
games_play = 0
wins = 0
loses = 0
obs = env.reset()
values = []
log_probs = []
done = False
while not done:
converted_obs = create_obs(obs[0])
actor.eval()
prob = actor(torch.as_tensor(converted_obs[0], dtype=torch.float32),
torch.as_tensor(converted_obs[1], dtype=torch.float32))
actor.train()
dist = torch.distributions.Categorical(probs=prob)
act = dist.sample()
new_obs, reward, done, _ = env.step([act.detach().data.numpy()[0], (agent(obs[1])).value])
if reward[0] == -1:
loses += 1
done = True
if reward[0] == 1:
wins += 1
done = True
if reward[0] == 0 and done:
reward[0] = 0.25
last_q_val = 0
if done:
converted_next_obs = create_obs(new_obs[0])
critic.eval()
last_q_val = critic(torch.as_tensor(converted_next_obs[0], dtype=torch.float32),
torch.as_tensor(converted_next_obs[1], dtype=torch.float32))
last_q_val = last_q_val.detach().data.numpy()
critic.train()
states.append(obs[0])
action_arr = np.zeros(19)
action_arr[act] = 1
actions.append(action_arr)
rewards.append(reward[0])
next_states.append(new_obs[0])
dones.append(1 - int(done))
obs = new_obs
if done:
obs = env.reset()
break
rewards = np.array(rewards)
states = np.array(states)
actions = np.array(actions)
next_states = np.array(next_states)
dones = np.array(dones)
print('epoch ' + str(steps_done) + '\t' + 'reward_mean ' + str(np.mean(rewards)) + '\t' + 'games_count ' + str(
games_play) + '\t' + 'total_wins ' + str(wins) + '\t' + 'total_loses ' + str(loses))
rewards_for_plot.append(np.mean(rewards))
# train
q_vals = np.zeros((len(rewards), 1))
        # iterate down to index 0 so every step receives a discounted return
        for i in range(len(rewards) - 1, -1, -1):
last_q_val = rewards[i] + dones[i] * gamma * last_q_val
q_vals[i] = last_q_val
action_tensor = torch.as_tensor(actions, dtype=torch.float32)
obs_playgraund_tensor = torch.as_tensor(np.array([create_obs(states[i])[0][0] for i in range(len(rewards))]),
dtype=torch.float32)
obs_scalar_tensor = torch.as_tensor(np.array([create_obs(states[i])[1][0] for i in range(len(rewards))]),
dtype=torch.float32)
val = critic(obs_playgraund_tensor, obs_scalar_tensor)
probs = actor(obs_playgraund_tensor, obs_scalar_tensor)
advantage = torch.Tensor(q_vals) - val
critic_loss = advantage.pow(2).mean()
adam_critic.zero_grad()
critic_loss.backward()
adam_critic.step()
        # policy-gradient loss: use the log-probability of the action actually taken
        # (selected via the stored one-hot action vector), weighted by the advantage
        log_prob_taken = torch.log((probs * action_tensor).sum(dim=1, keepdim=True) + 1e-8)
        actor_loss = (-log_prob_taken * advantage.detach()).mean()
adam_actor.zero_grad()
actor_loss.backward(retain_graph=True)
adam_actor.step()
# soft_update(actor, target_actor, 0.8)
# soft_update(critic, target_critic, 0.8)
if steps_done != 0 and steps_done % 50 == 0:
torch.save(actor.state_dict(), 'actor.pth')
torch.save(critic.state_dict(), 'critic.pth')
torch.save(actor.state_dict(), 'actor.pth')
torch.save(critic.state_dict(), 'critic.pth')
reward_plot(rewards_for_plot)
```
#### File: johnson7788/football/my_ppo.py
```python
import os
import sys
import time
import gfootball.env as football_env
import argparse
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import CheckpointCallback
# All available training scenarios (levels)
levels = ['11_vs_11_competition','11_vs_11_easy_stochastic','11_vs_11_hard_stochastic','11_vs_11_kaggle','11_vs_11_stochastic','1_vs_1_easy',
'5_vs_5','academy_3_vs_1_with_keeper','academy_corner','academy_counterattack_easy','academy_counterattack_hard','academy_empty_goal',
'academy_empty_goal_close','academy_pass_and_shoot_with_keeper','academy_run_pass_and_shoot_with_keeper','academy_run_to_score',
'academy_run_to_score_with_keeper','academy_single_goal_versus_lazy']
def model_config(parser):
    parser.add_argument('--level', default='5_vs_5', type=str, choices=levels, help='The problem to solve, i.e. which game scenario to use (one of the levels listed above)')
    parser.add_argument('--state', default='extracted_stacked', type=str, help='extracted or extracted_stacked')
    parser.add_argument('--reward_experiment', default='scoring,checkpoints', type=str, help='Reward scheme: "scoring" or "scoring,checkpoints"; for full-pitch games the combination of both usually works best')
    parser.add_argument('--num_timesteps', default=20000000, type=int, help='Number of training timesteps; around 2 million steps is typically enough')
    parser.add_argument('--nsteps', default=128, type=int, help='batch size is nsteps')
    parser.add_argument('--output_path', default='output', type=str, help='Directory for saving models; model names are timestamped automatically, default is output')
    parser.add_argument('--model_save_prefix', default='ppo_model', type=str, help='Prefix for saved model names')
    parser.add_argument('--model_save_frequency', default=100000, type=int, help='Save a checkpoint every this many steps, default 100000')
return parser
def data_config(parser):
    parser.add_argument('--log_dir', default='logs', help='Log directory')
parser.add_argument('--tensorboard', action='store_true')
return parser
def train_config(parser):
    parser.add_argument('--do_train', action='store_true', help="Train and then test the model")
    parser.add_argument('--do_eval', action='store_true', help="Only evaluate the model; the checkpoint to load must be provided")
    parser.add_argument('--load_checkpoint', default='output/ppo_model_20000000_steps.zip', type=str, help="Checkpoint to load when only evaluating the model")
    parser.add_argument('--initial_checkpoint', default='', type=str, help="Checkpoint to resume training from; empty by default")
    parser.add_argument('--dump_scores', action='store_true', default=True, help="Save sample trajectories that record scores.")
    parser.add_argument('--dump_full_episodes', action='store_true', default=True, help="Record the trajectory of every full episode.")
    parser.add_argument('--render', action='store_true', default=False, help="Whether to render the game animation")
parser.add_argument('--debug', action='store_true', help="print debug info")
return parser
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser = data_config(parser)
parser = model_config(parser)
parser = train_config(parser)
args = parser.parse_args()
if args.do_eval:
        # For evaluation use real time so the football animation is not sped up and stays watchable
other_config_options = {'real_time':True}
else:
other_config_options = {}
env = football_env.create_environment(
env_name=args.level, stacked=('stacked' in args.state),
rewards=args.reward_experiment,
logdir=args.log_dir,
write_goal_dumps=args.dump_scores,
write_full_episode_dumps=args.dump_full_episodes,
render=args.render,
dump_frequency=50,
other_config_options=other_config_options,)
    # Model configuration
model = PPO("MlpPolicy", env, verbose=1)
if args.initial_checkpoint:
model.load(args.initial_checkpoint)
if args.do_train:
print(f"开始训练,会耗时较长, 即将训练{args.num_timesteps}个step,模型保存频率为{args.model_save_frequency}")
checkpoint_callback = CheckpointCallback(save_freq=args.model_save_frequency, save_path=args.output_path,
name_prefix=args.model_save_prefix)
model.learn(total_timesteps=args.num_timesteps, callback=checkpoint_callback)
        # Save the final trained model
        # e.g. saved to output/ppo_model_final.zip
save_path = os.path.join(args.output_path, args.model_save_prefix + '_final.zip')
model.save(save_path)
elif args.do_eval:
print(f"评估模式,直接加载模型")
model.load(args.load_checkpoint)
else:
print(f"请选择需要训练还是测试评估, --do_train, --do_eval")
sys.exit(0)
    # Reset the environment so the model can be tested
obs = env.reset()
    # Test the model
    print(f"Start testing the model:")
step = 0
for i in range(1000):
step += 1
print(f"循环第{i}次,开始进行第{step}个step操作")
action, _states = model.predict(obs, deterministic=True)
obs, reward, done, info = env.step(action)
env.render()
if done:
print(f"这一个episode足球结束,开始下一个step测试")
step = 0
obs = env.reset()
env.close()
``` |
{
"source": "johnson7788/hypertools",
"score": 3
} |
#### File: hypertools/myexample/hypercube.py
```python
import numpy as np
from pyDOE import ff2n
from scipy.spatial.distance import pdist, squareform
import hypertools as hyp
get_ipython().run_line_magic('matplotlib', 'inline')
# ## Define hypercube functions
# In[2]:
def hypercube(ndims=4, res=100, x_min=-1, x_max=1):
length = x_max - x_min
vertices = (ff2n(ndims) + 1) / 2
vertices *= length
vertices += x_min
dists = squareform(pdist(vertices))
    # np.sum returns a float here; cast to int so it can be used as an array dimension
    n_edges = int(np.sum(dists == length) // 2)
    x = np.zeros([n_edges * res, ndims])
ind = 0
for i in range(vertices.shape[0]):
for j in range(i):
if dists[i, j] == length:
next = np.zeros([res, ndims])
for d in range(ndims):
next[:, d] = np.linspace(vertices[i, d], vertices[j, d], res)
x[np.arange(ind, ind+res), :] = next
ind += res
return x
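# hypercube enumerates the 2^ndims vertices of the cube via a two-level full factorial
# design (ff2n), treats every vertex pair at distance (x_max - x_min) as an edge, and
# interpolates `res` evenly spaced points along each edge, returning the wireframe as an
# (n_edges * res, ndims) array that hypertools can project down for plotting.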
# ## Create, plot, and save cube of desired dimensionality
# In[3]:
for dims in range(3,9):
print('DIMS: ' + str(dims))
hyp.plot(hypercube(dims), 'k.')
``` |
{
"source": "johnson7788/lit",
"score": 2
} |
#### File: lit_nlp/components/projection.py
```python
import abc
import copy
import threading
from typing import Any, Dict, List, Text, Optional, Hashable, Iterable, Type, Sequence
from absl import logging
from lit_nlp.api import components as lit_components
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import model as lit_model
from lit_nlp.api import types
from lit_nlp.lib import caching
JsonDict = types.JsonDict
IndexedInput = types.IndexedInput
Spec = types.Spec
class ProjectorModel(lit_model.Model, metaclass=abc.ABCMeta):
"""LIT model API implementation for dimensionality reduction."""
##
# Training methods
@abc.abstractmethod
def fit_transform(self, inputs: Iterable[JsonDict]) -> List[JsonDict]:
return
def fit_transform_with_metadata(self, indexed_inputs) -> List[JsonDict]:
return self.fit_transform((i["data"] for i in indexed_inputs))
##
# LIT model API
def input_spec(self):
# 'x' denotes input features
return {"x": types.Embeddings()}
def output_spec(self):
# 'z' denotes projected embeddings
return {"z": types.Embeddings()}
@abc.abstractmethod
def predict_minibatch(self, inputs: Iterable[JsonDict],
**unused_kw) -> List[JsonDict]:
return
def max_minibatch_size(self, **unused_kw):
return 1000
class ProjectionInterpreter(lit_components.Interpreter):
"""Interpreter API implementation for dimensionality reduction model."""
def __init__(self, model: lit_model.Model,
indexed_inputs: Sequence[IndexedInput],
model_outputs: Optional[List[JsonDict]],
projector: ProjectorModel, field_name: Text, name: Text):
self._projector = caching.CachingModelWrapper(projector, name=name)
self._field_name = field_name
# Train on the given examples
self._run(model, indexed_inputs, model_outputs, do_fit=True)
def convert_input(self, indexed_input: JsonDict,
model_output: JsonDict) -> JsonDict:
"""Convert inputs, preserving metadata."""
c = copy.copy(indexed_input) # shallow copy
c["data"] = {"x": model_output[self._field_name]}
return c
def _run(self,
model: lit_model.Model,
indexed_inputs: Sequence[IndexedInput],
model_outputs: Optional[List[JsonDict]] = None,
do_fit=False):
# Run model, if needed.
if model_outputs is None:
model_outputs = list(model.predict(indexed_inputs))
assert len(model_outputs) == len(indexed_inputs)
converted_inputs = list(
map(self.convert_input, indexed_inputs, model_outputs))
if do_fit:
return self._projector.fit_transform_with_metadata(
converted_inputs, dataset_name="")
else:
return self._projector.predict_with_metadata(
converted_inputs, dataset_name="")
def run_with_metadata(self,
indexed_inputs: Sequence[IndexedInput],
model: lit_model.Model,
dataset: lit_dataset.Dataset,
model_outputs: Optional[List[JsonDict]] = None,
config: Dict[Text, Any] = None):
del config # unused - configure in constructor instead
del dataset # unused - pass examples to constructor instead
return self._run(model, indexed_inputs, model_outputs, do_fit=False)
def _key_from_dict(d) -> Hashable:
"""Convert nested dict into a frozen, hashable structure usable as a key."""
if isinstance(d, dict):
return frozenset((k, _key_from_dict(v)) for k, v in d.items())
elif isinstance(d, (list, tuple)):
return tuple(map(_key_from_dict, d))
else:
return d
class ProjectionManager(lit_components.Interpreter):
"""Manager for multiple ProjectionInterpreter instances.
Presents a standard "Interpreter" interface so that client code can treat
this as an ordinary stateless component.
The config is used to uniquely identify the projection instance, and a new
instance is created and fit if not found.
Config must contain the following fields:
- field_name: name of embedding field (in model output)
- (recommended) dataset_name: used for model cache
- (optional) proj_kw: config for the underlying ProjectorModel
We also recommend including the model name and dataset name in the key, but
this is not explicitly enforced.
"""
def __init__(self, model_class: Type[ProjectorModel]):
self._lock = threading.RLock()
self._instances = {}
# Used to construct new instances, given config['proj_kw']
self._model_factory = model_class
def _train_instance(self, model: lit_model.Model,
dataset: lit_dataset.IndexedDataset, config: JsonDict,
name: Text) -> ProjectionInterpreter:
# Ignore pytype warning about abstract methods, since this should always
# be a subclass of ProjectorModel which has these implemented.
projector = self._model_factory(**config.get("proj_kw", {})) # pytype: disable=not-instantiable
train_inputs = dataset.indexed_examples
# TODO(lit-dev): remove 'dataset_name' from caching logic so we don't need
# to track it here or elsewhere.
train_outputs = list(
model.predict_with_metadata(
train_inputs, dataset_name=config.get("dataset_name")))
logging.info("Creating new projection instance on %d points",
len(train_inputs))
return ProjectionInterpreter(
model,
train_inputs,
train_outputs,
projector=projector,
field_name=config["field_name"],
name=name)
def run_with_metadata(self, *args, **kw):
# UMAP code is not threadsafe and will throw
# strange 'index-out-of-bounds' errors if multiple instances are accessed
# concurrently.
with self._lock:
return self._run_with_metadata(*args, **kw)
def _run_with_metadata(self,
indexed_inputs: Sequence[IndexedInput],
model: lit_model.Model,
dataset: lit_dataset.IndexedDataset,
model_outputs: Optional[List[JsonDict]] = None,
config: Dict[Text, Any] = None):
instance_key = _key_from_dict(config)
logging.info("Projection request: instance key: %s", instance_key)
# Fit a new instance if necessary
if instance_key not in self._instances:
self._instances[instance_key] = self._train_instance(
model, dataset, config, name=str(instance_key))
proj_instance = self._instances[instance_key]
# If projector was just trained, points should be cached.
return proj_instance.run_with_metadata(indexed_inputs, model, dataset,
model_outputs)
``` |
{
"source": "johnson7788/mt-dnn",
"score": 2
} |
#### File: johnson7788/mt-dnn/predict_api_test.py
```python
import requests
import json
def dopredict_absa(test_data, host="127.0.0.1:3326"):
"""
    Run prediction
:param test_data:
:return:
"""
url = f"http://{host}/api/absa_predict"
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
print(r.json())
return r.json()
def dopredict_absa_fullscore(test_data, host="127.0.0.1:3326"):
"""
    Run prediction
:param test_data:
:return:
"""
url = f"http://{host}/api/absa_predict_fullscore"
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
print(r.json())
return r.json()
def dopredict_dem8(test_data, host="127.0.0.1:3326"):
"""
    Run prediction
:param test_data: [('持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', '遮瑕', '成分'),...]
:return:
"""
url = f"http://{host}/api/dem8_predict"
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
print(r.json())
return r.json()
def dopredict_purchase(test_data, host="127.0.0.1:3326"):
"""
    Run prediction
:param test_data: [('持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', '遮瑕', '成分'),...]
:return:
"""
url = f"http://{host}/api/purchase_predict"
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
print(r.json())
return r.json()
def dem8(test_data, host="127.0.0.1:3326"):
"""
    Run prediction for the case of multiple aspect keywords
:param test_data: [('持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', ['遮瑕','粉底'], '成分'),...]
:return:
"""
url = f"http://{host}/api/dem8"
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
print(r.json())
return r.json()
def dopredict_absa_dem8(test_data, host="127.0.0.1:3326"):
"""
    Predict the aspect first, then predict sentiment
:param test_data: [('持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', '遮瑕', '成分'),...]
:return:
"""
url = f"http://{host}/api/absa_dem8_predict"
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
print(r.json())
return r.json()
def dopredict_absa_sentence(test_data, host="127.0.0.1:3326"):
"""
    Predict sentence-level sentiment
:param test_data: [('持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', '遮瑕', '成分'),...]
:return:
"""
url = f"http://{host}/api/absa_predict_sentence"
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
print(r.json())
return r.json()
if __name__ == '__main__':
# host = "127.0.0.1:3326"
# host = "192.168.50.139:3326"
host = "192.168.50.189:3326"
absa_data = [('这个遮瑕效果很差,很不好用', '遮瑕'), ('抗氧化效果一般', '抗氧化'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', '水润'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', '质感'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', '补水')]
dem8_data = [('持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', '遮瑕', '成分'), ('活动有赠品比较划算,之前买过快用完了,一支可以分两次使用,早上抗氧化必备VC', '抗氧化','成分'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', '水润', '功效'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', '质感','功效'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', '补水','功效')]
purchase_data = [['飘了大概两周 终于到了\n希望我的头发别再掉了\n但是我又纠结要不要用,\n看到各种各样的评价,还说有疯狂的脱发期,那我要不要用?\n担心越用越脱啊。再加上我头发甚至连头皮都是干巴巴的类型。\n#REGENERATE \n#Grow Gorgeous Grow Gorgeous强效防脱增发精华头发增长生发液密发增发英国进口 \n#Dr.Hauschka 德国世家 \n#Alpecin 咖啡因C1 洗发水 \n#Alpecin 咖啡因防脱免洗发根滋养液 ', '买了alpecin洗发水增发液纠结要不要用?', 'grow gorgeous强效防脱增发精华']]
# dem8_dd = [('持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', ['遮瑕','粉底'], '成分'), ('活动有赠品比较划算,之前买过快用完了,一支可以分两次使用,早上抗氧化必备VC', ['抗氧化'],'成分'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。',['水润','补水'], '功效'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', ['质感'],'功效'), ('海洋冰泉水润清透是MG面膜深受顾客喜爱的经典款面膜之一,已经使用了两年多了。该产品外包装精致、里面的面膜质感很好,与面部的贴合度、大小符合度都不错,使面膜的精华液能很好的均匀的敷于脸部各个部位。适用于各种肌肤,补水效果好,用后皮肤水润、光滑,以后还会回购的。', ['补水'],'功效')]
# dopredict_absa(host=host,test_data=absa_data)
# dopredict_absa_fullscore(host=host,test_data=absa_data)
# dopredict_dem8(host=host,test_data=dem8_data)
# dopredict_purchase(host=host,test_data=purchase_data)
# dem8(host=host,test_data=dem8_dd)
    # sentence-level sentiment
sentence_data = ['持妆不能输雅诗兰黛上妆即定妆雅诗兰黛DW粉底是我的心头好持妆遮瑕磨皮粉底液测评', '活动有赠品比较划算,之前买过快用完了,一支可以分两次使用,早上抗氧化必备VC']
dopredict_absa_sentence(host=host, test_data=sentence_data)
# dopredict_absa_dem8(test_data=dem8_data,host=host)
```
#### File: johnson7788/mt-dnn/train.py
```python
import argparse
import json
import os
import random
from datetime import datetime
from pprint import pprint
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, BatchSampler
from pretrained_models import *
from tensorboardX import SummaryWriter
#from torch.utils.tensorboard import SummaryWriter
from experiments.exp_def import TaskDefs
from mt_dnn.inference import eval_model, extract_encoding
from data_utils.log_wrapper import create_logger
from data_utils.task_def import EncoderModelType
from data_utils.utils import set_environment
from mt_dnn.batcher import SingleTaskDataset, MultiTaskDataset, Collater, MultiTaskBatchSampler, DistMultiTaskBatchSampler, DistSingleTaskBatchSampler
from mt_dnn.batcher import DistTaskDataset
from mt_dnn.model import MTDNNModel
def model_config(parser):
parser.add_argument('--update_bert_opt', default=0, type=int, help='是否更新固定预训练的bert模型参数,大于0表示固定')
parser.add_argument('--multi_gpu_on', action='store_true',help='默认False,是否使用多GPU')
parser.add_argument('--mem_cum_type', type=str, default='simple',
help='bilinear/simple/default')
parser.add_argument('--answer_num_turn', type=int, default=5,help='论文中的超参数K,K步推理')
parser.add_argument('--answer_mem_drop_p', type=float, default=0.1)
parser.add_argument('--answer_att_hidden_size', type=int, default=128)
parser.add_argument('--answer_att_type', type=str, default='bilinear', help='bilinear/simple/default')
parser.add_argument('--answer_rnn_type', type=str, default='gru', help='SAN逐步推理模块使用的结构是,rnn/gru/lstm')
parser.add_argument('--answer_sum_att_type', type=str, default='bilinear', help='bilinear/simple/default')
parser.add_argument('--answer_merge_opt', type=int, default=1)
parser.add_argument('--answer_mem_type', type=int, default=1)
parser.add_argument('--max_answer_len', type=int, default=10)
parser.add_argument('--answer_dropout_p', type=float, default=0.1)
parser.add_argument('--answer_weight_norm_on', action='store_true')
parser.add_argument('--dump_state_on', action='store_true')
parser.add_argument('--answer_opt', type=int, default=1, help='可选0,1,代表是否使用SANClassifier分类头还是普通的线性分类头,1表示使用SANClassifier, 0是普通线性映射')
parser.add_argument('--pooler_actf', type=str, default='tanh',
help='tanh/relu/gelu, 构建输出头的时的激活函数的选择')
parser.add_argument('--mtl_opt', type=int, default=0)
parser.add_argument('--ratio', type=float, default=0)
parser.add_argument('--mix_opt', type=int, default=0)
parser.add_argument('--max_seq_len', type=int, default=512)
parser.add_argument('--init_ratio', type=float, default=1)
parser.add_argument('--encoder_type', type=int, default=EncoderModelType.BERT)
parser.add_argument('--num_hidden_layers', type=int, default=-1, help='-1表示不修改模型的隐藏层参数,使用默认值,否则修改')
# BERT pre-training
parser.add_argument('--bert_model_type', type=str, default='bert-base-uncased',help='使用的预训练模型')
parser.add_argument('--do_lower_case', action='store_true',help='是否小写')
parser.add_argument('--masked_lm_prob', type=float, default=0.15)
parser.add_argument('--short_seq_prob', type=float, default=0.2)
parser.add_argument('--max_predictions_per_seq', type=int, default=128)
# bin samples
parser.add_argument('--bin_on', action='store_true')
parser.add_argument('--bin_size', type=int, default=64)
parser.add_argument('--bin_grow_ratio', type=int, default=0.5)
# dist training
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--world_size", type=int, default=1, help="For distributed training: world size")
parser.add_argument("--master_addr", type=str, default="localhost")
parser.add_argument("--master_port", type=str, default="6600")
parser.add_argument("--backend", type=str, default="nccl")
return parser
def data_config(parser):
parser.add_argument('--log_file', default='mt-dnn-train.log', help='path for log file.')
parser.add_argument('--tensorboard', action='store_true')
parser.add_argument('--tensorboard_logdir', default='tensorboard_logdir')
parser.add_argument("--init_checkpoint", default='mt_dnn_models/bert_model_base_uncased.pt', type=str, help='使用哪个模型初始模型参数,请注意,选择正确的中英文模型')
parser.add_argument('--data_dir', default='data/canonical_data/bert_uncased_lower',help='tokenize后的数据的地址')
parser.add_argument('--data_sort_on', action='store_true')
parser.add_argument('--name', default='farmer')
parser.add_argument('--task_def', type=str, default="experiments/glue/glue_task_def.yml",help="使用的task任务定义的文件,默认是glue的task进行训练")
parser.add_argument('--train_datasets', default='mnli',help='训练的多个任务的数据集,用逗号,分隔,如果多个数据集存在')
parser.add_argument('--test_datasets', default='mnli_matched,mnli_mismatched',help='测试的多个任务的数据集,用逗号,分隔,如果多个数据集存在,根据任务名前缀自动匹配,例如mnli的前半部分mnli_')
parser.add_argument('--glue_format_on', action='store_true')
parser.add_argument('--mkd-opt', type=int, default=0,
help=">0表示开启知识蒸馏, requires 'softlabel' column in input data")
parser.add_argument('--do_padding', action='store_true')
return parser
def train_config(parser):
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available(),
help='是否使用GPU')
parser.add_argument('--log_per_updates', type=int, default=500)
parser.add_argument('--save_per_updates', type=int, default=10000,help='结合save_per_updates_on一起使用,表示每多少step,进行模型评估和保存')
parser.add_argument('--save_per_updates_on', action='store_true',help='每一步都保存模型,保存频繁,每步都评估 ')
parser.add_argument('--epochs', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=8, help='训练的batch_size')
parser.add_argument('--batch_size_eval', type=int, default=8)
parser.add_argument('--optimizer', default='adamax',
help='supported optimizer: adamax, sgd, adadelta, adam, 使用的优化器')
parser.add_argument('--grad_clipping', type=float, default=0)
parser.add_argument('--global_grad_clipping', type=float, default=1.0)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--learning_rate', type=float, default=5e-5)
parser.add_argument('--momentum', type=float, default=0)
parser.add_argument('--warmup', type=float, default=0.1)
parser.add_argument('--warmup_schedule', type=str, default='warmup_linear')
parser.add_argument('--adam_eps', type=float, default=1e-6)
parser.add_argument('--vb_dropout', action='store_false')
parser.add_argument('--dropout_p', type=float, default=0.1,help='构建输出头时Pooler的dropout设置')
parser.add_argument('--dropout_w', type=float, default=0.000)
parser.add_argument('--bert_dropout_p', type=float, default=0.1)
# loading
parser.add_argument("--model_ckpt", default='checkpoints/model_0.pt', type=str, help='继续训练模型时的已存在模型')
parser.add_argument("--resume", action='store_true',help='继续训练模型,结合参数--model_ckpt一起使用')
# scheduler
parser.add_argument('--have_lr_scheduler', dest='have_lr_scheduler', action='store_false')
parser.add_argument('--multi_step_lr', type=str, default='10,20,30')
#parser.add_argument('--feature_based_on', action='store_true')
parser.add_argument('--lr_gamma', type=float, default=0.5)
parser.add_argument('--scheduler_type', type=str, default='ms', help='ms/rop/exp')
parser.add_argument('--output_dir', default='checkpoint')
parser.add_argument('--seed', type=int, default=2018,
help='random seed for data shuffling, embedding init, etc.')
parser.add_argument('--grad_accumulation_step', type=int, default=1)
#fp 16
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
# adv training
parser.add_argument('--adv_train', action='store_true')
# the current release only includes smart perturbation
parser.add_argument('--adv_opt', default=0, type=int)
parser.add_argument('--adv_norm_level', default=0, type=int)
parser.add_argument('--adv_p_norm', default='inf', type=str)
parser.add_argument('--adv_alpha', default=1, type=float)
parser.add_argument('--adv_k', default=1, type=int)
parser.add_argument('--adv_step_size', default=1e-5, type=float)
parser.add_argument('--adv_noise_var', default=1e-5, type=float)
parser.add_argument('--adv_epsilon', default=1e-6, type=float)
parser.add_argument('--encode_mode', action='store_true', help='只把测试数据用模型编码一下,然后保存到checkpoint目录,没啥用')
parser.add_argument('--debug', action='store_true', help="print debug info")
return parser
# build and parse all arguments
parser = argparse.ArgumentParser()
parser = data_config(parser)
parser = model_config(parser)
parser = train_config(parser)
args = parser.parse_args()
output_dir = args.output_dir
data_dir = args.data_dir
args.train_datasets = args.train_datasets.split(',')
args.test_datasets = args.test_datasets.split(',')
os.makedirs(output_dir, exist_ok=True)
output_dir = os.path.abspath(output_dir)
set_environment(args.seed, args.cuda)
log_path = args.log_file
logger = create_logger(__name__, to_disk=True, log_file=log_path)
task_defs = TaskDefs(args.task_def)
encoder_type = args.encoder_type
def dump(path, data):
with open(path, 'w') as f:
json.dump(data, f)
def evaluation(model, datasets, data_list, task_defs, output_dir='checkpoints', epoch=0, n_updates=-1, with_label=False, tensorboard=None, glue_format_on=False, test_on=False, device=None, logger=None):
# eval on rank 1
print_message(logger, "开始评估")
test_prefix = "Test" if test_on else "Dev"
if n_updates > 0:
updates_str = "updates"
else:
updates_str = "epoch"
updates = model.updates if n_updates > 0 else epoch
for idx, dataset in enumerate(datasets):
prefix = dataset.split('_')[0]
task_def = task_defs.get_task_def(prefix)
label_dict = task_def.label_vocab
test_data = data_list[idx]
if test_data is not None:
with torch.no_grad():
test_metrics, test_predictions, test_scores, test_golds, test_ids= eval_model(model,
test_data,
metric_meta=task_def.metric_meta,
device=device,
with_label=with_label,
label_mapper=label_dict,
task_type=task_def.task_type)
for key, val in test_metrics.items():
if tensorboard:
tensorboard.add_scalar('{}/{}/{}'.format(test_prefix, dataset, key), val, global_step=updates)
if isinstance(val, str):
print_message(logger, '任务是 {0} -- {1} {2} -- {3} {4}: {5}'.format(dataset, updates_str, updates, test_prefix, key, val), level=1)
elif isinstance(val, float):
print_message(logger, '任务是 {0} -- {1} {2} -- {3} {4}: {5:.3f}'.format(dataset, updates_str, updates, test_prefix, key, val), level=1)
else:
test_metrics[key] = str(val)
print_message(logger, 'Task {0} -- {1} {2} -- {3} {4}: \n{5}'.format(dataset, updates_str, updates, test_prefix, key, val), level=1)
if args.local_rank in [-1, 0]:
score_file = os.path.join(output_dir, '{}_{}_scores_{}_{}.json'.format(dataset, test_prefix.lower(), updates_str, updates))
results = {'metrics': test_metrics, 'predictions': test_predictions, 'uids': test_ids, 'scores': test_scores}
dump(score_file, results)
if glue_format_on:
from experiments.glue.glue_utils import submit
official_score_file = os.path.join(output_dir, '{}_{}_scores_{}.tsv'.format(dataset, test_prefix.lower(), updates_str))
submit(official_score_file, results, label_dict)
def initialize_distributed(args):
"""Initialize torch.distributed."""
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
# We are using (OpenMPI) mpirun for launching distributed data parallel processes
        local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
        local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
        # nodeid / num_nodes were previously undefined here; derive them from the global
        # RANK / WORLD_SIZE values read above (defaults correspond to a single-node setup)
        nodeid = args.rank // local_size
        num_nodes = max(1, args.world_size // local_size)
        args.local_rank = local_rank
        args.rank = nodeid * local_size + local_rank
        args.world_size = num_nodes * local_size
#args.batch_size = args.batch_size * args.world_size
device = args.rank % torch.cuda.device_count()
if args.local_rank is not None:
device = args.local_rank
torch.cuda.set_device(device)
device = torch.device('cuda', args.local_rank)
# Call the init process
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6600')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(
backend=args.backend,
world_size=args.world_size, rank=args.rank,
init_method=init_method)
return device
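# initialize_distributed reads RANK, WORLD_SIZE, OMPI_COMM_WORLD_LOCAL_RANK /
# OMPI_COMM_WORLD_LOCAL_SIZE (when launched via mpirun) and MASTER_ADDR / MASTER_PORT
# from the environment to set up the process group.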
def print_message(logger, message, level=0):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
do_logging = True
else:
do_logging = False
else:
do_logging = True
if do_logging:
if level == 1:
logger.warning(message)
else:
logger.info(message)
def main():
# set up dist
if args.local_rank > -1:
device = initialize_distributed(args)
elif torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
    # opt is just args converted to a dict
opt = vars(args)
# update data dir
opt['data_dir'] = data_dir
batch_size = args.batch_size
print_message(logger, '开始MT-DNN训练')
#return
tasks = {}
task_def_list = []
dropout_list = []
    # print only on the main process (local_rank -1 or 0)
printable = args.local_rank in [-1, 0]
train_datasets = []
    # build the dataset for every training task
for dataset in args.train_datasets:
prefix = dataset.split('_')[0]
if prefix in tasks:
continue
task_id = len(tasks)
tasks[prefix] = task_id
        # basic task definition: which loss to use, task type, task labels, etc.
task_def = task_defs.get_task_def(prefix)
task_def_list.append(task_def)
assert len(task_def.label_vocab.ind2tok) == task_def.n_class, "配置中的类别数量和标签数量不相等,请检查"
train_path = os.path.join(data_dir, '{}_train.json'.format(dataset))
print_message(logger, '加载训练任务 {},训练任务的顺序id是: {}'.format(train_path, task_id))
        # json file with the training data, e.g. train_path = 'data_my/canonical_data/bert-base-chinese/absa_train.json'
train_data_set = SingleTaskDataset(path=train_path, is_train=True, maxlen=args.max_seq_len, task_id=task_id, task_def=task_def, printable=printable)
train_datasets.append(train_data_set)
    # collater used to build batches
train_collater = Collater(dropout_w=args.dropout_w, encoder_type=encoder_type, soft_label=args.mkd_opt > 0, max_seq_len=args.max_seq_len, do_padding=args.do_padding)
    # merge the per-task datasets
multi_task_train_dataset = MultiTaskDataset(train_datasets)
if args.local_rank != -1:
multi_task_batch_sampler = DistMultiTaskBatchSampler(train_datasets, args.batch_size, args.mix_opt, args.ratio, rank=args.local_rank, world_size=args.world_size)
else:
        # batch sampler over the multi-task dataset
multi_task_batch_sampler = MultiTaskBatchSampler(train_datasets, args.batch_size, args.mix_opt, args.ratio, bin_on=args.bin_on, bin_size=args.bin_size, bin_grow_ratio=args.bin_grow_ratio)
    # wrap everything into a DataLoader
multi_task_train_data = DataLoader(multi_task_train_dataset, batch_sampler=multi_task_batch_sampler, collate_fn=train_collater.collate_fn, pin_memory=args.cuda)
    # len(task_def_list) equals the number of tasks
opt['task_def_list'] = task_def_list
    # dev/test data are handled the same way
dev_data_list = []
test_data_list = []
test_collater = Collater(is_train=False, encoder_type=encoder_type, max_seq_len=args.max_seq_len, do_padding=args.do_padding)
for dataset in args.test_datasets:
prefix = dataset.split('_')[0]
task_def = task_defs.get_task_def(prefix)
task_id = tasks[prefix]
task_type = task_def.task_type
data_type = task_def.data_type
dev_path = os.path.join(data_dir, '{}_dev.json'.format(dataset))
dev_data = None
if os.path.exists(dev_path):
dev_data_set = SingleTaskDataset(dev_path, False, maxlen=args.max_seq_len, task_id=task_id, task_def=task_def, printable=printable)
if args.local_rank != -1:
dev_data_set = DistTaskDataset(dev_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(dev_data_set, args.batch_size_eval, rank=args.local_rank, world_size=args.world_size)
dev_data = DataLoader(dev_data_set, batch_sampler=single_task_batch_sampler, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
else:
dev_data = DataLoader(dev_data_set, batch_size=args.batch_size_eval, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
dev_data_list.append(dev_data)
test_path = os.path.join(data_dir, '{}_test.json'.format(dataset))
test_data = None
if os.path.exists(test_path):
test_data_set = SingleTaskDataset(test_path, False, maxlen=args.max_seq_len, task_id=task_id, task_def=task_def, printable=printable)
if args.local_rank != -1:
test_data_set = DistTaskDataset(test_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(test_data_set, args.batch_size_eval, rank=args.local_rank, world_size=args.world_size)
test_data = DataLoader(test_data_set, batch_sampler=single_task_batch_sampler, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
else:
test_data = DataLoader(test_data_set, batch_size=args.batch_size_eval, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
test_data_list.append(test_data)
    # print the resolved arguments
print_message(logger, '#' * 20)
print_message(logger, opt)
print_message(logger, '#' * 20)
    # divide by the gradient-accumulation steps to get the total number of optimizer steps
num_all_batches = args.epochs * len(multi_task_train_data) // args.grad_accumulation_step
print_message(logger, '############# Gradient Accumulation 信息 #############')
print_message(logger, '原有训练的step数是: {}'.format(args.epochs * len(multi_task_train_data)))
print_message(logger, '梯度度累积参数 grad_accumulation 为: {}'.format(args.grad_accumulation_step))
print_message(logger, '经过梯度累积后的训练step数是: {}'.format(num_all_batches))
print_message(logger, '############# Gradient Accumulation 信息 #############')
    # which checkpoint to initialize the model from
init_model = args.init_checkpoint
state_dict = None
    # load pretrained weights; BERT- and RoBERTa-style checkpoints are supported
if os.path.exists(init_model):
if encoder_type == EncoderModelType.BERT or \
encoder_type == EncoderModelType.DEBERTA or \
encoder_type == EncoderModelType.ELECTRA:
state_dict = torch.load(init_model, map_location=device)
config = state_dict['config']
elif encoder_type == EncoderModelType.ROBERTA or encoder_type == EncoderModelType.XLM:
model_path = '{}/model.pt'.format(init_model)
state_dict = torch.load(model_path, map_location=device)
arch = state_dict['args'].arch
arch = arch.replace('_', '-')
if encoder_type == EncoderModelType.XLM:
arch = "xlm-{}".format(arch)
# convert model arch
from data_utils.roberta_utils import update_roberta_keys
from data_utils.roberta_utils import patch_name_dict
state = update_roberta_keys(state_dict['model'], nlayer=state_dict['args'].encoder_layers)
state = patch_name_dict(state)
literal_encoder_type = EncoderModelType(opt['encoder_type']).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]
config = config_class.from_pretrained(arch).to_dict()
state_dict = {'state': state}
else:
if opt['encoder_type'] not in EncoderModelType._value2member_map_:
raise ValueError("encoder_type is out of pre-defined types")
literal_encoder_type = EncoderModelType(opt['encoder_type']).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]
config = config_class.from_pretrained(init_model).to_dict()
    # config holds the pretrained model hyperparameters; dropout defaults to 0.1
config['attention_probs_dropout_prob'] = args.bert_dropout_p
config['hidden_dropout_prob'] = args.bert_dropout_p
    # whether multi-GPU is enabled
config['multi_gpu_on'] = opt["multi_gpu_on"]
    # if > 0, override the number of hidden layers
if args.num_hidden_layers > 0:
config['num_hidden_layers'] = args.num_hidden_layers
    # fold the model config into opt so all settings are saved together
opt.update(config)
    # initialize the MT-DNN model
model = MTDNNModel(opt, device=device, state_dict=state_dict, num_train_step=num_all_batches)
    # resume training from an existing checkpoint if requested
if args.resume and args.model_ckpt:
print_message(logger, '选择了继续训练模型,并且模型{}也存在'.format(args.model_ckpt))
model.load(args.model_ckpt)
#### model meta str
headline = '############# 打印 MT-DNN 模型的结果信息 #############'
### print network
print_message(logger, '\n{}\n{}\n'.format(headline, model.network))
    # save the configuration
config_file = os.path.join(output_dir, 'config.json')
with open(config_file, 'w', encoding='utf-8') as writer:
writer.write('{}\n'.format(json.dumps(opt)))
writer.write('\n{}\n{}\n'.format(headline, model.network))
print_message(logger, f"保存参数信息到{config_file}中")
print_message(logger, "总的参数量是: {}".format(model.total_param))
    # set up tensorboard
tensorboard = None
if args.tensorboard:
args.tensorboard_logdir = os.path.join(args.output_dir, args.tensorboard_logdir)
tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)
    # only encode the test data with the model and save the encodings
if args.encode_mode:
for idx, dataset in enumerate(args.test_datasets):
prefix = dataset.split('_')[0]
test_data = test_data_list[idx]
with torch.no_grad():
encoding = extract_encoding(model, test_data, use_cuda=args.cuda)
torch.save(encoding, os.path.join(output_dir, '{}_encoding.pt'.format(dataset)))
return
    # start training
for epoch in range(0, args.epochs):
print_message(logger, '开始训练Epoch: {}'.format(epoch), level=1)
start = datetime.now()
        # batch_meta holds the meta information for one batch and batch_data the batch itself;
        # the collater has already been applied during iteration (collate_fn in mt_dnn/batcher.py)
for i, (batch_meta, batch_data) in enumerate(multi_task_train_data):
            # batch_data contains token_ids, type_ids, masks, premise_masks, hypothesis_masks and labels;
            # the premise/hypothesis masks are only used for QA tasks (decoder_opt == 1)
            # Collater.patch_data applies per-batch post-processing, e.g. moving tensors onto the device
batch_meta, batch_data = Collater.patch_data(device, batch_meta, batch_data)
task_id = batch_meta['task_id']
            # one training update
model.update(batch_meta, batch_data)
            # periodically log progress
if (model.updates) % (args.log_per_updates) == 0 or model.updates == 1:
ramaining_time = str((datetime.now() - start) / (i + 1) * (len(multi_task_train_data) - i - 1)).split('.')[0]
if args.adv_train and args.debug:
debug_info = ' adv loss[%.5f] emb val[%.8f] eff_perturb[%.8f] ' % (
model.adv_loss.avg,
model.emb_val.avg,
model.eff_perturb.avg
)
else:
debug_info = ' '
print_message(logger, '任务[{0:2}],训练了第[{1:6}]步, 训练损失为:[{2:.5f}]{3},预计还需时间:[{4}]'.format(task_id,
model.updates,
model.train_loss.avg,
debug_info,
ramaining_time))
if args.tensorboard:
tensorboard.add_scalar('train/loss', model.train_loss.avg, global_step=model.updates)
            # evaluate and save the model
if args.save_per_updates_on and ((model.local_updates) % (args.save_per_updates * args.grad_accumulation_step) == 0) and args.local_rank in [-1, 0]:
model_file = os.path.join(output_dir, 'model_{}_{}.pt'.format(epoch, model.updates))
evaluation(model, args.test_datasets, dev_data_list, task_defs, output_dir, epoch, n_updates=args.save_per_updates, with_label=True, tensorboard=tensorboard, glue_format_on=args.glue_format_on, test_on=False, device=device, logger=logger)
evaluation(model, args.test_datasets, test_data_list, task_defs, output_dir, epoch, n_updates=args.save_per_updates, with_label=False, tensorboard=tensorboard, glue_format_on=args.glue_format_on, test_on=True, device=device, logger=logger)
print_message(logger, '每步都保存模型: {}'.format(model_file))
model.save(model_file)
evaluation(model, args.test_datasets, dev_data_list, task_defs, output_dir, epoch, with_label=True, tensorboard=tensorboard, glue_format_on=args.glue_format_on, test_on=False, device=device, logger=logger)
evaluation(model, args.test_datasets, test_data_list, task_defs, output_dir, epoch, with_label=False, tensorboard=tensorboard, glue_format_on=args.glue_format_on, test_on=True, device=device, logger=logger)
print_message(logger, '[new test scores at {} saved.]'.format(epoch))
if args.local_rank in [-1, 0]:
model_file = os.path.join(output_dir, 'model_{}.pt'.format(epoch))
print_message(logger, 'epoch结束保存模型: {}'.format(model_file))
model.save(model_file)
    # save the final model
if args.local_rank in [-1, 0]:
model_file = os.path.join(output_dir, 'model_final.pt')
print_message(logger, '最终保存模型: {}'.format(model_file))
model.save(model_file)
if args.tensorboard:
tensorboard.close()
if __name__ == '__main__':
main()
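# Illustrative launch command (values are the argparse defaults defined above; paths and
# dataset names are placeholders to adjust to your own setup):
#   python train.py --data_dir data/canonical_data/bert_uncased_lower \
#       --init_checkpoint mt_dnn_models/bert_model_base_uncased.pt \
#       --task_def experiments/glue/glue_task_def.yml \
#       --train_datasets mnli --test_datasets mnli_matched,mnli_mismatched \
#       --epochs 5 --batch_size 8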
``` |
{
"source": "johnson7788/NLP",
"score": 3
} |
#### File: NLP/Res2Net/config.py
```python
import torch
class Config(object):
"""Base configuration class."""
    # directory that contains the train / validation / test sets
    data_directory = "data/zhengjian/"
    # where to save the model
    save_path='model/'
    # file name used when saving the model
    save_name='checkpoint.pth'
    # which model architecture to use; one of ['densenet161', 'resnet18', 'vgg16', 'res2next50']
    arch = 'res2next50'
    # hidden layer sizes of the classifier head; any number of entries, e.g. [1024, 512, 256], each one a FC layer
    hidden_units = [256]
    # evaluation interval: run an evaluation every this many training epochs
    eval_interval = 100
    # whether to plot the predictions or just return the results
    plot = False
    # number of predictions shown in the plot; must be even
    plot_image = 6
    # whether to use the GPU
    cuda = False
    # device name: 'cpu' when running on CPU, or e.g. 'cuda:0' for a specific GPU
    device_name = 'cpu'
    # number of training epochs
    epochs = 2
    batch_size = 64
    # learning rate
    learning_rate = 0.001
    # learning-rate momentum
    learning_momentum = 0.9
    # weight decay (regularisation)
    weight_decay = 0.0001
    dropout = 0.5
    # dimension of the generated word embeddings
    embed_dim = 128
    # number of convolution kernels
    kernel_num = 100
    # convolution kernel sizes
    kernel_sizes = "3,4,5"
    # save the model every this many training epochs
    save_interval = 2
    # initialisation: decide whether to use the GPU
def __init__(self):
if self.cuda:
self.cuda = torch.cuda.is_available()
self.device = torch.device("cuda:0" if self.cuda else "cpu")
def dump(self):
"""打印配置信息"""
print("模型配置如下:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("\t{:30} = {}".format(a, getattr(self, a)))
print()
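# Illustrative usage (not part of the original file): instantiate the config, print it,
# and read the device resolved in __init__ from the `cuda` flag.
if __name__ == '__main__':
    cfg = Config()
    cfg.dump()
    print("selected device:", cfg.device)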
```
#### File: text_classsification/utils/data_helpers.py
```python
import numpy as np
import re
# string cleaning / character-level splitting
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^\u4e00-\u9fa5A-Za-z0-9(),.!?,。?!、“”\'\`]", " ", string) # 考虑到中文
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
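# For example (illustrative): clean_str("I can't wait! (really)")
# returns "i ca n't wait ! ( really )".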
def load_data_and_labels(positive_data_file, negative_data_file):
"""
基于给定的正例和负例文件路径加载数据
:param positive_data_file:
:param negative_data_file:
:return:
"""
# 1. 加载所有数据组成list列表
positive = open(positive_data_file, 'rb').read().decode('utf-8')
negative = open(negative_data_file, 'rb').read().decode('utf-8')
# 2.数据的划分(转换成一个一个样本)
positive = positive.split("\n")
negative = negative.split("\n")
# 3. 数据简单处理
positive = [clean_str(s.strip()) for s in positive]
negative = [clean_str(s.strip()) for s in negative]
positive = [s for s in positive if len(s) > 0]
negative = [s for s in negative if len(s) > 0]
# 4. 数据合并得到x
texts = positive + negative
# 5. 得到对应的id
labels = [1] * len(positive) + [0] * len(negative)
# 6. 结果返回
return np.asarray(texts), np.asarray(labels)
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
基于给定的data数据获取批次数据
:param data:
:param batch_size:
:param num_epochs:
:param shuffle:
:return:
"""
data = np.array(data)
data_size = len(data)
    # number of batches per epoch
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
            # np.random.permutation returns a shuffled copy of the index array
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
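# Illustrative usage (assumes texts/labels as returned by load_data_and_labels):
#   data = list(zip(texts, labels))
#   for batch in batch_iter(data, batch_size=64, num_epochs=1):
#       x_batch, y_batch = zip(*batch)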
if __name__ == '__main__':
    # NOTE: the file paths below are placeholders for a local smoke test; point them at
    # your own positive / negative sample files.
    texts, labels = load_data_and_labels('../data/positive.txt', '../data/negative.txt')
# from utils.vocabulary_utils import VocabularyProcessorUtil, split_with_word
#
# _, vocabulary = VocabularyProcessorUtil.load_word2vec_embedding("../model/w2v.bin")
# VocabularyProcessorUtil.building_model(documents=texts, save_path='../model/vocab.pkl', max_document_length=512,
# vocabulary=vocabulary,
# split_fn=split_with_word)
# model = VocabularyProcessorUtil.load_model('../model/vocab.pkl')
```
#### File: text_classsification/utils/vocabulary_utils.py
```python
import os
import itertools
import jieba
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn import preprocessing
from gensim import utils
from gensim.models import word2vec
def default_split_fn(documents):
return split_with_char(documents)
def split_with_char(documents):
return [list(sentence) for sentence in documents]
def split_with_word(documents):
return [list(filter(lambda word: len(word) > 0, jieba.cut(sentence.strip()))) for sentence in documents]
class CategoricalVocabulary(preprocessing.CategoricalVocabulary):
def __init__(self, unknown_token="<UNK>"):
super(CategoricalVocabulary, self).__init__(unknown_token, False)
        # special values (padding -> 0, unknown -> 1)
self.padding_token = "<PAD>"
self._mapping[self.padding_token] = 0
self._mapping[self._unknown_token] = 1
        # track the vocabulary size as an extra attribute
self.vocab_size = 2
def get(self, category):
if category not in self._mapping:
return 1
return self._mapping[category]
def set(self, category, index):
self._mapping[category] = index
self.vocab_size += 1
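# Behaviour sketch (illustrative): index 0 is reserved for <PAD> and index 1 for <UNK>;
# get() falls back to 1 for out-of-vocabulary words.
#   vocab = CategoricalVocabulary()
#   vocab.set("word", 2)
#   vocab.get("word")     # -> 2
#   vocab.get("missing")  # -> 1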
class PathLineSentences(object):
"""Like :class:`~gensim.models.word2vec.LineSentence`, but process all files in a directory
in alphabetical order by filename.
The directory must only contain files that can be read by :class:`gensim.models.word2vec.LineSentence`:
.bz2, .gz, and text files. Any file not ending with .bz2 or .gz is assumed to be a text file.
The format of files (either text, or compressed text files) in the path is one sentence = one line,
with words already preprocessed and separated by whitespace.
Warnings
--------
Does **not recurse** into subdirectories.
"""
def __init__(self, source, max_sentence_length=word2vec.MAX_WORDS_IN_BATCH, limit=None, split_fn=None):
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
if split_fn is None:
self.split_fn = default_split_fn
else:
self.split_fn = split_fn
if os.path.isfile(self.source):
self.input_files = [self.source] # force code compatibility with list of files
elif os.path.isdir(self.source):
self.source = os.path.join(self.source, '') # ensures os-specific slash at end of path
self.input_files = os.listdir(self.source)
self.input_files = [self.source + filename for filename in self.input_files] # make full paths
self.input_files.sort()
else:
raise ValueError('input is neither a file nor a path')
def __iter__(self):
"""iterate through the files"""
for file_name in self.input_files:
with utils.open(file_name, 'rb') as fin:
for line in itertools.islice(fin, self.limit):
line = self.split_fn([utils.to_unicode(line).strip()])[0]
i = 0
while i < len(line):
yield line[i:i + self.max_sentence_length]
i += self.max_sentence_length
class VocabularyProcessorUtil(object):
@staticmethod
def building_model(documents, save_path, max_document_length=512, vocabulary=None, split_fn=default_split_fn):
"""
基于传入的文档数据构建字典相关信息
:param documents: 进行模型训练的时候的文本数据
:param save_path: 模型持久化的路径
:param vocabulary: 词汇映射表
:param split_fn: 将文本转换为单词过程中的函数, 默认是将每个字当作一个单词
:param max_document_length: 将文本单词id转换的时候,最长文本允许的单词数目
:return:
"""
tf.logging.info("开始构建词汇转换模型.....")
model = preprocessing.VocabularyProcessor(max_document_length=max_document_length,
vocabulary=vocabulary, tokenizer_fn=split_fn)
model.fit(raw_documents=documents)
tf.logging.info("词汇转换模型构建完成,开始模型保存操作!!!")
model.save(save_path)
tf.logging.info("词汇转换模型保存完成,保存位置为:{}".format(save_path))
@staticmethod
def load_model(save_path) -> preprocessing.VocabularyProcessor:
"""
基于给定的路径加载模型并返回
:param save_path:
:return:
"""
if os.path.exists(save_path):
tf.logging.info("从【{}】位置进行词汇转换模型的恢复!!!".format(save_path))
return preprocessing.VocabularyProcessor.restore(save_path)
else:
raise Exception("词汇转换模型不存在,请检查磁盘路径:{}".format(save_path))
@staticmethod
def build_word2vec_embedding(data_path, save_path, embedding_dimensions):
"""
基于data_path下的文件内容构建Word2Vec向量,并将向量保存到save_path这个路径中
:param data_path: 原始数据所在的文件夹路径
:param save_path: 训练好的数据保存路径
:param embedding_dimensions: 转换的Embedding向量大小
:return:
"""
# 0. 加载数据
sentences = PathLineSentences(source=data_path, split_fn=split_with_word)
# 1. 构建Word2Vec模型
model = word2vec.Word2Vec(sentences=sentences, size=embedding_dimensions,
window=9, min_count=2, iter=50)
# 3. 模型保存(以文本形式保存)
model.wv.save_word2vec_format(fname=save_path, binary=True)
@staticmethod
def load_word2vec_embedding(save_path):
"""
加载Word2Vec训练好的embedding转换矩阵
:param save_path: 数据存储的路径
:param binary: 是否是二进制存储
:return: embedding_table, vocabulary
"""
# 1. 加载数据
model = word2vec.Word2VecKeyedVectors.load_word2vec_format(save_path, binary=True)
# 2. 获取embedding_table
embedding_table = model.vectors
embedding_dimensions = np.shape(embedding_table)[1]
# 3. 获取单词和id之间的映射关系
vocabulary = CategoricalVocabulary()
vocab_size = vocabulary.vocab_size
for word in model.vocab:
vocabulary.set(word, model.vocab[word].index + vocab_size)
# 4. 在embedding_table前面加入特征字符所代表的含义
embedding_table = np.concatenate(
[
np.zeros(shape=(1, embedding_dimensions), dtype=embedding_table.dtype), # PAD对应的的特征值
np.random.normal(0, 0.01, size=(1, embedding_dimensions)), # UNK对应的特征值
embedding_table # 原始单词对应的特征值
],
axis=0
)
return embedding_table, vocabulary
if __name__ == '__main__':
VocabularyProcessorUtil.build_word2vec_embedding("../data", "../model/w2v2.bin", 128)
embedding_table, vob = VocabularyProcessorUtil.load_word2vec_embedding("../model/w2v.bin")
print(vob.vocab_size)
```
#### File: NLP/TextCNN/config.py
```python
import torch
class Config(object):
"""Base configuration class."""
    # training data directory
    train_dir = "data/train"
    # evaluation data directory
    eval_dir = "data/eval"
    # where to save the model
    save_path='model/'
    # whether to use the GPU
    cuda = True
    # number of training epochs
    epochs = 2
    batch_size = 64
    # learning rate
    learning_rate = 0.001
    # learning-rate momentum
    learning_momentum = 0.9
    # weight decay (regularisation)
    weight_decay = 0.0001
    dropout = 0.5
    # dimension of the generated word embeddings
    embed_dim = 128
    # number of convolution kernels
    kernel_num = 100
    # convolution kernel sizes
    kernel_sizes = "3,4,5"
    # save the model every this many training epochs
    save_interval = 2
    # initialisation: decide whether to use the GPU
def __init__(self):
if self.cuda:
self.cuda = torch.cuda.is_available()
self.device = torch.device("cuda:0" if self.cuda else "cpu")
def dump(self):
"""打印配置信息"""
print("模型配置如下:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("\t{:30} = {}".format(a, getattr(self, a)))
print()
``` |
{
"source": "johnson7788/OpenNRE",
"score": 3
} |
#### File: OpenNRE/example/gen_chinese_data.py
```python
import os
import json
import re
import random
def gen_rel2id(train_dir, destination='/Users/admin/git/OpenNRE/benchmark/liter/liter_rel2id.json'):
"""
根据Chinese-Literature-NER-RE-Dataset的训练目录生成关系到id的映射
:param train_dir: *.ann和*.txt结尾的文件
:param destination: 输出的目标json文件
:return:
"""
relations = []
files = os.listdir(train_dir)
    # keep only the annotation files
files = [f for f in files if f.endswith('.ann')]
for file in files:
annfile = os.path.join(train_dir,file)
with open(annfile, 'r') as f:
for line in f:
if line.startswith('R'):
line = line.strip()
line_split = re.split('[\t ]', line)
relation = line_split[1]
if relation == 'Coreference':
print(f"文件{annfile},行 {line}是有问题的")
if relation not in relations:
print(f'加入关系: {relation}')
relations.append(relation)
desdir = os.path.dirname(destination)
if not os.path.exists(desdir):
os.makedirs(desdir)
assert len(relations) == 9, "关系必须是9个才对"
rel2id = {rel:idx for idx, rel in enumerate(relations)}
with open(destination, 'w', encoding='utf-8') as f:
json.dump(rel2id, f)
def gen_data(source_dir, des_dir, mini_data = False, truncate=-1):
"""
根据原始目录生成目标训练或测试等文件
:param source_dir: eg: /Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training
:param des_dir: eg: /Users/admin/git/OpenNRE/benchmark/liter
:return:
"""
#保存处理好的数据
data = []
files = os.listdir(source_dir)
# 过滤出标注的文件
ann_files = [f for f in files if f.endswith('.ann')]
text_files = [f for f in files if f.endswith('.txt')]
#转出成不带文件后缀的key和文件名为value的字典
ann_file_dict = {f.split('.')[0]:f for f in ann_files}
text_file_dict = {f.split('.')[0]: f for f in text_files}
for k, v in ann_file_dict.items():
if text_file_dict.get(k) is None:
print(f"文件{v} 不存在对应的txt文件,错误")
continue
#开始读取ann 文件
annfile = os.path.join(source_dir, v)
text_name = text_file_dict.get(k)
textfile = os.path.join(source_dir, text_name)
with open(textfile, 'r') as f:
text = ""
text_len = []
for line in f:
text_len.append(len(line))
if len(line) == 61:
#固定的行长度是61
line = line.strip()
text += line
# text = f.read()
#保存所有实体
entities = []
#保存所有关系
rels = []
with open(annfile, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('R'):
line_split = re.split('[\t ]', line)
assert len(line_split) == 4, f"关系{annfile}的行 {line}不为4项"
rels.append(line_split)
if line.startswith('T'):
line_split = re.split('[\t ]', line)
if len(line_split) == 7:
# 如果不为5,那么是有逗号隔开的,例如 T81 Metric 539 540;541 542 百 鸟
# 只需要T81 Metric 539 540 百
pos_stop = line_split[3].split(';')[0]
line_split = line_split[:3] + [pos_stop] + [line_split[5]]
elif len(line_split) == 5:
pass
else:
raise Exception(f"实体 {annfile} 的行 {line} 不为5项或者7项,有问题,请检查")
#把实体的索引,进行减法,因为每61个字符一行,我们去掉了一部分'\n',所以做减法
pos_start = int(line_split[2])
pos_stop = int(line_split[3])
if pos_start > 61:
pos_remind1 = pos_start // 61
pos_start = pos_start -pos_remind1
if pos_stop > 61:
pos_remind2 = pos_stop //61
pos_stop = pos_stop - pos_remind2
line_split = line_split[:2] + [pos_start, pos_stop] + [line_split[-1]]
entities.append(line_split)
#检查实体, 保存成实体id:实体的type,实体start_idx, 实体stop_idx,实体的值
ent_dict = {}
for entity in entities:
entity_id = entity[0]
if ent_dict.get(entity_id) is not None:
print(f"{annfile}: 实体id已经存在过了,冲突的id,请检查 {entity}")
ent_dict[entity_id] = entity[1:]
#开始分析所有关系
for rel in rels:
relation = rel[1]
arg1, h1_entityid = rel[2].split(':')
assert arg1 == 'Arg1', f"{rel}分隔的首个字符不是Arg1"
#实体1的id处理
h1_entity = ent_dict.get(h1_entityid)
if h1_entity is None:
print(f"关系{rel}中对应的实体id{h1_entityid}是不存在的,请检查")
h1_type,h1_pos_start, h1_pos_stop, h1_entity_value = h1_entity
h1_pos_start = int(h1_pos_start)
h1_pos_stop = int(h1_pos_stop)
arg2, h2_entityid = rel[3].split(':')
assert arg2 == 'Arg2', f"{rel}分隔的首个字符不是Arg2"
#实体2的id处理
h2_entity = ent_dict.get(h2_entityid)
if h2_entity is None:
print(f"关系{rel}中对应的实体id{h2_entityid}是不存在的,请检查")
h2_type, h2_pos_start, h2_pos_stop, h2_entity_value = h2_entity
h2_pos_start = int(h2_pos_start)
h2_pos_stop = int(h2_pos_stop)
# 检查关键字的位置是否匹配
def get_true_pos(text, value, pos1, pos2, rnum=16):
#从上下加8个字符获取真实的位置
index_true_text = text[pos1-rnum:pos2+rnum]
print(f"实体1: {value}位置不匹配, 上下的2个位置是: {index_true_text},尝试修复")
newpos1, newpos2 = pos1, pos2
if value in index_true_text:
sres = re.finditer(re.escape(value), text)
for sv in sres:
if sv.start() > pos1-rnum and sv.end() < pos2+rnum:
newpos1, newpos2 = sv.start(), sv.end()
break
else:
print("通过正则没有匹配到,请检查,用最后一个位置作为索引")
newpos1, newpos2 = sv.start(), sv.end()
else:
print("上下浮动了16个,仍然没有匹配,请检查")
sres = re.finditer(re.escape(value), text)
min_dist = 100
for sv in sres:
min_dist = min(min_dist, sv.start() - pos1, sv.end() - pos2)
if min_dist in [sv.start() - pos1, sv.end() - pos2]:
newpos1, newpos2 = sv.start(), sv.end()
                assert text[newpos1:newpos2] == value, "仍然是匹配错误的位置,请检查"
return newpos1, newpos2
# 验证下文本中的实体在文档中的位置时正确的
if text[h1_pos_start:h1_pos_stop] != h1_entity_value:
h1_pos_start, h1_pos_stop = get_true_pos(text=text,value=h1_entity_value, pos1=h1_pos_start, pos2=h1_pos_stop)
if text[h2_pos_start:h2_pos_stop] != h2_entity_value:
h2_pos_start, h2_pos_stop = get_true_pos(text=text,value=h2_entity_value, pos1=h2_pos_start, pos2=h2_pos_stop)
if truncate != -1:
if abs(h1_pos_start - h2_pos_stop) > truncate:
print(f'2个实体间的距离很大,超过了{truncate}长度')
else:
#开始截断数据, 只保留最大长度
add_length = truncate - abs(h1_pos_start - h2_pos_stop)
added = int(add_length/2)
if h1_pos_start < h2_pos_stop:
truncate_start = h1_pos_start - added
truncate_end = h2_pos_stop + added
else:
truncate_start = h2_pos_stop - added
truncate_end = h1_pos_start + added
if truncate_start <0:
truncate_start = 0
truncate_text = text[truncate_start:truncate_end]
else:
truncate_text = text
# 开始整理成一条数据
one_data = {
'text': truncate_text,
'h': {
'name': h1_entity_value,
'id': h1_entityid,
'pos': [h1_pos_start, h1_pos_stop]
},
't': {
'name': h2_entity_value,
'id': h2_entityid,
'pos': [h2_pos_start, h2_pos_stop]
},
'relation': relation
}
data.append(one_data)
train_file = os.path.join(des_dir, 'liter_train.txt')
dev_file = os.path.join(des_dir, 'liter_test.txt')
test_file = os.path.join(des_dir, 'liter_val.txt')
print(f"一共处理了{len(ann_files)}个文件,生成{len(data)}条数据")
random.shuffle(data)
train_num = int(len(data) * 0.8)
dev_num = int(len(data) * 0.1)
train_data = data[:train_num]
dev_data = data[train_num:train_num+dev_num]
test_data = data[train_num+dev_num:]
if mini_data:
#选择前500条样本测试
train_data = train_data[:500]
dev_data = dev_data[:100]
test_data = test_data[:100]
with open(train_file, 'w', encoding='utf-8') as f:
for d in train_data:
f.write(json.dumps(d) + '\n')
with open(dev_file, 'w', encoding='utf-8') as f:
for d in dev_data:
f.write(json.dumps(d)+ '\n')
with open(test_file, 'w', encoding='utf-8') as f:
for d in test_data:
f.write(json.dumps(d)+ '\n')
print(f"训练集数量{len(train_data)}, 测试集数量{len(test_data)},开发集数量{len(dev_data)}")
if __name__ == '__main__':
# gen_rel2id(train_dir='/Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training')
gen_data(source_dir='/Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training', des_dir='/Users/admin/git/OpenNRE/benchmark/liter', mini_data=False, truncate=196)
```
#### File: OpenNRE/example/infer.py
```python
import opennre
def infer_wiki80_cnn_softmax():
model = opennre.get_model('wiki80_cnn_softmax')
result = model.infer({
'text': 'He was the son of <NAME> mac Má<NAME>, and grandson of the high king Áed Uaridnach (died 612).',
'h': {'pos': (18, 46)}, 't': {'pos': (78, 91)}})
print(result)
def infer_wiki80_bert_softmax():
"""
有一些错误
:return:
"""
model = opennre.get_model('wiki80_bert_softmax')
result = model.infer({
'text': 'He was the son of <NAME> mac Má<NAME>, and grandson of the high king Áed Uaridnach (died 612).',
'h': {'pos': (18, 46)}, 't': {'pos': (78, 91)}})
print(result)
def infer_wiki80_bertentity_softmax():
model = opennre.get_model('wiki80_bertentity_softmax')
result = model.infer({
'text': 'He was the son of <NAME> mac Má<NAME>, and grandson of the high king Áed Uaridnach (died 612).',
'h': {'pos': (18, 46)}, 't': {'pos': (78, 91)}})
print(result)
def infer_tacred_bertentity_softmax():
model = opennre.get_model('tacred_bertentity_softmax')
result = model.infer({
'text': 'He was the son of <NAME> mac <NAME>, and grandson of the high king Áed Uaridnach (died 612).',
'h': {'pos': (18, 46)}, 't': {'pos': (78, 91)}})
print(result)
def infer_tacred_bert_softmax():
model = opennre.get_model('tacred_bert_softmax')
result = model.infer({
'text': 'He was the son of <NAME> mac <NAME>, and grandson of the high king Áed Uaridnach (died 612).',
'h': {'pos': (18, 46)}, 't': {'pos': (78, 91)}})
print(result)
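# In OpenNRE, model.infer returns the predicted relation together with a confidence score
# for the marked head/tail spans (the exact return shape may vary between versions, so
# treat this as an assumption rather than a guarantee).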
if __name__ == '__main__':
infer_wiki80_bert_softmax()
# infer_tacred_bertentity_softmax()
# infer_tacred_bert_softmax()
``` |
{
"source": "johnson7788/OpenUE",
"score": 2
} |
#### File: openue/data/data_module.py
```python
import os
from .base_data_module import BaseDataModule
from .processor import get_dataset
from transformers import AutoTokenizer
from torch.utils.data import DataLoader
import logging
logger = logging.getLogger(__name__)
from .utils import get_labels_ner, get_labels_seq, openue_data_collator_seq, openue_data_collator_ner, openue_data_collator_interactive
collator_set = {"ner": openue_data_collator_ner, "seq": openue_data_collator_seq, "interactive": openue_data_collator_interactive}
class REDataset(BaseDataModule):
def __init__(self, args) -> None:
super().__init__(args)
self.tokenizer = AutoTokenizer.from_pretrained(self.args.model_name_or_path)
self.num_labels = len(get_labels_ner()) if args.task_name == "ner" else len(get_labels_seq(args))
self.collate_fn = collator_set[args.task_name]
num_relations = len(get_labels_seq(args))
        # by default, add special tokens that represent the relations
add_flag = False
for i in range(num_relations):
if f"[relation{i}]" not in self.tokenizer.get_added_vocab():
add_flag = True
break
if add_flag:
relation_tokens = [f"[relation{i}]" for i in range(num_relations)]
num_added_tokens = self.tokenizer.add_special_tokens({'additional_special_tokens': relation_tokens})
logger.info(f"添加特殊token到tokenizer中: {num_added_tokens}个 \n {relation_tokens}")
def setup(self, stage=None):
self.data_train = get_dataset("train", self.args, self.tokenizer)
self.data_val = get_dataset("dev", self.args, self.tokenizer)
self.data_test = get_dataset("test", self.args, self.tokenizer)
def prepare_data(self):
# download the dataset and move it to the dataset fold
if not os.path.exists(self.args.data_dir):
os.system("wget http://47.92.96.190/dataset/ske.tar.gz")
os.system("tar -xzvf ske.tar.gz")
os.system("mkdir dataset")
os.system("mv ske ./dataset")
os.system("rm ske.tar.gz")
def train_dataloader(self):
return DataLoader(self.data_train, shuffle=True, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.collate_fn)
def val_dataloader(self):
return DataLoader(self.data_val, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.collate_fn)
def test_dataloader(self):
return DataLoader(self.data_test, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.collate_fn)
@staticmethod
def add_to_argparse(parser):
BaseDataModule.add_to_argparse(parser)
parser.add_argument("--task_name", type=str, default="ner",choices=["ner", "seq", "interactive"], help="[normal, reloss, ptune]",)
parser.add_argument("--model_name_or_path", type=str, default="bert-base-uncased", help="加载的预训练模型")
parser.add_argument("--max_seq_length", type=int, default=128, help="最大序列长度")
return parser
def get_config(self):
return dict(num_tokens=len(self.tokenizer), num_labels=self.num_labels, tokenizer=self.tokenizer)
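# Illustrative wiring (values are placeholders; REDataset also expects the arguments
# registered by BaseDataModule.add_to_argparse, e.g. the data directory):
#   parser = argparse.ArgumentParser()
#   parser = REDataset.add_to_argparse(parser)
#   args = parser.parse_args(["--task_name", "ner", "--model_name_or_path", "bert-base-chinese"])
#   dm = REDataset(args)
#   dm.setup()
#   train_loader = dm.train_dataloader()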
``` |
{
"source": "johnson7788/Orbit",
"score": 3
} |
#### File: johnson7788/Orbit/random_policy.py
```python
import numpy as np
# import the env class
from Paddle.paddle import Paddle
# create an object of env class
env = Paddle()
np.random.seed(0)
def random_policy(episode):
action_space = 3
state_space = 5
max_steps = 1000
for e in range(episode):
state = env.reset()
score = 0
for i in range(max_steps):
action = np.random.randint(action_space)
reward, next_state, done = env.step(action)
score += reward
state = next_state
if done:
print("episode: {}/{}, score: {}".format(e, episode, score))
break
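# Note: Paddle.step(action) returns (reward, next_state, done), which differs from the
# usual Gym ordering of (observation, reward, done, info); the loop above unpacks it accordingly.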
if __name__ == '__main__':
random_policy(10)
``` |
{
"source": "johnson7788/play-daxigua-using-Reinforcement-Learning",
"score": 4
} |
#### File: johnson7788/play-daxigua-using-Reinforcement-Learning/Fruit.py
```python
import pygame as pg
def create_fruit(type, x, y):
"""
创建一个水果,类型是1到11, 1:葡萄,2:樱桃,3:橘子,4:柠檬,5:猕猴桃,6:西红柿,7:桃子,8:菠萝,9:柚子,10:西瓜,11:大西瓜
:param type: 1-11
:type type: int
:param x: 水果要在屏幕中出现的位置x
:type x:
:param y: 水果要在屏幕中出现的位置y
:type y:
:return: 创建的的水果的类
:rtype:
"""
fruit = None
if type == 1:
fruit = PT(x, y)
elif type == 2:
fruit = YT(x, y)
elif type == 3:
fruit = JZ(x, y)
elif type == 4:
fruit = NM(x, y)
elif type == 5:
fruit = MHT(x, y)
elif type == 6:
fruit = XHS(x, y)
elif type == 7:
fruit = TZ(x, y)
elif type == 8:
fruit = BL(x, y)
elif type == 9:
fruit = YZ(x, y)
elif type == 10:
fruit = XG(x, y)
elif type == 11:
fruit = DXG(x, y)
return fruit
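# Illustrative usage (assumes a pygame display surface `screen` has been created elsewhere):
#   fruit = create_fruit(1, x=200, y=50)   # a grape at (200, 50)
#   fruit.update_position(210, 80)         # follow the physics body
#   fruit.draw(screen)                     # blit it onto the game surface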
class Fruit():
def __init__(self, x, y):
"""
水果类,是父类
:param x: react的x的大小
:type x:
:param y:
:type y:
"""
self.load_images()
# 初始化一个pygame正方形, eg: <rect(0, 0, 40, 40)>
self.rect = self.image.get_rect()
# react的x和y,确定它的大小
self.rect.x = x
self.rect.y = y
self.angle_degree = 0
def load_images(self):
pass
def update_position(self, x, y, angle_degree=0):
"""
更新水果的位置,当下落的时候
:param x:
:type x:
:param y:
:type y:
:param angle_degree:
:type angle_degree:
:return:
:rtype:
"""
self.rect.x = x - self.r
self.rect.y = y - self.r
self.angle_degree = angle_degree
# self.image = pg.transform.rotate(self.image, self.angle_degree)
def draw(self, surface):
"""
把水果画到游戏画面上
:param surface:
:type surface:
:return:
:rtype:
"""
surface.blit(self.image, self.rect)
class PT(Fruit):
def __init__(self, x, y):
"""
葡萄
:param x: 水果要在屏幕中出现的位置x
:type x:
:param y:水果要在屏幕中出现的位置y
:type y:
"""
        # self.r is the radius
self.r = 2 * 10
self.type = 1
        # resize the loaded image to this size
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
"""
加载普通图片
:return:
:rtype:
"""
self.image = pg.image.load('res/01.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class YT(Fruit):
def __init__(self, x, y):
"""
        Cherry
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 15
self.type = 2
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/02.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class JZ(Fruit):
def __init__(self, x, y):
"""
        Orange
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 21
self.type = 3
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/03.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class NM(Fruit):
def __init__(self, x, y):
"""
        Lemon
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 23
self.type = 4
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/04.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class MHT(Fruit):
def __init__(self, x, y):
"""
        Kiwi
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 29
self.type = 5
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/05.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class XHS(Fruit):
def __init__(self, x, y):
"""
        Tomato
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 35
self.type = 6
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/06.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class TZ(Fruit):
def __init__(self, x, y):
"""
        Peach
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 37
self.type = 7
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/07.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class BL(Fruit):
def __init__(self, x, y):
"""
        Pineapple
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 50
self.type = 8
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/08.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class YZ(Fruit):
def __init__(self, x, y):
"""
        Pomelo
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 59
self.type = 9
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/09.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class XG(Fruit):
def __init__(self, x, y):
"""
        Watermelon
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 60
self.type = 10
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/10.png')
self.image = pg.transform.smoothscale(self.image, self.size)
class DXG(Fruit):
def __init__(self, x, y):
"""
        Big watermelon
:param x:
:type x:
:param y:
:type y:
"""
self.r = 2 * 78
self.type = 11
self.size = (self.r*2, self.r*2)
Fruit.__init__(self, x - self.r, y - self.r)
def load_images(self):
self.image = pg.image.load('res/11.png')
self.image = pg.transform.smoothscale(self.image, self.size)
```
#### File: johnson7788/play-daxigua-using-Reinforcement-Learning/xiguaenv.py
```python
import numpy as np
import gym
from gym import spaces
from stable_baselines3.common.env_checker import check_env
from State import AI_Board
class DaxiguaEnv(gym.Env):
"""
大西瓜env
"""
def __init__(self, grid_size=10):
super(DaxiguaEnv, self).__init__()
# 定义动作和观察空间
# 他们必须是gym.spaces对象
# 当使用离散动作时,这里我们14个动作:代表放置14个位置
n_actions = 14
self.game = AI_Board()
self.action_space = spaces.Discrete(n_actions)
# 这里我们观察到的游戏的返回的图像
self.observation_space = spaces.Box(low=0, high=255,
shape=(N_CHANNELS, HEIGHT, WIDTH), dtype=np.uint8)
def reset(self):
"""
重要提示:观察必须是numpy数组
:return: (np.array)
"""
# 初始化网格右侧的agent
self.agent_pos = self.grid_size - 1
# 这里我们将其转换为float32,以使其更加通用(以防我们想使用连续操作), eg: 返回[9.]
return np.array([self.agent_pos]).astype(np.float32)
    def step(self, action):
        if action not in range(self.action_space.n):
            raise ValueError(f"收到了不属于动作空间的动作 {action}")
        # Placeholder dynamics: the 14 drop positions are not wired to the game yet; as a
        # stand-in, the lower half of the action range moves the agent left and the upper
        # half moves it right.
        if action < self.action_space.n // 2:
            self.agent_pos -= 1
        else:
            self.agent_pos += 1
        # clip to the grid boundaries: values below 0 become 0, values above grid_size become grid_size
        self.agent_pos = np.clip(self.agent_pos, 0, self.grid_size)
        # are we at the left edge of the grid? used to decide whether the episode is over
        done = bool(self.agent_pos == 0)
        # the reward is zero everywhere except when the goal (the left edge of the grid) is reached
        reward = 1 if self.agent_pos == 0 else 0
        # extra info could be passed here, but it is not used yet
        info = {}
        # return the observation (agent position as a float32 array), the reward, done and info
        return np.array([self.agent_pos]).astype(np.float32), reward, done, info
def render(self, mode='console'):
if mode != 'console':
raise NotImplementedError()
        # the agent position is marked with an x
print(f"目前机器人的位置是,用x代表机器人: ")
print("." * self.agent_pos, end="")
print("x", end="")
print("." * (self.grid_size - self.agent_pos))
def close(self):
pass
def do_check_env():
"""
检查我们的自定义的env是否符合gym的规则
:return:
:rtype:
"""
print(f"开始检查env是否满足gym的设定")
env = GoLeftEnv()
print(f"当前环境的观测空间是 {env.observation_space}")
print(f"当前环境的动作空间是 {env.action_space}")
print(f"当前环境的一个动作抽样是 {env.action_space.sample()}")
# 如果环境不符合接口,将抛出一个错误。 来检查你的环境是否遵循Gym接口。它还可以选择检查环境是否与Stable-Baselines兼容(必要时发出警告)
check_env(env, warn=True)
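# Illustrative training sketch (not part of the original file, and not invoked): once the
# environment passes check_env, a stable-baselines3 agent could be trained on it like this.
def _train_example():
    from stable_baselines3 import PPO
    env = DaxiguaEnv()
    model = PPO("MlpPolicy", env, verbose=1)
    model.learn(total_timesteps=10_000)
    return model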
if __name__ == '__main__':
do_check_env()
``` |
{
"source": "johnson7788/spert",
"score": 2
} |
#### File: johnson7788/spert/args.py
```python
import argparse
def _add_common_args(arg_parser):
"""
常规参数
:param arg_parser:
:type arg_parser:
:return:
:rtype:
"""
arg_parser.add_argument('--config', type=str, help="指定配置文件")
# Input
arg_parser.add_argument('--types_path', type=str, help="通往类型规格的路径")
# Preprocessing
arg_parser.add_argument('--tokenizer_path', type=str, help="tokenizer的路径")
arg_parser.add_argument('--max_span_size', type=int, default=10, help="span跨度的最大大小")
arg_parser.add_argument('--lowercase', action='store_true', default=False,
help="如果为真,在预处理过程中,输入被小写")
arg_parser.add_argument('--sampling_processes', type=int, default=4,
help="采样进程的数量。=0表示没有取样的多进程")
# Model / Training / Evaluation
arg_parser.add_argument('--model_path', type=str, help="包含模型checkpoint的目录的路径")
arg_parser.add_argument('--model_type', type=str, default="spert", help="模型的类型")
arg_parser.add_argument('--cpu', action='store_true', default=False,
help="如果为真,即使有CUDA设备,也在CPU上进行训练/评估。")
arg_parser.add_argument('--eval_batch_size', type=int, default=1, help="评估/预测批次大小")
arg_parser.add_argument('--max_pairs', type=int, default=1000,
help="训练/评估期间要处理的最大实体对")
arg_parser.add_argument('--rel_filter_threshold', type=float, default=0.4, help="关系的过滤阈值")
arg_parser.add_argument('--size_embedding', type=int, default=25, help="维度嵌入的维度")
arg_parser.add_argument('--prop_drop', type=float, default=0.1, help="SpERT中使用的dropout概率")
arg_parser.add_argument('--freeze_transformer', action='store_true', default=False, help="是否冻结 BERT 权重")
arg_parser.add_argument('--no_overlapping', action='store_true', default=False,
help="如果为真,则不对重叠的实体和有重叠的实体的关系进行评估")
# Misc
arg_parser.add_argument('--seed', type=int, default=None, help="随机数种子")
arg_parser.add_argument('--cache_path', type=str, default=None,
help="缓存transformer模型的路径(用于HuggingFacetransformer库)。")
arg_parser.add_argument('--debug', action='store_true', default=False, help="debugging模式开/关")
def _add_logging_args(arg_parser):
    arg_parser.add_argument('--label', type=str, help="Label of the run. Used as the directory name for logs/models")
    arg_parser.add_argument('--log_path', type=str, help="Path to the directory where training/evaluation logs are stored")
    arg_parser.add_argument('--store_predictions', action='store_true', default=False,
                            help="If true, store predictions on disk (in the log directory)")
    arg_parser.add_argument('--store_examples', action='store_true', default=False,
                            help="If true, store evaluation examples on disk (in the log directory)")
    arg_parser.add_argument('--example_count', type=int, default=None,
                            help="Count of evaluation examples to store (if store_examples == True)")
def train_argparser():
"""
训练的参数
:return:
:rtype:
"""
arg_parser = argparse.ArgumentParser()
# Input
    arg_parser.add_argument('--train_path', type=str, help="Path to the training data")
    arg_parser.add_argument('--valid_path', type=str, help="Path to the validation data")
    # Logging
    arg_parser.add_argument('--save_path', type=str, help="Path to the directory where model checkpoints are stored")
    arg_parser.add_argument('--init_eval', action='store_true', default=False,
                            help="If true, evaluate on the validation set before training")
    arg_parser.add_argument('--save_optimizer', action='store_true', default=False,
                            help="Save the optimizer alongside the model")
    arg_parser.add_argument('--train_log_iter', type=int, default=100, help="Log the training process every x iterations")
    arg_parser.add_argument('--final_eval', action='store_true', default=False,
                            help="Evaluate the model only after training instead of at every epoch")
    # Model / Training
    arg_parser.add_argument('--train_batch_size', type=int, default=2, help="Training batch size")
    arg_parser.add_argument('--epochs', type=int, default=20, help="Number of epochs")
    arg_parser.add_argument('--neg_entity_count', type=int, default=100,
                            help="Number of negative entity samples per document (sentence)")
    arg_parser.add_argument('--neg_relation_count', type=int, default=100,
                            help="Number of negative relation samples per document (sentence)")
    arg_parser.add_argument('--lr', type=float, default=5e-5, help="Learning rate")
    arg_parser.add_argument('--lr_warmup', type=float, default=0.1,
                            help="Proportion of total training iterations used for warmup in the linear increase/decrease schedule")
arg_parser.add_argument('--weight_decay', type=float, default=0.01, help="Weight decay to apply")
arg_parser.add_argument('--max_grad_norm', type=float, default=1.0, help="Maximum gradient norm")
_add_common_args(arg_parser)
_add_logging_args(arg_parser)
return arg_parser
def eval_argparser():
arg_parser = argparse.ArgumentParser()
# Input
    arg_parser.add_argument('--dataset_path', type=str, help="Dataset to evaluate")
_add_common_args(arg_parser)
_add_logging_args(arg_parser)
return arg_parser
def predict_argparser():
arg_parser = argparse.ArgumentParser()
# Input
arg_parser.add_argument('--dataset_path', type=str, help="Path to dataset")
arg_parser.add_argument('--predictions_path', type=str, help="Path to store predictions")
arg_parser.add_argument('--spacy_model', type=str, help="Label of SpaCy model (used for tokenization)")
_add_common_args(arg_parser)
return arg_parser
``` |
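For reference, a quick way to exercise the parsers above is to feed them an explicit argument list. The values below are made up for illustration; only the flags themselves come from the definitions above.

```python
# Hypothetical demonstration of the parsers above; every value below is made up for illustration.
from args import train_argparser  # assumes the module above is saved as args.py

arg_parser = train_argparser()
run_args = arg_parser.parse_args([
    '--train_path', 'data/train.json',
    '--valid_path', 'data/dev.json',
    '--types_path', 'data/types.json',
    '--tokenizer_path', 'bert-base-cased',
    '--model_path', 'bert-base-cased',
    '--save_path', 'out/',
    '--epochs', '5',
])
print(run_args.train_path, run_args.epochs, run_args.max_span_size)
```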
{
"source": "johnson7788/TextBrewer",
"score": 2
} |
#### File: examples/msra_ner_example/main.train.dist.py
```python
import logging
import os,random
import numpy as np
import torch
from utils_ner import read_features, label2id_dict
from utils import divide_parameters
from transformers import ElectraConfig, AdamW, get_constant_schedule_with_warmup, get_linear_schedule_with_warmup, BertTokenizer, get_constant_schedule
import config
from modeling import ElectraForTokenClassification, ElectraForTokenClassificationAdaptorTraining
from textbrewer import DistillationConfig, TrainingConfig,BasicTrainer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from functools import partial
from train_eval import predict, ddp_predict
def args_check(logger, args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory () already exists and is not empty.")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("rank %d device %s n_gpu %d distributed training %r", torch.distributed.get_rank(), device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def main():
#parse arguments
config.parse()
args = config.args
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN
)
logger = logging.getLogger("Main")
#arguments check
device, n_gpu = args_check(logger, args)
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
if args.local_rank != -1:
logger.warning(f"Process rank: {torch.distributed.get_rank()}, device : {args.device}, n_gpu : {args.n_gpu}, distributed training : {bool(args.local_rank!=-1)}")
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
    # Load the ElectraConfig (imported from the transformers package)
    bert_config_S = ElectraConfig.from_json_file(args.bert_config_file_S)
    # (args.output_encoded_layers=='true') --> True, output the hidden states by default
    bert_config_S.output_hidden_states = (args.output_encoded_layers=='true')
    # num_labels: number of classes
    bert_config_S.num_labels = len(label2id_dict)
    assert args.max_seq_length <= bert_config_S.max_position_embeddings
    # Load the data
    train_examples = None
    train_dataset = None
    eval_examples = None
    eval_dataset = None
    num_train_steps = None
    # Load the tokenizer
    tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
    if args.do_train:
        # Returns all examples plus the dataset built from them; the dataset layout is [all_token_ids, all_input_mask, all_label_ids]
        print("Loading the training data")
        train_examples,train_dataset = read_features(args.train_file, tokenizer=tokenizer, max_seq_length=args.max_seq_length)
    if args.do_predict:
        print("Loading the test data")
        eval_examples,eval_dataset = read_features(args.predict_file,tokenizer=tokenizer, max_seq_length=args.max_seq_length)
if args.local_rank == 0:
torch.distributed.barrier()
#Build Model and load checkpoint
model_S = ElectraForTokenClassification(bert_config_S)
    # Load the student model's parameters; the default is the 'bert' type
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
#state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
#missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
missing_keys, unexpected_keys = model_S.load_state_dict(state_dict_S,strict=False)
logger.info(f"missing keys:{missing_keys}")
logger.info(f"unexpected keys:{unexpected_keys}")
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
else:
logger.info("Model is randomly initialized.")
    # Move the model to the device
    model_S.to(device)
    if args.do_train:
        # parameters
        if args.lr_decay is not None:
            # Parameters of the classifier layer: weight, bias
            outputs_params = list(model_S.classifier.named_parameters())
            # Split into parameters with learning-rate decay and parameters without
            outputs_params = divide_parameters(outputs_params, lr = args.learning_rate)
            electra_params = []
            # e.g. 12: the encoder has 12 layers in total
            n_layers = len(model_S.electra.encoder.layer)
            assert n_layers==12
            for i,n in enumerate(reversed(range(n_layers))):
                encoder_params = list(model_S.electra.encoder.layer[n].named_parameters())
                lr = args.learning_rate * args.lr_decay**(i+1)
                electra_params += divide_parameters(encoder_params, lr = lr)
                logger.info(f"{i}: learning rate of layer {n} is {lr}")
            embed_params = [(name,value) for name,value in model_S.electra.named_parameters() if 'embedding' in name]
            logger.info(f"{[name for name,value in embed_params]}")
            lr = args.learning_rate * args.lr_decay**(n_layers+1)
            electra_params += divide_parameters( embed_params, lr = lr)
            logger.info(f"Learning rate of the embedding layer: {lr}")
            all_trainable_params = outputs_params + electra_params
assert sum(map(lambda x:len(x['params']), all_trainable_params))==len(list(model_S.parameters())),\
(sum(map(lambda x:len(x['params']), all_trainable_params)), len(list(model_S.parameters())))
else:
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("可训练的参数all_trainable_params共有: %d", len(all_trainable_params))
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
train_sampler = DistributedSampler(train_dataset)
        # Build the dataloader
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
        # Compute the number of training steps from the number of epochs
        num_train_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        optimizer = AdamW(all_trainable_params, lr=args.learning_rate, correct_bias = False)
        if args.official_schedule == 'const':
            # Constant learning rate with warmup
            scheduler_class = get_constant_schedule_with_warmup
            scheduler_args = {'num_warmup_steps':int(args.warmup_proportion*num_train_steps)}
            #scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion*num_train_steps))
        elif args.official_schedule == 'linear':
            # Linear learning-rate schedule
            scheduler_class = get_linear_schedule_with_warmup
            # Number of warmup steps: warmup_proportion (e.g. 10%) of all steps are warmup
            scheduler_args = {'num_warmup_steps':int(args.warmup_proportion*num_train_steps), 'num_training_steps': num_train_steps}
#scheduler = get_linear_schedule_with_warmup(optimizer,num_warmup_steps=int(args.warmup_proportion*num_train_steps), num_training_steps = num_train_steps)
elif args.official_schedule == 'const_nowarmup':
scheduler_class = get_constant_schedule
scheduler_args = {}
else:
raise NotImplementedError
logger.warning("***** 开始 训练 *****")
logger.warning("local_rank %d 原样本数 = %d", args.local_rank, len(train_examples))
logger.warning("local_rank %d split之后的样本数 = %d", args.local_rank, len(train_dataset))
logger.warning("local_rank %d 前向 batch size = %d", args.local_rank, forward_batch_size)
logger.warning("local_rank %d 训练的steps = %d", args.local_rank, num_train_steps)
########### TRAINING ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
            ckpt_frequency = args.ckpt_frequency, # checkpoint saving frequency
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device,
fp16=args.fp16,
local_rank = args.local_rank)
logger.info(f"训练的配置文件:")
logger.info(f"{train_config}")
# 初始化训练器
distiller = BasicTrainer(train_config = train_config,
model = model_S,
adaptor = ElectraForTokenClassificationAdaptorTraining)
        # Initialize the callback; the ddp_predict function is used for evaluation
callback_func = partial(ddp_predict,
eval_examples=eval_examples,
eval_dataset=eval_dataset,
args=args)
with distiller:
distiller.train(optimizer, scheduler_class=scheduler_class,
scheduler_args=scheduler_args,
max_grad_norm = 1.0,
dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = ddp_predict(model_S,eval_examples,eval_dataset,step=0,args=args)
print (res)
if __name__ == "__main__":
main()
```
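The layer-wise learning-rate decay above goes through the repo's `divide_parameters` helper, which is not shown here. As a rough standalone illustration of the same idea (not the repo's code), the sketch below builds AdamW parameter groups for a generic HuggingFace BERT-style encoder, where layer k receives `lr * decay**(n_layers - k)` and the embeddings get the smallest rate.

```python
import torch
from transformers import AutoModel

def layerwise_lr_groups(model, base_lr=5e-5, decay=0.8):
    """Illustrative sketch only, not the divide_parameters helper used above.

    Assumes a BERT-style model with model.encoder.layer and model.embeddings.
    """
    groups = []
    layers = model.encoder.layer
    n_layers = len(layers)
    for idx, layer in enumerate(layers):
        # Deeper (later) layers get a larger learning rate.
        lr = base_lr * decay ** (n_layers - idx)
        groups.append({"params": list(layer.parameters()), "lr": lr})
    # Embeddings get the smallest learning rate of all; other modules (e.g. pooler) are omitted here.
    groups.append({"params": list(model.embeddings.parameters()),
                   "lr": base_lr * decay ** (n_layers + 1)})
    return groups

model = AutoModel.from_pretrained("bert-base-chinese")
optimizer = torch.optim.AdamW(layerwise_lr_groups(model, base_lr=5e-5, decay=0.8))
```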
#### File: huazhuang/utils/compare_components.py
```python
import json
import pandas as pd
import requests
def collect_data(devfile="../data_root_dir/newcos/dev.json", eval_results="../output_root_dir/newcos/eval_results-newcos.json"):
"""
生成excel, 对比main.trainer.py生成的结果和devfile
:param devfile: 训练文件,格式是 [(text, keyword, labels),..]
:param eval_results: main.trainer.py生成的文件output文件中的json文件 [(predid, probality)]
:return:
"""
labels = ["是","否"]
with open(devfile) as f:
dev_data = json.load(f)
with open(eval_results) as f:
eval_data = json.load(f)
assert len(dev_data) == len(eval_data)
data = []
for d, res in zip(dev_data, eval_data):
one_data = {"text": d[0], "keyword":d[1], "label": d[2], "predict":labels[res[0]], "probability": format(res[1], "0.3f")}
data.append(one_data)
df = pd.DataFrame(data)
excel_file = "result2.xlsx"
writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{excel_file}")
return data
def compare_model(hostname='http://127.0.0.1:3314'):
"""
把收集到的数据,放到线上,对比一下准确率,不是和咱们自己的模型对比
:param hostname:
:return:
"""
url = hostname + '/lavector/rest/aspect-sentiment-batch'
headers = {'Content-Type': 'application/json'}
mydata = collect_data()
post_data = []
for d in mydata:
one = (d["text"], [d["keyword"]])
post_data.append(one)
data = {'channel': 'jd', 'data': post_data}
print(f"发送请求到{url}, 数据量{len(post_data)}")
res = requests.post(url, data=json.dumps(data), headers=headers)
result = res.json()
myresults = []
for r in result['result']:
keyword_list = list(r.keys())
pres_list = list(r.values())
assert len(keyword_list) == 1
assert len(pres_list) == 1
keyword = keyword_list[0]
pres = pres_list[0]
for k,v in pres.items():
if v == 1:
if k == "负向":
predict = "消极"
elif k =="正向":
predict = "积极"
else:
predict = "中性"
myresults.append([keyword,predict])
assert len(post_data) == len(myresults)
    # Save to a file
    newdata = []
    for d, res in zip(mydata, myresults):
        if res[0] != d["keyword"]:
            print(f"The keyword returned for this prediction does not match: {res[0]}")
            continue
        d["online_predict"] = res[1]
        newdata.append(d)
    df = pd.DataFrame(newdata)
    excel_file = "result_online.xlsx"
    writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
    df.to_excel(writer)
    writer.save()
    print(f"Saved to Excel successfully: {excel_file}")
    return newdata
def read_result_online():
"""
读取result_online.xlsx,比较
上文,关键字,下午的字数比
pretext + keyword + posttest
predict 表示的结果是75个字的,25+25+25的结果
online_predict 表示的结果是 15+30+20
:return:
"""
df = pd.read_excel("result_online.xlsx")
total = 0
predict_yes = 0
online_yes = 0
for index, row in df.iterrows():
label = row['label']
predict = row['predict']
online_predict = row['online_predict']
if predict != online_predict:
total += 1
if predict == label:
predict_yes +=1
elif online_predict == label:
online_yes +=1
                else:
                    print("Neither prediction was correct")
                    print(row)
                    print()
    print(f"{total} samples differ in total; the 75-character predictions got {predict_yes} right, the online 65-character predictions got {online_yes} right")
def dopredict(test_data, url="http://127.0.0.1:5000/api/predict_macbert", type=None):
"""
预测结果
:param test_data:
:return:
"""
if type:
data = {'data': test_data, 'type':type}
else:
data = {'data': test_data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
result = r.json()
return result
def download_data_and_compare(hostname=["http://192.168.50.139:8081/api/"], dirpath="/opt/lavector/absa/", jsonfile=["192.168.50.139_500_8081_0129.json"], isabsa=True, result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx", type=None):
"""
从label_studio的某个hostname下载数据,然后预测,最后给出结果
:return:
"""
from absa_api import export_data
#从label-studio下载文
original_data = []
for hname, jfile in zip(hostname,jsonfile):
json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)
        # Load the json file obtained from label-studio
        with open(json_file, 'r') as f:
            data = json.load(f)
        print(f"Collected {len(data)} records from host {hname}")
original_data.extend(data)
data = predict_comare_excel(original_data, result_excel=result_excel, export_wrong_examples_excel=export_wrong_examples_excel,correct_examples_excel= correct_examples_excel, isabsa=isabsa, type=type)
return data
def download_data_and_compare_same(hostname=["http://192.168.50.139:8081/api/","http://192.168.50.139:8080/api/"], dirpath="/opt/lavector/absa/", jsonfile=["192.168.50.139_500_8081_0129.json","192.168.50.139_500_8080_0129.json"], isabsa=True):
"""
对比相同的hostname的数据
从label_studio的某个hostname下载数据,然后预测,最后给出结果
:return:
"""
from absa_api import export_data
#从label-studio下载文
if len(hostname) != 2:
raise Exception("必须准2个hostname,里面包含相同的评估数据")
result = []
for hname, jfile in zip(hostname,jsonfile):
original_data = []
json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)
        # Load the json file obtained from label-studio
        with open(json_file, 'r') as f:
            data = json.load(f)
        print(f"Collected {len(data)} records from host {hname}")
original_data.extend(data)
predict_data, excel_data = predict_comare_excel(original_data, isabsa=isabsa)
result.append([hname, predict_data, excel_data])
    # Compare the data annotated by the two annotators
    diffrent_data = []
    print(f"Comparing hosts {result[0][0], result[1][0]}")
    hname1, data1, pre1 = result[0]
    hname2, data2, pre2 = result[1]
    if len(data1) != len(data2):
        raise Exception("The two annotators labelled a different number of samples")
    for d1, d2 in zip(data1,data2):
        if d1[0] != d2[0]:
            print("This sample does not match")
        else:
            if d1[4] != d2[4]:
                print("The two annotators assigned different labels")
                print(d1[0])
                print(d1[1])
                print(d1[4])
                print(d2[4])
                one_data = {"text": d1[0], "keyword": d1[1], "P1_label": d1[4], "P2_label": d2[4], "location": d1[2:4]}
                diffrent_data.append(one_data)
    print(f"Total number of mismatched samples: {len(diffrent_data)}")
    df = pd.DataFrame(diffrent_data)
    writer = pd.ExcelWriter("diffrent.xlsx", engine='xlsxwriter')
    df.to_excel(writer)
    writer.save()
    print("Saved to diffrent.xlsx successfully")
return data
def predict_comare_excel(original_data,result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx", isabsa=True, type=None):
"""
:param original_data:
:param result_excel:
:param export_wrong_examples_excel:
:param correct_examples_excel:
:param isabsa:
:return: data是预处理后的,excel_data是模型预测的结果
"""
from convert_label_studio_data import format_data, do_truncate_data
# [(text, keyword, start_idx, end_idx, label)]
data = format_data(original_data)
# original_data,truncate_data, locations = do_truncate_data(data)
if isabsa:
predict_result = dopredict(test_data=data, url="http://192.168.50.189:5000/api/predict_macbert")
else:
        assert type is not None, "A type must be given to specify whether it is component, effect, fragrance, etc."
predict_result = dopredict(test_data=data, url="http://192.168.50.189:5015/api/predict_truncate",type=type)
# print(predict_result)
excel_data = []
for d, pred in zip(data, predict_result):
one_data = {"text": d[0], "keyword": d[1], "label": d[4], "predict": pred[0], "start": d[2], "end":d[3],
"probability": format(pred[1], "0.3f"), "channel":d[-2], "wordtype":d[-1]}
excel_data.append(one_data)
df = pd.DataFrame(excel_data)
writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{result_excel}")
#预测错误的样本
predict_wrong_examples = []
# 保存预测错误的样本到excel中
correct_examples = []
for d, pred in zip(data, predict_result):
one_data = {"text": d[0], "keyword": d[1], "label": d[4], "predict": pred[0], "start": d[2], "end": d[3],
"probability": format(pred[1], "0.3f"), "channel": d[-2], "wordtype": d[-1]}
if one_data["label"] != one_data["predict"]:
print(f"{one_data['text']}: 模型预测的结果与ground truth不一致")
predict_wrong_examples.append(one_data)
else:
correct_examples.append(one_data)
print(f"总样本数是{len(data)},预测错误的样本总数是{len(predict_wrong_examples)}")
print(f"总样本数是{len(data)},预测正确的样本总数是{len(correct_examples)}")
df = pd.DataFrame(predict_wrong_examples)
writer = pd.ExcelWriter(export_wrong_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
df = pd.DataFrame(correct_examples)
writer = pd.ExcelWriter(correct_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
print(f"保存全部为错误的样本到excel: {export_wrong_examples_excel}完成")
print(f"保存全部为正确的样本到excel: {correct_examples_excel}完成")
print(f"准确率为{(len(correct_examples))/len(data)}")
return data, excel_data
def get_json_data_compare(jsonfile="/opt/lavector/192.168.50.119_8086.json"):
"""
获取jsonfile,然后预测
:return:
"""
#加载从label-studio获取的到json文件
data = predict_comare_excel(jsonfile, result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx")
return data
def check_train_data(result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx", dev=True):
"""
检查训练和评估模型时,哪些数据被预测错误了, 默认是dev_data
:return:
"""
train_data = "data_root_dir/components"
from convert_label_studio_data import get_all
# [(text, keyword, start_idx, end_idx, label)]
original_data, train_data, dev_data = get_all(absa=False, keep_cancel=False, split=True)
if dev:
#评估dev的数据
eval_data = dev_data
else:
eval_data = original_data
# predict_result = dopredict(test_data=original_data, url="http://127.0.0.1:5010/api/predict_truncate")
predict_result = dopredict(test_data=eval_data, url="http://192.168.50.139:5010/api/predict_truncate")
excel_data = []
for ori, d in zip(eval_data, predict_result):
one_data = {"text": ori[0], "keyword": ori[1], "label": ori[4], "predict": d[0], "location": d[3],
"probability": format(d[1], "0.3f"), "channel": ori[-2], "wordtype": ori[-1]}
excel_data.append(one_data)
df = pd.DataFrame(excel_data)
writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{result_excel}")
# 预测错误的样本
predict_wrong_examples = []
# 保存预测错误的样本到excel中
correct_examples = []
for ori, d in zip(eval_data, predict_result):
one_data = {"text": ori[0], "keyword": ori[1], "label": ori[4], "predict": d[0], "location": d[3],
"probability": format(d[1], "0.3f"), "channel": ori[-2], "wordtype": ori[-1]}
if one_data["label"] != one_data["predict"]:
print(f"{one_data['text']}: 模型预测的结果与ground truth不一致")
predict_wrong_examples.append(one_data)
else:
correct_examples.append(one_data)
print(f"总样本数是{len(eval_data)},预测错误的样本总数是{len(predict_wrong_examples)}")
print(f"总样本数是{len(eval_data)},预测正确的样本总数是{len(correct_examples)}")
df = pd.DataFrame(predict_wrong_examples)
writer = pd.ExcelWriter(export_wrong_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
df = pd.DataFrame(correct_examples)
writer = pd.ExcelWriter(correct_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
print(f"保存全部为错误的样本到excel: {export_wrong_examples_excel}完成")
print(f"保存全部为正确的样本到excel: {correct_examples_excel}完成")
print(f"准确率为{(len(correct_examples)) / len(eval_data)}")
return eval_data, excel_data
def eval4dems():
"""
评估4个维度的结果
:return:
:rtype:
"""
hostnames = ["http://192.168.50.139:7081/api/","http://192.168.50.139:7082/api/","http://192.168.50.139:7083/api/","http://192.168.50.139:7084/api/"]
dirs = ["/opt/lavector/effect/","/opt/lavector/pack/", "/opt/lavector/fragrance/", "/opt/lavector/promotion/"]
save_files = ["effect_200_0629.json", "pack_200_0629.json", "fragrance_200_0629.json", "promotion_200_0629.json"]
names = ["effect", "pack", "fragrance", "promotion"]
for host,dir,savefile,name in zip(hostnames, dirs, save_files, names):
download_data_and_compare(hostname=[host], dirpath=dir,
jsonfile=[savefile], isabsa=False, result_excel=f"{name}_result.xlsx", export_wrong_examples_excel=f"{name}_wrong.xlsx",correct_examples_excel=f"{name}_correct.xlsx",type=name)
if __name__ == '__main__':
# collect_data()
# compare_model()
# read_result_online()
# download_data_and_compare()
# download_data_and_compare(hostname=["http://192.168.50.139:8086/api/"], dirpath="/opt/lavector/components/", jsonfile= ["192.168.50.139_500_8086_0129.json"],isabsa=False)
# download_data_and_compare(hostname=["http://192.168.50.139:8081/api/"], dirpath="/opt/lavector/absa/", jsonfile= ["192.168.50.139_500_8081_0220.json"],isabsa=True)
# get_json_data_compare()
# download_data_and_compare_same()
# check_train_data()
eval4dems()
``` |
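The helpers above all finish by writing a `result.xlsx` whose rows carry `label` and `predict` columns. As a small illustrative sketch (not part of the repo), such a file could be re-read to recompute accuracy and a per-label breakdown:

```python
import pandas as pd

# Assumes a result.xlsx written by predict_comare_excel, with 'label' and 'predict' columns.
df = pd.read_excel("result.xlsx")

accuracy = (df["label"] == df["predict"]).mean()
print(f"accuracy: {accuracy:.3f} over {len(df)} samples")

# Per-label breakdown: how often each ground-truth label was predicted correctly.
per_label = (df["label"] == df["predict"]).groupby(df["label"]).mean()
print(per_label)
```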
{
"source": "johnson7788/transformers",
"score": 3
} |
#### File: transformers/myexample2/run_chinese_ref.py
```python
import argparse
import json
from typing import List
from tqdm import tqdm
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def is_chinese(word: str):
# word like '180' or '身高' or '神'
for char in word:
char = ord(char)
if not _is_chinese_char(char):
return 0
return 1
def get_chinese_word(tokens: List[str]):
word_set = set()
for token in tokens:
chinese_word = len(token) > 1 and is_chinese(token)
if chinese_word:
word_set.add(token)
word_list = list(word_set)
return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
"""
根据LTP对一个句子的分词,把bert的token转换成Whole word的形式,
Args:
bert_tokens: ['[CLS]', '老', '用', '户', '了', ',', '一', '直', '在', '用', '满', '婷', ',', '感', '觉', '对', '控', '痘', '控', '油', '效', '果', '挺', '好', '的', '[SEP]']
chinese_word_set: 中文的分词结果, ['效果', '用户', '感觉', '一直']
Returns:
['[CLS]', '老', '用', '##户', '了', ',', '一', '##直', '在', '用', '满', '婷', ',', '感', '##觉', '对', '控', '痘', '控', '油', '效', '##果', '挺', '好', '的', '[SEP]']
"""
if not chinese_word_set:
return bert_tokens
max_word_len = max([len(w) for w in chinese_word_set])
bert_word = bert_tokens
start, end = 0, len(bert_word)
while start < end:
single_word = True
if is_chinese(bert_word[start]):
l = min(end - start, max_word_len)
for i in range(l, 1, -1):
whole_word = "".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1, start + i):
bert_word[j] = "##" + bert_word[j]
start = start + i
single_word = False
break
if single_word:
start += 1
return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer, batch_size=1000):
"""
Args:
lines: 每行一个中文段落,
ltp_tokenizer: ltp的tokenizer处理器
bert_tokenizer: bert的tokenizer处理器
Returns:
"""
ltp_res = []
# batch_size等于100,每次处理100行,
print(f"开始用ltp模型进行分词处理...")
for i in tqdm(range(0, len(lines), batch_size)):
        # Call LTP to segment the words
        res = ltp_tokenizer.seg(lines[i : i + batch_size])[0]
        # Keep only the segments that are entirely Chinese
        res = [get_chinese_word(r) for r in res]
        # Append to ltp_res
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    # e.g. ltp_res contains results such as [['效果', '一直', '用户', '感觉'], ...]
    # Run the BERT tokenizer as well, batch_size lines at a time
    print("Running the BERT tokenizer...")
    bert_res = []
    for i in tqdm(range(0, len(lines), batch_size)):
        res = bert_tokenizer(lines[i : i + batch_size], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    # e.g. bert_res: [[101, 5439, 4500, 2787, 749, 8024, 671, 4684, 1762, 4500, 4007, 2051, 8024, 2697, 6230, 2190, 2971, 4576, 2971, 3779, 3126, 3362, 2923, 1962, 4638, 102], ...]
    # Make sure the number of lines matches
    print("Building the token-to-word mapping")
    assert len(bert_res) == len(lines)
    print_num = 5
ref_ids = []
for input_ids, chinese_word in zip(bert_res, ltp_res):
input_tokens = []
for id in input_ids:
token = bert_tokenizer._convert_id_to_token(id)
input_tokens.append(token)
# eg : ['[CLS]', '古', '##龙', '洗', '发', '##水', ',', '洗', '完', '头', '##发', '不', '干', '##燥', '、', '也', '不', '容', '##易', '油', '、', '不', '痒', ',', '味', '##道', '持', '##久', ',', '非', '##常', '柔', '##顺', ',', '而', '##且', '泡', '##泡', '很', '容', '##易', '冲', '##洗', '干', '##净', '泡', '##沫', '非', '##常', '细', '##腻', ',', '洗', '后', '头', '##发', '很', '滑', '很', '顺', ',', '洗', '了', '之', '##后', '就', '头', '##发', '很', '蓬', '##松', ',', '很', '香', ',', '而', '##且', '我', '洗', '了', '是', '没', '##有', '头', '##皮', '##屑', '的', '[SEP]']
input_tokens = add_sub_symbol(input_tokens, chinese_word)
ref_id = []
        # We only record the positions of Chinese sub-tokens starting with ##, meaning they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # Only keep the trailing part of a Chinese sub-word; ref_id maps all such sub-word continuations of this sentence to their BERT token positions
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        # Print the first 5 examples
        if print_num >0:
            example_num = 5 - print_num
            print(f"Sample {example_num}: {lines[example_num]}")
            print(f"LTP segmentation of sample {example_num}: {ltp_res[example_num]}")
            print(f"BERT tokenizer output of sample {example_num}: {bert_res[example_num]}")
            print(f"BERT tokens of sample {example_num} after whole-word marking with LTP: {input_tokens}")
            print(f"Final ref_id (positions of sub-word continuations) of sample {example_num}: {ref_id}")
            print()
            print_num -=1
        ref_ids.append(ref_id)
    # Make sure the sub-word mapping of every sentence has been saved
assert len(ref_ids) == len(bert_res)
return ref_ids
def main(args):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If you want to fine-tune these models, you must use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8", errors='ignore') as f:
        data = f.readlines()
    print(f'Start processing, {len(data)} lines in total')
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    print("Loading the LTP and BERT tokenizer models")
    ltp_tokenizer = LTP(path=args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    # Build the mapping
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    # Save the mapping
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
    print(f"Saved the mapping for all {len(data)} lines to {args.save_path}")
def thread_main(args, gpu=True):
"""
多线程处理
Args:
args:
gpu: 是否使用gpu
Returns:
"""
from functools import partial
from multiprocessing import Pool
from tqdm import tqdm
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If you want to fine-tune these models, you must use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    print(f'Start processing, {len(data)} lines in total')
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    print("Loading the LTP and BERT tokenizer models")
    ltp_tokenizer = LTP(path=args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    newdata = [data[i:i + 1000] for i in range(0, len(data), 1000)]
    # Build the mapping in parallel worker processes
    # If the GPU is used, set the start method as follows
    if gpu:
        import torch
        torch.multiprocessing.set_start_method('spawn')
    with Pool(processes=args.processes) as p:
        # partial_clean wraps prepare_ref with the tokenizers already bound
        partial_clean = partial(prepare_ref, ltp_tokenizer=ltp_tokenizer, bert_tokenizer=bert_tokenizer)
        # chunksize=8: each worker pulls tasks from the queue in chunks of 8 items (it does not split the data into 8 parts)
        ref_ids_nest = list(tqdm(p.imap(partial_clean, newdata, chunksize=8), desc="Processing data"))
    ref_ids = [ref for nest in ref_ids_nest for ref in nest]
    # Save the mapping
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
    print(f"Saved the mapping for all {len(data)} lines to {args.save_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Prepare Chinese whole-word references")
    parser.add_argument(
        "--file_name",
        type=str,
        default="data/demo.txt",
        help="The file to process, e.g. the training data, one text per line",
    )
    parser.add_argument(
        "--ltp", type=str, default="small", help="LTP tokenizer model; to use the small model just pass 'small'"
    )
    parser.add_argument("--bert", type=str, default="bert-base-chinese", help="BERT tokenizer model")
    parser.add_argument("--save_path", type=str, default="data/ref.txt", help="Where to save the output references")
    args = parser.parse_args()
    # Number of parallel processes
    # args.processes = 4
main(args)
# thread_main(args)
```
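A quick way to sanity-check the whole-word marking logic is to call `add_sub_symbol` directly on a toy token list. The example below is illustrative only; the tokens, the segmented word and the module name are made up.

```python
# Illustrative check of add_sub_symbol (assumes the file above is saved as run_chinese_ref.py).
from run_chinese_ref import add_sub_symbol

tokens = ['[CLS]', '效', '果', '挺', '好', '的', '[SEP]']
segmented_words = {'效果'}  # pretend LTP segmented "效果" as one word

marked = add_sub_symbol(list(tokens), segmented_words)
print(marked)
# Expected: ['[CLS]', '效', '##果', '挺', '好', '的', '[SEP]']
```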
#### File: myexample3/metric/repair.py
```python
import datasets
_CITATION = """\
@InProceedings{huggingface:metric,
title = {repair test},
authors={johnson
},
year={2020}
}
"""
_DESCRIPTION = """\
repair metric
"""
_KWARGS_DESCRIPTION = """
Describes how well the predictions match the references according to some scores.
Args:
    predictions: list of prediction scores. Each prediction should be a string with tokens separated by spaces.
    references: list of references, one per prediction. Each reference should be a string with tokens separated by spaces.
Returns:
    accuracy: description of the first score,
    another_score: description of another score
"""
# Define custom variables here if needed
# BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
def cal_matthews_corrcoef(references, predictions):
from sklearn.metrics import matthews_corrcoef
return matthews_corrcoef(references, predictions)
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
from sklearn.metrics import f1_score
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"accuracy": acc,
"f1": f1,
}
def pearson_and_spearman(preds, labels):
from scipy.stats import pearsonr, spearmanr
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
class SmoothMetric(datasets.Metric):
"""metric的描述"""
def _info(self):
# 会作为 datasets.MetricInfo 的信息
return datasets.MetricInfo(
# 这是将在metric页面上显示的描述。
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# 定义预测和真实标签的格式, 注意预测时的标签格式,一般为int格式, 如果是回归模型为float32
features=datasets.Features({
'predictions': datasets.Value("int64"),
'references': datasets.Value("int64"),
}),
homepage="http://metric.homepage",
#其它介绍信息
codebase_urls=["http://github.com/path/to/codebase/of/new_metric"],
reference_urls=["http://path.to.reference.url/new_metric"]
)
def _download_and_prepare(self, dl_manager):
"""如果需要的话,下载外部资源,不需要设置为pass"""
# TODO: Download external resources if needed
pass
# bad_words_path = dl_manager.download_and_extract(BAD_WORDS_URL)
# self.bad_words = set([w.strip() for w in open(bad_words_path, "r", encoding="utf-8")])
def _compute(self, predictions, references):
"""
计算指标返回, score计算, 可以根据不同的name,返回不同的计算方法,例如
if self.config_name == "cola":
return {"matthews_correlation": cal_matthews_corrcoef(references, predictions)}
elif self.config_name == "stsb":
return pearson_and_spearman(predictions, references)
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_f1(predictions, references)
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(predictions, references)}
Args:
predictions: 模型的预测值
references: 真实值
Returns:
"""
# 可以返回不同的score, 例如计算准确率
accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
        # Compute other scores if needed; in that case also return second_score below
# if self.config_name == "max":
# second_score = max(abs(len(i) - len(j)) for i, j in zip(predictions, references) if i not in self.bad_words)
return {
"accuracy": accuracy,
}
```
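The class above is meant to be loaded as a local metric script. A minimal usage sketch might look like the following; the file path is assumed, and `load_metric` comes from older `datasets` releases (newer code uses the separate `evaluate` package instead).

```python
import datasets

# Assumes the class above is saved as myexample3/metric/repair.py (path is hypothetical).
metric = datasets.load_metric("myexample3/metric/repair.py")

metric.add_batch(predictions=[1, 0, 1, 1], references=[1, 0, 0, 1])
print(metric.compute())  # e.g. {'accuracy': 0.75}
```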
#### File: myexample3/utils/gen_smooth_data.py
```python
from pathlib import Path
import re
import random, os
import json
from tqdm import tqdm
from collections import Counter
def search_and_read(path='/opt/nlp/pycorrect'):
data = []
for fpath in Path(path).rglob('*.mix'):
with open(fpath) as f:
for line in f:
                # Keep English-only lines with more than 20 whitespace-separated words
                if len(line.split()) > 20 and not re.search("[\u4e00-\u9fa5]", line):
                    # Split on commas and periods
                    line_split = re.split('[,.]', line)
                    for sentence in line_split:
                        if len(sentence) >10:
                            # Split on whitespace
                            tokens = sentence.split()
                            if len(tokens) > 4:
                                # The sentence must contain at least 5 words
                                data.append(tokens)
    cnt = Counter([len(d) for d in data])
    print(f"Generated {len(data)} lines; the most common sentence length is {cnt.most_common(1)[0][0]} with {cnt.most_common(1)[0][1]} samples; length distribution: {cnt}")
return data
def split_train_test(data):
"""
生成正负样本,保存到json文件, 生成8:1:1的样本,保存到train.json, test.json, dev.json
Args:
data: [txt1,txt2,txt3,...]
Returns:
"""
# 正样本样本组成[[sen1,sen2,yes]... ]
positive = []
negative = []
dir_path = "../dataset"
dev_file = os.path.join(dir_path, "dev.json")
test_file = os.path.join(dir_path,"test.json")
train_file = os.path.join(dir_path,"train.json")
    # Positive samples: randomly split one text, txt --> sentence1, sentence2
    # Negative samples: pair random halves from two different texts, txt1, txt2 --> sentence1(from txt1), sentence2(from txt2)
    def gen_neg_smaple(data):
        """
        Args:
            data: all samples
        Returns: a random tail part of a randomly chosen sentence, used as a negative sample
        """
        # Negative sample: randomly pick a sentence and keep a random tail
        neg_one = random.choice(data)
        neg_len = len(neg_one)
        neg_split = random.randrange(1, neg_len - 1)
        neg_half_sentence = " ".join(neg_one[neg_split:])
        return neg_half_sentence
    for one in tqdm(data, desc="Generating samples: "):
        # Positive sample
        pos_len = len(one)
        # Pick a random split point
        pos_split = random.randrange(1,pos_len-1)
        neg_half_sentence1 = gen_neg_smaple(data)
        neg_half_sentence2 = gen_neg_smaple(data)
        positive.append([" ".join(one[:pos_split]), " ".join(one[pos_split:]), "yes"])
        negative.append([neg_half_sentence1, neg_half_sentence2, "no"])
    print(f"Positive samples: {len(positive)}, negative samples: {len(negative)}")
examples = positive + negative
random.shuffle(examples)
    # Split the samples
total = len(examples)
train_num = int(total*0.8)
test_num = int(total*0.1)
train_data = examples[:train_num]
test_data = examples[train_num:train_num+test_num]
dev_data = examples[train_num+test_num:]
with open(train_file, 'w') as f:
json.dump(train_data, f)
with open(dev_file, 'w') as f:
json.dump(dev_data, f)
with open(test_file, 'w') as f:
json.dump(test_data, f)
print(f"训练集{train_num}, 测试集{test_num}, 开发集{total-train_num-test_num}")
if __name__ == '__main__':
data = search_and_read()
split_train_test(data)
```
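Assuming `split_train_test` above has written its json files into `../dataset`, a quick inspection of the generated sentence pairs could look like this (illustrative only):

```python
import json
from collections import Counter

# Assumes split_train_test() above has already written ../dataset/train.json.
with open("../dataset/train.json") as f:
    train_data = json.load(f)

print(len(train_data), "training pairs")
print(Counter(label for _, _, label in train_data))  # rough yes/no balance
sentence1, sentence2, label = train_data[0]
print(sentence1, "|", sentence2, "->", label)
```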
#### File: transformers/myexample/conll2003_run_ner.py
```python
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import ClassLabel, load_dataset
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorForTokenClassification,
HfArgumentParser,
PreTrainedTokenizerFast,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
label_all_tokens: bool = field(
default=False,
metadata={
"help": "Whether to put the label for one word on all tokens of generated by that word or just on the "
"one (in which case the other tokens will have a padding index)."
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
self.task_name = self.task_name.lower()
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it is the path to a json file, parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Check the output directory
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
    # Logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
    # Automatically download the dataset
    if data_args.dataset_name is not None:
        # Download and load the dataset
        conll_path = 'data/conll2003.py'
# conll_path = '/Users/admin/.cache/huggingface/datasets/5bbfb41c2d18b5da104592eb8efc82f70e1be1f17dd6d3c9a2beb1d9280e88b9.53af5bfc09eda28c03c3ba490c11c1bd6dd3d86633304d9f12406554943f2bb0.py'
# data_dir = '/Users/admin/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63ba56944e35c1943434322a07ceefd79864672041b7834583709af4a5de4664'
# datasets = load_dataset(path=data_args.dataset_name, name=data_args.dataset_config_name)
datasets = load_dataset(path=conll_path, name=data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.train_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files)
    # Select the data split to use
if training_args.do_train:
column_names = datasets["train"].column_names
features = datasets["train"].features
else:
column_names = datasets["validation"].column_names
features = datasets["validation"].features
    # Which column to use as text; column_names: ['chunk_tags', 'id', 'ner_tags', 'pos_tags', 'tokens']
    text_column_name = "tokens" if "tokens" in column_names else column_names[0]
    # Which column to use as the label, here 'ner_tags'
    label_column_name = (
        f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1]
    )
    # If the labels are not a `Sequence[ClassLabel]`, we need to iterate over the dataset to collect the unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
    # Check whether the label feature is already a ClassLabel type (predefined)
    if isinstance(features[label_column_name].feature, ClassLabel):
        # label_list: ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC']
        label_list = features[label_column_name].feature.names
        # Mapping from label to id (the labels are already encoded as ids here)
        label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(datasets["train"][label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
    # Load the pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    # Load the model configuration
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name, #ner
cache_dir=model_args.cache_dir, #None
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
)
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
    # Tokenizer check: this script only works with a fast tokenizer
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
    # Data preprocessing
    # Padding strategy
    padding = "max_length" if data_args.pad_to_max_length else False
    # Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(examples):
"""
datasets.map函数处理时会调用
Args:
examples: 这里是2条样本,
例如: examples = {dict: 5}
'chunk_tags' = {list: 2} [[11, 21, 11, 12, 21, 22, 11, 12, 0], [11, 12]]
'id' = {list: 2} ['0', '1']
'ner_tags' = {list: 2} [[3, 0, 7, 0, 0, 0, 7, 0, 0], [1, 2]]
'pos_tags' = {list: 2} [[22, 42, 16, 21, 35, 37, 16, 21, 7], [22, 22]]
'tokens' = {list: 2} [['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.'], ['Peter', 'Blackburn']]
Returns:
"""
        # Tokenize the 'tokens' field (the text field) of each example
        tokenized_inputs = tokenizer(
            examples[text_column_name],
            padding=padding,
            truncation=True,
            # We use this argument because the texts in the dataset are lists of words (each word with a label)
            is_split_into_words=True,
        )
labels = []
for i, label in enumerate(examples[label_column_name]):
            # Process each sample; label is a list such as [3, 0, 7, 0, 0, 0, 7, 0, 0]
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            # word_ids: [None, 0, 1, 2, 3, 4, 5, 6, 7, 8, None]
            previous_word_idx = None
            label_ids = []
            for word_idx in word_ids:
                # Special tokens have a word id of None. We set their label to -100 so they are automatically ignored by the loss function
                if word_idx is None:
                    label_ids.append(-100)
                # We set the label for the first token of each word.
                elif word_idx != previous_word_idx:
                    label_ids.append(label_to_id[label[word_idx]])
                # For the other tokens of a word we set the label to either the current label or -100, depending on the label_all_tokens flag.
                # This is how non-initial sub-tokens get their label, -100 by default
                else:
                    label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100)
                previous_word_idx = word_idx
            labels.append(label_ids)
        # The final labels is a list of lists, e.g.
# {list: 11}[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, -100]
# {list: 4}[-100, 1, 2, -100]
tokenized_inputs["labels"] = labels
return tokenized_inputs
    # Process the data with the map function
tokenized_datasets = datasets.map(
tokenize_and_align_labels,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
    # Data collator, called when iterating over the dataloader
    data_collator = DataCollatorForTokenClassification(tokenizer)
    # Compute metrics
def compute_metrics(p):
predictions, labels = p
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
true_labels = [
[label_list[l] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
return {
"accuracy_score": accuracy_score(true_labels, true_predictions),
"precision": precision_score(true_labels, true_predictions),
"recall": recall_score(true_labels, true_predictions),
"f1": f1_score(true_labels, true_predictions),
}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
        # model_path here is used to resume training from a checkpoint
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model() # Saves the tokenizer too for easy upload
    # Evaluate the model
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
results = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir, "eval_results_ner.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in results.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Predict
if training_args.do_predict:
logger.info("*** Predict ***")
test_dataset = tokenized_datasets["test"]
predictions, labels, metrics = trainer.predict(test_dataset)
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
if trainer.is_world_process_zero():
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
for prediction in true_predictions:
writer.write(" ".join(prediction) + "\n")
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
``` |
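The core preprocessing trick in the script above is aligning word-level NER labels to sub-word tokens via `word_ids()` and the `-100` ignore index. The standalone illustration below uses a made-up `word_ids` list and is not taken from the script:

```python
# Illustrative alignment of word-level labels to sub-word tokens using -100 (made-up example).
word_labels = [3, 0, 7]                      # one label per original word
word_ids = [None, 0, 1, 1, 2, None]          # as returned by tokenized_inputs.word_ids()

label_ids, previous = [], None
for word_idx in word_ids:
    if word_idx is None:
        label_ids.append(-100)               # special tokens are ignored by the loss
    elif word_idx != previous:
        label_ids.append(word_labels[word_idx])  # first sub-token of a word keeps the label
    else:
        label_ids.append(-100)               # remaining sub-tokens are ignored
    previous = word_idx

print(label_ids)  # [-100, 3, 0, -100, 7, -100]
```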
{
"source": "johnson7788/webinfo-crawler",
"score": 2
} |
#### File: webinfo-crawler/qichacha/crawler.py
```python
import logging as log
from qichacha.client import QichachaClient
from qichacha.manager import QichachaManager
from db.model.model import Company
from db.mysql_connector import insert as save
# Qichacha client
qcc_client = QichachaClient()
manager = QichachaManager()
def start():
    keywords = globals().get('keywords')
    for keyword in keywords:
        raw_companies = qcc_client.search(keyword)
        cost_time = 2 * len(raw_companies) + 4
        log.info('Crawling [%s], which will take roughly %s seconds' % (keyword, cost_time))
        # The company object
        company = Company()
        for raw_company in raw_companies:
            company.keyword = keyword
            # Assemble the company information
            manager.assembly(company, raw_company)
            raw_company_detail = qcc_client.search_detail(raw_company.get('KeyNo'))
            # Add the detailed company information
            manager.assembly_detail(company, raw_company_detail)
            # Save to the database
            # save(company.__dict__)
            log.info(company)
            company.clear()
    log.info('completed')
def load_keys(keys: list):
globals().setdefault('keywords', keys)
```
#### File: webinfo-crawler/tianyancha/manager.py
```python
from db.model.model import Company
class TianyanchaManager(object):
@classmethod
def assembly(cls, company: Company, raw_company: dict):
company.name = raw_company.get('name', '-').replace('<em>', '').replace('</em>', '')
company.representative = raw_company.get('legalPersonName', '-')
company.address = raw_company.get('regLocation', '-')
company.region = raw_company.get('base', '-')
company.city = raw_company.get('city', '-')
company.district = raw_company.get('district', '-')
company.biz_status = raw_company.get('regStatus', '-')
company.credit_code = raw_company.get('creditCode', '-')
        company.email = raw_company.get('emails', '-').split(';')[0].replace('\t', '')
company.phone = raw_company.get('phoneNum', '-')
company.biz_scope = raw_company.get('businessScope', '-')
company.company_type = raw_company.get('companyOrgType', '-').replace('\t', '')
company.taxpayer_code = raw_company.get('creditCode', '-')
company.registered_capital = raw_company.get('regCapital', '-')
company.lat_long = str({
'lat': raw_company.get('latitude', '-'),
'long': raw_company.get('longitude', '-')
})
company.setup_time = raw_company.get('estiblishTime', '-')[0:10]
@classmethod
def assembly_detail(cls, company: Company, raw_company_detail: dict):
company.homepage = raw_company_detail.get('websiteList', '-')
company.register_code = raw_company_detail.get('regNumber', '-')
company.organization_code = raw_company_detail.get('orgNumber', '-')
company.english_name = raw_company_detail.get('property3', '-')
company.authorization = raw_company_detail.get('regInstitute', '-')
company.actual_capital = raw_company_detail.get('actualCapital', '缺省')
company.industry = raw_company_detail.get('industry', '-')
company.used_name = raw_company_detail.get('historyNames', '-')
``` |
{
"source": "johnson880319/Software",
"score": 3
} |
#### File: include/anti_instagram/AntiInstagram.py
```python
from .kmeans import getparameters2, identifyColors, runKMeans
from .scale_and_shift import scaleandshift
from anti_instagram.kmeans import CENTERS, CENTERS2
import numpy as np
from duckietown_utils import logger
def calculate_transform(image):
"""
Returns tuple (bool, float, parameters)
success, health, parameters
parameters['scale']
parameters['shift']
"""
centers4 = CENTERS2
trained4, counter4, score4 = runKMeans(image, num_colors=4, init=centers4)
trained4 = trained4[[0, 2, 3], :]
counter4 = [counter4[0], counter4[2], counter4[3]]
centers4 = centers4[[0, 2, 3], :]
centers3 = CENTERS
trained3, counter3,score3 = runKMeans(image, num_colors=3, init=centers3)
    # Pick 3 colors unless the 4-color score beats the 3-color score by a clear margin
    decision34 = (score3 + 3e7) > score4
if (decision34):
logger.info("picked 3 colors")
trained=trained3
counter=counter3
centers=centers3
else:
logger.info("picked 4 colors")
trained=trained4
counter=counter4
centers=centers4
# milansc: for debug reasons
print('Score for 3 colors: ' + str(score3))
print('Score for 4 colors: ' + str(score4))
print('...in calculate_transform: found centers:')
print(centers)
print('...in calculate_transform: found counter:')
print(counter)
print('...in calculate_transform: found trained centers:')
print(trained)
mapping = identifyColors(trained, centers)
r, g, b, cost = getparameters2(mapping, trained, counter, centers)
if r[0][0] == 0.0:
# XXX: not sure what this is supposed to be
return False, 0.0, None
scale = np.array([r[0][0][0],g[0][0][0],b[0][0][0]])
shift = np.array([r[1][0], g[1][0],b[1][0]])
eps = np.finfo('double').eps
health = 1.0 / (cost + eps)
parameters = dict(scale=scale, shift=shift)
return True, float(health), parameters
# # Estimates the scale and shift over multiple frame via an IIR filter with preference towards low-cost frames
# IIR_weight=1000/(10000+cost)
# #logger.info("cost = %f, IIR_weight = %f" % (cost, IIR_weight))
# # self.scale = [r[0][0][0],g[0][0][0],b[0][0][0]]
# # self.shift = [r[1][0], g[1][0],b[1][0]]
# deltascale = np.array([r[0][0][0],g[0][0][0],b[0][0][0]])
# deltashift = np.array([r[1][0], g[1][0],b[1][0]])
# if testframe:
# self.scale = deltascale
# self.shift = deltashift
# else:
# self.scale = (self.scale+deltascale*IIR_weight)/(1+IIR_weight)
# self.shift = (self.shift+deltashift*IIR_weight)/(1+IIR_weight)
class ScaleAndShift():
""" Represents the transformation """
def __init__(self, scale, shift):
self.scale = scale
self.shift = shift
def __call__(self, image):
corrected_image = scaleandshift(image, self.scale, self.shift)
return corrected_image
@staticmethod
def identity():
return ScaleAndShift([1.0,1.0,1.0], [0.0,0.0,0.0])
class AntiInstagram():
def __init__(self):
self.scale = [1.0, 1.0, 1.0]
self.shift = [0.0, 0.0, 0.0]
self.health = 0
def applyTransform(self, image):
corrected_image = scaleandshift(image, self.scale, self.shift)
return corrected_image
def calculateTransform(self, image, testframe=False):
success, self.health, parameters = calculate_transform(image)
if not success:
raise Exception('calculate_transform failed')
self.scale = parameters['scale']
self.shift = parameters['shift']
def calculateHealth(self):
return self.health
```
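A minimal usage sketch of the `AntiInstagram` class above (not from the original package), assuming OpenCV is available and the import path matches the file header:

```python
import cv2
from anti_instagram.AntiInstagram import AntiInstagram  # assumes the module path shown in the file header

# Load any BGR image; the path is a placeholder.
image = cv2.imread("sample_frame.png")

ai = AntiInstagram()
ai.calculateTransform(image)            # estimates scale and shift via k-means
print("health:", ai.calculateHealth())
corrected = ai.applyTransform(image).clip(0, 255).astype("uint8")  # apply the color correction
cv2.imwrite("sample_frame_corrected.png", corrected)
```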
#### File: include/anti_instagram/kmeans_rebuild.py
```python
from anti_instagram.calcLstsqTransform import calcTransform
import sys
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
from matplotlib.patches import Rectangle
from sklearn.cluster import KMeans
from collections import Counter
from anti_instagram.geom import processGeom2
import math
CENTERS_BRYW = np.array([[60, 60, 60], [60, 60, 240], [50, 240, 240], [240, 240, 240]]);
CENTERS_BYW = np.array([[60, 60, 60], [50, 240, 240], [240, 240, 240]])
class kMeansClass:
""" This class gives the ability to use the kMeans alg. with different numbers of initial centers """
input_image = []
resized_image = []
blurred_image = []
image_array = []
num_centers = -1
blur_alg = []
fac_resize = -1
blur_kernel = -1
trained_centers = []
labels = []
labelcount = Counter()
color_array = []
color_image_array = []
# initialize
def __init__(self, numCenters, blurAlg, resize, blurKer):
self.input_image = None
self.num_centers = int(numCenters)
self.blur_alg = blurAlg
self.fac_resize = float(resize)
self.blur_kernel = int(blurKer)
# set up array for center colors
self.color_image_array = np.zeros((self.num_centers, 200, 200, 3), np.uint8)
# re-shape input image for kMeans
def _getimgdatapts(self, cv2img, fancyGeom=False):
x, y, p = cv2img.shape
if not fancyGeom:
img_geom = cv2img[int(x * 0.3):(x - 1), :, :]
x_new, y_new, p = img_geom.shape
cv2_tpose = img_geom.transpose()
cv2_arr_tpose = np.reshape(cv2_tpose, [p, x_new * y_new])
else:
mask = processGeom2(cv2img)
img_geom = np.expand_dims(mask, axis=-1)*cv2img
mask = mask.transpose()
inds = np.array(np.nonzero(mask))
cv2_tpose = np.transpose(img_geom)
cv2_arr_tpose = cv2_tpose[:, inds[0, :], inds[1, :]]
npdata = np.transpose(cv2_arr_tpose)
return npdata
def _blurImg(self):
# blur image using median:
if self.blur_alg == 'median':
self.blurred_image = cv2.medianBlur(self.resized_image, self.blur_kernel)
# blur image using gaussian:
elif self.blur_alg == 'gaussian':
self.blurred_image = cv2.GaussianBlur(self.resized_image, (self.blur_kernel, self.blur_kernel), 0)
else:
self.blurred_image = self.resized_image
# apply kMeans alg
def applyKM(self, img, fancyGeom=False):
self.input_image = img
# resize image
self.resized_image = cv2.resize(self.input_image, (0, 0), fx=self.fac_resize, fy=self.fac_resize)
# blur image
self._blurImg()
# prepare KMeans
kmc = KMeans(n_clusters=self.num_centers, init='k-means++', max_iter=20)
# prepare data points
self.image_array = self._getimgdatapts(self.blurred_image, fancyGeom=fancyGeom)
# run KMeans
kmc.fit(self.image_array)
# get centers, labels and labelcount from KMeans
self.trained_centers = kmc.cluster_centers_
self.labels = kmc.labels_
for i in np.arange(self.num_centers):
self.labelcount[i] = np.sum(self.labels == i)
def determineColor(self, withRed, trained_centers):
# define the true centers. This color is preset. The color transformation
# tries to transform a picture such that the black areas will become true black.
# The same applies for yellow, white and (if valid) red.
trueBlack = [60, 60, 60]
trueYellow = [50, 240, 240]
trueWhite = [240, 240, 240]
if (withRed):
trueRed = [60, 60, 240]
# initialize arrays which save the errors to each true center
# later the minimal error cluster center will be defined as this color
errorBlack = np.zeros(self.num_centers)
errorYellow = np.zeros(self.num_centers)
errorWhite = np.zeros(self.num_centers)
if (withRed):
errorRed = np.zeros(self.num_centers)
# determine the error for each trained cluster center to all true centers
for i in range(self.num_centers):
errorBlack[i] = np.linalg.norm(trueBlack - trained_centers[i])
errorYellow[i] = np.linalg.norm(trueYellow - trained_centers[i])
errorWhite[i] = np.linalg.norm(trueWhite - trained_centers[i])
if (withRed):
errorRed[i] = np.linalg.norm(trueRed - trained_centers[i])
nTrueCenters = 3
# sort the error arrays and save the corresponding index of the original array
# in the following array. This allows us to determine the index of the cluster.
errorBlackSortedIdx = np.argsort(errorBlack)
errorYellowSortedIdx = np.argsort(errorYellow)
errorWhiteSortedIdx = np.argsort(errorWhite)
# errorSorted = np.vstack([errorBlack, errorWhite, errorYellow])
if (withRed):
errorRedSortedIdx = np.argsort(errorRed)
#errorSorted = np.vstack((errorSorted,errorRed))
if (withRed):
nTrueCenters = 4
ListOfIndices = []
# boolean variables to determine whether the minimal error index has been found
blackIdxFound = False
whiteIdxFound = False
yellowIdxFound = False
if (withRed):
redIdxFound = False
centersFound = False
index = 0
#w,h = errorSorted.shape
#errorList = np.reshape(errorSorted,(w*h))
# find for every true center the corresponding trained center.
# this code considers the global minimum for assigning clusters,
# instead of assigning first black, then white, yellow and red
while (not centersFound):
if errorBlackSortedIdx[index] not in ListOfIndices and not blackIdxFound:
ListOfIndices.append(errorBlackSortedIdx[index])
# print str(index) + " in black " + str(ListOfIndices)
blackIdxFound = True
idxBlack = errorBlackSortedIdx[index]
if errorWhiteSortedIdx[index] not in ListOfIndices and not whiteIdxFound:
ListOfIndices.append(errorWhiteSortedIdx[index])
# print str(index) + " in white " + str(ListOfIndices)
whiteIdxFound = True
idxWhite = errorWhiteSortedIdx[index]
if errorYellowSortedIdx[index] not in ListOfIndices and not yellowIdxFound:
ListOfIndices.append(errorYellowSortedIdx[index])
# print str(index) + " in yellow " + str(ListOfIndices)
yellowIdxFound = True
idxYellow = errorYellowSortedIdx[index]
if withRed:
if errorRedSortedIdx[index] not in ListOfIndices and not redIdxFound:
ListOfIndices.append(errorRedSortedIdx[index])
redIdxFound = True
# print str(index) + "in red" + str(ListOfIndices)
idxRed = errorRedSortedIdx[index]
# print "True?: " + str(redIdxFound) + str(yellowIdxFound) + str(whiteIdxFound) + str(blackIdxFound)
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound and redIdxFound
# print "centersFound: " + str(centersFound)
else:
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound
index = index + 1
# print "End of while loop. Index: " + str(index)
# return the minimal error indices for the trained centers.
if (withRed):
return idxBlack, idxRed, idxYellow, idxWhite,
else:
return idxBlack, idxYellow, idxWhite
def detectOutlier(self, trainedCenters, trueCenters): # YWRB
n_centers, n_channels = trainedCenters.shape
# trueCenters = np.vstack([[50, 240, 240], [240, 240, 240], [60, 60, 240], [60, 60, 60]]) # YWRB
# print n_centers
errors = np.zeros(n_centers)
errorArrayTotal = np.zeros(n_centers)
# leave one trained center out and estimate transform with the rest of the centers
for i in range(n_centers):
# leave out i-th trained center
trainedCentersTemp = np.vstack([trainedCenters[0:i, :], trainedCenters[i + 1:n_centers, :]])
trueCenterstemp = np.vstack([trueCenters[0:i, :], trueCenters[i + 1:n_centers, :]])
# calculate transform with the other centers
T = calcTransform(n_centers - 1, trainedCentersTemp, trueCenterstemp)
T.calcTransform()
# print "the transform is: shift - " + str(T.shift) + ", scale - " + str(T.scale)
# print "left out " + str(leaveOut) + ", new centers: " + str(tempArray)
# transformedCenters = np.zeros((tempArray.shape))
errorArray = np.zeros(n_centers)
# estimate the error of the transformed trained centers wrt. the true centers
for j in range(n_centers):
tempTrafoCenter = self.transformOneCenter(trainedCenters[j, :], T.shift, T.scale)
tempTrueCenter = trueCenters[j]
errorArray[j] = self.estimateError(tempTrafoCenter, tempTrueCenter)
errorArrayTotal[i] = np.sum(errorArray)
# print "error of trafo: " + str(errorArrayTotal[i])
errorArraySortedIdx = np.argsort(errorArrayTotal)
averageError = np.average(errorArrayTotal[1:n_centers])
# print(averageError)
return errorArraySortedIdx[0], [errorArraySortedIdx[0]], averageError # return first element.
# this set of centers leads to the lowest error and the left out center is therefore the outlier.
def estimateError(self, trained_center, truecenter):
return np.linalg.norm(truecenter - trained_center)
def transformOneCenter(self, center, shift, scale):
center = np.array(center)
shift = np.array(shift)
scale = np.array(scale)
transformedCenter = np.zeros(center.shape)
n_channels, = center.shape
# print n_channels
for i in range(n_channels):
transformedCenter[i] = center[i] * scale[i] + shift[i]
return transformedCenter
```
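For reference, a minimal driver sketch for `kMeansClass` from `kmeans_rebuild.py`: construct it with the clustering parameters, hand it an image via `applyKM`, then map the trained centers to the semantic colors with `determineColor`. The image path is a placeholder; the constructor and method signatures match the class above.

```python
# Minimal driver sketch for kmeans_rebuild.kMeansClass (image path is hypothetical).
import cv2
from anti_instagram.kmeans_rebuild import kMeansClass

km = kMeansClass(numCenters=4, blurAlg='median', resize=0.1, blurKer=5)
img = cv2.imread('test_images/pic1.jpg', cv2.IMREAD_UNCHANGED)  # hypothetical path
km.applyKM(img)                       # resize, blur, reshape, fit KMeans
# map the 4 trained centers to black / red / yellow / white
idxBlack, idxRed, idxYellow, idxWhite = km.determineColor(True, km.trained_centers)
print(km.trained_centers[[idxBlack, idxRed, idxYellow, idxWhite]])
```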
#### File: camera_rectifier/src/camera_rectifier_node.py
```python
import rospy
from camera_image_rectifier import CameraImageRectifier
from duckietown_utils import get_base_name, rgb_from_ros
from sensor_msgs.msg import CompressedImage
class CameraRectifierNode(object):
def __init__(self):
self.node_name = "Camera Rectifier Node"
robot_name = rospy.get_param("~robot_name")
self.image_rectifier = CameraImageRectifier(robot_name)
# Create a Publisher
self.pub_topic = '/{}/camera_rectifier/image/compressed'.format(
robot_name)
self.publisher = rospy.Publisher(
self.pub_topic, CompressedImage, queue_size=1)
# Create a Subscriber
self.sub_topic = '/{}/camera_node/image/compressed'.format(robot_name)
self.subscriber = rospy.Subscriber(
self.sub_topic, CompressedImage, self.callback)
def callback(self, image):
rectified_img = CompressedImage()
rectified_img.header.stamp = image.header.stamp
rectified_img.format = "jpeg"
rectified_img.data = self.image_rectifier.process_image(
rgb_from_ros(image))
# publish new message
self.publisher.publish(rectified_img)
if __name__ == '__main__':
rospy.init_node('camera_rectifier_node', anonymous=False)
camera_rectifier_node = CameraRectifierNode()
rospy.spin()
```
#### File: anti_instagram/sandbox/error_estimation.py
```python
import sys
import os
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import cv2
import numpy as np
import datetime
class estimateError:
""" This class evaluates the quality of an image, which was transformed by a certain color transformation """
# contains image to be analysed
image = []
out_image = []
# hardcoded colors
black = [60, 60, 60]
red = [60, 60, 240]
yellow = [50, 240, 240]
white = [240, 240, 240]
# container for the polygons
# TODO possibility to add more than one polygon per color
polygon_black = Polygon()
polygon_white = Polygon()
polygon_yellow = Polygon()
polygon_red = Polygon()
# containers for pixels within polygons
pix_black = []
pix_white = []
pix_yellow = []
pix_red = []
# create instance with an image
def __init__(self, input_image):
self.image = input_image
self.out_image = input_image
print('created instance of estimateError!')
    def printVertices(self, polygon):
        # draw the polygon outline in black by connecting consecutive vertices
        coords = polygon.exterior.coords
        for i in range(4):
            p_start = (int(coords[i][0]), int(coords[i][1]))
            p_end = (int(coords[(i + 1) % 4][0]), int(coords[(i + 1) % 4][1]))
            cv2.line(self.out_image, p_start, p_end, (0, 0, 0))
def GetErrorEstimation(self):
height, width = self.image.shape[:2]
print(self.image.shape[:2])
# loop over image
for j in range(width):
for i in range(height):
point = Point(j, i)
# check if current pixel is within black polygon
if(self.polygon_black.contains(point)):
# store indices to pixel
self.pix_black.append(self.image[i,j,:])
# check if current pixel is within white polygon
elif (self.polygon_white.contains(point)):
# store indices to pixel
self.pix_white.append(self.image[i,j,:])
# check if current pixel is within yellow polygon
elif (self.polygon_yellow.contains(point)):
# store indices to pixel
self.pix_yellow.append(self.image[i,j,:])
# check if current pixel is within red polygon
elif (self.polygon_red.contains(point)):
# store indices to pixel
self.pix_red.append(self.image[i,j,:])
print('black len = ' + str(len(self.pix_black)))
print('white len = ' + str(len(self.pix_white)))
print('yellow len = ' + str(len(self.pix_yellow)))
print('red len = ' + str(len(self.pix_red)))
average_black = np.average(self.pix_black, axis=0)
average_white = np.average(self.pix_white, axis=0)
average_yellow = np.average(self.pix_yellow, axis=0)
average_red = np.average(self.pix_red, axis=0)
variance_black = np.var(self.pix_black, axis=0)
variance_white = np.var(self.pix_white, axis=0)
variance_yellow = np.var(self.pix_yellow, axis=0)
variance_red = np.var(self.pix_red, axis=0)
dist_black = np.linalg.norm(average_black - self.black)
dist_white = np.linalg.norm(average_white - self.white)
dist_yellow = np.linalg.norm(average_yellow - self.yellow)
dist_red = np.linalg.norm(average_red - self.red)
print(' ')
print('summary:')
print(' ')
print(' ')
print('BLACK:')
print('average = ' + str(average_black))
print('distance = ' + str(dist_black))
print('variance = ' + str(variance_black))
print(' ')
print('WHITE')
print('average = ' + str(average_white))
print('distance = ' + str(dist_white))
print('variance = ' + str(variance_white))
print(' ')
print('YELLOW')
print('average = ' + str(average_yellow))
print('distance = ' + str(dist_yellow))
print('variance = ' + str(variance_yellow))
print(' ')
print('RED')
print('average = ' + str(average_red))
print('distance = ' + str(dist_red))
print('variance red = ' + str(variance_red))
self.printVertices(self.polygon_black)
self.printVertices(self.polygon_white)
self.printVertices(self.polygon_yellow)
self.printVertices(self.polygon_red)
cv2.imshow('polygons', self.out_image)
cv2.waitKey(0)
# this function takes a dictionary containing the polygons for the colors and creates the internal polygons:
# input:
# - polygons: dictionary {'black' : [], 'white' : [], 'yellow' : [], 'red' : []}
def createPolygon(self, polygons):
self.polygon_black = Polygon(polygons['black'])
self.polygon_white = Polygon(polygons['white'])
self.polygon_yellow = Polygon(polygons['yellow'])
self.polygon_red = Polygon(polygons['red'])
print('Created Polygons with the following vertices:')
print('black: ' + str(self.polygon_black))
print('white: ' + str(self.polygon_white))
print('yellow: ' + str(self.polygon_yellow))
print('red: ' + str(self.polygon_red))
#def createPolygonByClick(self):
# the main function takes an image as argument, and calculates the estimated error
def main():
# check number of arguments
if len(sys.argv) != 3:
print('This program takes an image file and an output directory as input.')
sys.exit(2)
# store input
file = sys.argv[1]
outdir = sys.argv[2]
# check if file exists
if not os.path.isfile(file):
print('file not found')
sys.exit(2)
# check if dir exists, create if not
if not os.path.exists(outdir):
os.makedirs(outdir)
# read the image
img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
"""
# polygons for pic1_smaller.jpg and pic2_smaller.jpg (hardcoded for now)
polygon_black = [(280, 570), (220, 760), (870, 750), (810, 580)]
polygon_white = [(781, 431), (975, 660), (1040, 633), (827, 418)]
polygon_yellow = [(131, 523), (67, 597), (99, 609), (161, 530)]
polygon_red = [(432, 282), (418, 337), (577, 338), (568, 283)]
"""
# polygons for pic3_smaller.jpg and pic2_smaller.jpg (hardcoded for now)
polygon_black = [(280, 570), (220, 760), (870, 750), (810, 580)]
polygon_white = [(900, 520), (1000, 640), (1060, 620), (970, 515)]
polygon_yellow = [(234, 430), (190, 485), (230, 490), (270, 430)]
polygon_red = [(285, 435), (250, 490), (830, 480), (800, 437)]
"""
# polygons for pic4_smaller.jpg (hardcoded for now)
polygon_black = [(316, 414), (215, 623), (783, 605), (673, 422)]
polygon_white = [(710, 388), (947, 656), (1018, 620), (788, 400)]
polygon_yellow = [(148, 474), (94, 537), (133, 542), (184, 475)]
polygon_red = [(285, 435), (250, 490), (830, 480), (800, 437)]
# polygons for pic5_smaller.jpg (hardcoded for now)
polygon_black = [(354, 418), (291, 612), (804, 590), (677, 396)]
polygon_white = [(783, 424), (949, 602), (1002, 564), (840, 420)]
polygon_yellow = [(344, 307), (331, 319), (354, 319), (366, 306)]
polygon_red = [(135, 325), (119, 332), (325, 316), (332, 309)]
"""
# create dictionary containing colors
polygons = {'black': polygon_black, 'white': polygon_white, 'yellow': polygon_yellow, 'red': polygon_red}
# initialize estimateError class
E = estimateError(img)
# set polygons
E.createPolygon(polygons)
# estimate error
E.GetErrorEstimation()
# write the corrected image
date = datetime.datetime.now().strftime("%H-%M-%S")
path = outdir + '/' + str(date) + '_polygon.jpg'
cv2.imwrite(path, E.out_image)
if __name__ == "__main__":
sys.exit(main())
```
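The per-pixel loop in `GetErrorEstimation` above tests every pixel against each shapely polygon, which is slow on full-resolution images. A faster, roughly equivalent approach (a sketch only, not part of the original script) rasterizes each polygon once with `cv2.fillPoly` and then selects the inside pixels with a boolean mask; `pixels_in_polygon` is a hypothetical helper name.

```python
# Sketch of a vectorized alternative to the per-pixel polygon test.
# `image` is a BGR numpy array, `vertices` a list of (x, y) tuples as in main().
import cv2
import numpy as np

def pixels_in_polygon(image, vertices):
    # rasterize the polygon into a binary mask, then index the image with it
    mask = np.zeros(image.shape[:2], dtype=np.uint8)
    cv2.fillPoly(mask, [np.array(vertices, dtype=np.int32)], 255)
    return image[mask == 255]          # (N, 3) array of pixels inside the polygon

# example: mean color inside the black polygon and its distance to the reference value
# pix_black = pixels_in_polygon(img, polygon_black)
# dist_black = np.linalg.norm(pix_black.mean(axis=0) - [60, 60, 60])
```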
#### File: anti_instagram/sandbox/KM_and_transform_pipeline.py
```python
import numpy as np
#from anti_instagram.AntiInstagram import *
from anti_instagram.scale_and_shift import *
from anti_instagram.calcLstsqTransform import *
from anti_instagram.kmeans_rebuild import *
import sys
import os
import matplotlib.pyplot as plt
import argparse
"""
This function does three things:
1) It calls the Kmeans alg. with the desired number of initial centers. Other parameters can be tweaked, see -h option.
2) The function passes the centers trained by the KMeans alg. to the calcTransform function. It then calculates
a transform once based on three centers and once based on four centers (without and with the color red)
3) It takes the transform with the better result (most frequent three centers) and applies the transform on the
input image.
"""
def main():
# define and parse command line arguments
parser = argparse.ArgumentParser(
description='Perform kMeans with n initial centers.')
parser.add_argument('img_path', help='path to the image')
parser.add_argument('n_centers', help='numbers of initial centers')
parser.add_argument('--resize', default='0.1',
help='factor of downsampling the input image. DEFAULT = 0.1')
parser.add_argument('--blur', default='median',
                        help="blur algorithm. 'median' or 'gaussian'. DEFAULT = median")
parser.add_argument('--blur_kernel', default='5',
help='size of kernel for blurring. DEFAULT = 5')
parser.add_argument('--output_dir', default='./output_images',
help='directory for the output images. DEFAULT = ./output_images')
parser.add_argument('--fancyGeom', default=False, action='store_true',
help='use the contour detector to find regions of interest')
args = parser.parse_args()
# check if file exists
if not os.path.isfile(args.img_path):
print('file not found')
sys.exit(2)
# check if dir exists, create if not
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# check resize factor
    if not (0 < float(args.resize) <= 1):
        print('resize factor must be between 0 and 1')
sys.exit(2)
# check blur alg
if not (args.blur == "median" or args.blur == "gaussian"):
print('blur alg must be median or gaussian')
sys.exit(2)
# check kernel size
print(args.blur_kernel)
if (int(args.blur_kernel) % 2 == 0):
print('kernel size must be odd')
sys.exit(2)
    # create instance of kMeans (the class from kmeans_rebuild takes only the
    # clustering parameters; the image itself is handed to applyKM below)
    KM = kMeansClass(args.n_centers, args.blur, args.resize, args.blur_kernel)
    # read the image and apply KMeans
    input_img = cv2.imread(args.img_path, cv2.IMREAD_UNCHANGED)
    KM.applyKM(input_img, fancyGeom=args.fancyGeom)
# get the indices of the matched centers
idxBlack, idxRed, idxYellow, idxWhite = KM.determineColor(True, KM.trained_centers)
# get centers with red
trained_centers = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxRed],
KM.trained_centers[idxYellow], KM.trained_centers[idxWhite]])
# get centers w/o red
trained_centers_woRed = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
KM.trained_centers[idxWhite]])
print(trained_centers)
# calculate transform with 4 centers
T4 = calcTransform(4, trained_centers)
T4.calcTransform()
# calculate transform with 3 centers
T3 = calcTransform(3, trained_centers_woRed)
T3.calcTransform()
# compare residuals
# TODO verify if we can compare the residuals like this
if T4.returnResidualNorm() >= T3.returnResidualNorm():
shift = T4.shift
scale = T4.scale
else:
shift = T3.shift
scale = T3.scale
# apply transform
corrected_img = scaleandshift2(KM.input_image, scale, shift)
corrected_image_cv2 = np.clip(
corrected_img, 0, 255).astype(np.uint8)
cv2.namedWindow('corrected', flags=cv2.WINDOW_NORMAL)
cv2.imshow('corrected', corrected_image_cv2)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
sys.exit(main())
```
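The TODO in the pipeline above questions whether the residual norms of the 3-center and 4-center fits can be compared directly; a raw least-squares residual sum grows with the number of fitted points, so normalizing by the point count is the safer convention. Below is a small self-contained illustration; plain `numpy.linalg.lstsq` stands in for `calcTransform`, whose internals are not shown here, and the center values are made up.

```python
# Illustration: residual sums from fits with different numbers of points are
# only comparable after normalization. numpy.linalg.lstsq stands in for the
# per-channel fit done by calcTransform (an assumption about its internals).
import numpy as np

def fit_scale_shift(trained, true):
    # solve true ~= scale * trained + shift for one channel, return the residual sum
    A = np.column_stack([trained, np.ones_like(trained)])
    _, residuals, _, _ = np.linalg.lstsq(A, true, rcond=None)
    return residuals[0] if residuals.size else 0.0

trained3 = np.array([55., 47., 233.])            # e.g. black/yellow/white, one channel
true3 = np.array([60., 50., 240.])
trained4 = np.array([55., 47., 233., 58.])       # with a red center added
true4 = np.array([60., 50., 240., 60.])

r3 = fit_scale_shift(trained3, true3)
r4 = fit_scale_shift(trained4, true4)
print('raw residuals:', r3, r4)
print('per-point residuals:', r3 / 3, r4 / 4)    # comparable across 3 vs 4 centers
```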
#### File: anti_instagram/sandbox/KMeansTrialczuidema.py
```python
import sys
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
from matplotlib.patches import Rectangle
from sklearn.cluster import KMeans
from collections import Counter
import math
from anti_instagram.calcLstsqTransform import calcTransform
from anti_instagram.AntiInstagram import *
from anti_instagram.scale_and_shift import *
# from .scale_and_shift import scaleandshift
# from .scale_and_shift import scaleandshift2
from anti_instagram.simpleColorBalanceClass import *
from colorBalanceKMeans import *
from outlierEstimation import *
class kMeanClass:
""" This class gives the ability to use the kMeans alg. with different numbers of initial centers """
input_image = []
resized_image = []
blurred_image = []
image_array = []
num_centers = -1
blur_alg = []
fac_resize = -1
blur_kernel = -1
trained_centers = []
labels = []
labelcount = Counter()
color_array = []
color_image_array = []
# initialize
def __init__(self, inputImage, numCenters, blurAlg, resize, blurKer):
self.input_image = inputImage
self.num_centers = int(numCenters)
self.blur_alg = blurAlg
self.fac_resize = float(resize)
self.blur_kernel = int(blurKer)
self.shiftB = None
self.shiftG = None
self.shiftR = None
# set up array for center colors
self.color_image_array = np.zeros((self.num_centers, 200, 200, 3), np.uint8)
print('created instance of kMeans with arguments:')
print(' number of centers = ' + str(self.num_centers))
print(' blur algorithm = ' + str(self.blur_alg))
print(' resize factor = ' + str(self.fac_resize))
print(' blurring kernel size = ' + str(self.blur_kernel))
# re-shape input image for kMeans
def _getimgdatapts(self, cv2img):
x, y, p = cv2img.shape
img_geom = cv2img[int(x*0):(x-1), :, :]
x_new, y_new, p = img_geom.shape
cv2_tpose = img_geom.transpose()
cv2_arr_tpose = np.reshape(cv2_tpose, [p, x_new * y_new])
npdata = np.transpose(cv2_arr_tpose)
return npdata
def _blurImg(self):
# blur image using median:
if self.blur_alg == 'median':
self.blurred_image = cv2.medianBlur(self.resized_image, self.blur_kernel)
# blur image using gaussian:
elif self.blur_alg == 'gaussian':
self.blurred_image = cv2.GaussianBlur(self.resized_image, (self.blur_kernel, self.blur_kernel), 0)
def _plotColors(self):
# loop over all centers
for center in np.arange(self.num_centers):
# get color
color_i = tuple(
[self.trained_centers[center, 2], self.trained_centers[center, 1], self.trained_centers[center, 0]])
self.color_array.append(color_i)
self.color_image_array[center, :] = color_i
plotRows = int(math.ceil(self.num_centers / 2.0))
f, axarr = plt.subplots(plotRows, 2)
for row in range(plotRows):
if self.num_centers % 2 == 0:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
axarr[row, 1].axis('off')
axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
else:
if row != plotRows - 1:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
axarr[row, 1].axis('off')
axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
else:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].axis('off')
print(self.color_array)
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
# apply kMeans alg
def applyKM(self):
# resize image
self.resized_image = cv2.resize(self.input_image, (0, 0), fx=self.fac_resize, fy=self.fac_resize)
print('resized image!')
# blur image
self._blurImg()
print('blurred image!')
# self.blurred_image, self.shiftB, self.shiftG, self.shiftR = blackBalance(self.blurred_image)
# prepare KMeans
kmc = KMeans(n_clusters=self.num_centers, init='k-means++', max_iter=20)
# try out color balance first
# self.blurred_image = simplest_cb(self.blurred_image, 1) # percentages around 1% are normal
cv2.namedWindow('blurred', flags=cv2.WINDOW_NORMAL)
cv2.imshow('blurred', self.blurred_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# prepare data points
self.image_array = self._getimgdatapts(self.blurred_image)
# debug
print(self.image_array.shape)
# run KMeans
kmc.fit(self.image_array)
# get centers, labels and labelcount from KMeans
self.trained_centers = kmc.cluster_centers_
self.labels = kmc.labels_
for i in np.arange(self.num_centers):
self.labelcount[i] = np.sum(self.labels == i)
# plot colors
self._plotColors()
def determineColor(self, withRed, trained_centers):
# define the true centers. This color is preset. The color transformation
# tries to transform a picture such that the black areas will become true black.
# The same applies for yellow, white and (if valid) red.
trueBlack = [60, 60, 60]
if (withRed):
trueRed = [60, 60, 240]
trueYellow = [50, 240, 240]
trueWhite = [240, 240, 240]
errorBlack = np.zeros(self.num_centers)
errorYellow = np.zeros(self.num_centers)
errorWhite = np.zeros(self.num_centers)
if (withRed):
errorRed = np.zeros(self.num_centers)
for i in range(self.num_centers):
print(trained_centers[i])
errorBlack[i] = np.linalg.norm(trueBlack - trained_centers[i])
errorYellow[i] = np.linalg.norm(trueYellow - trained_centers[i])
errorWhite[i] = np.linalg.norm(trueWhite - trained_centers[i])
if (withRed):
errorRed[i] = np.linalg.norm(trueRed - trained_centers[i])
print "black error:" + str(errorBlack)
print "yellow error:" + str(errorYellow)
print "white error:" + str(errorWhite)
        if (withRed):
            print "red error:" + str(errorRed)
nTrueCenters = 3
errorBlackSortedIdx = np.argsort(errorBlack)
errorYellowSortedIdx = np.argsort(errorYellow)
errorWhiteSortedIdx = np.argsort(errorWhite)
if (withRed):
errorRedSortedIdx = np.argsort(errorRed)
if (withRed):
nTrueCenters = 4
ListOfIndices = []
blackIdxFound = False
whiteIdxFound = False
yellowIdxFound = False
if (withRed):
redIdxFound = False
centersFound = False
index = 0
print "errorBlackSortedIdx: " + str(errorBlackSortedIdx)
print "errorYellowSortedIdx: " + str(errorYellowSortedIdx)
print "errorWhiteSortedIdx: " + str(errorWhiteSortedIdx)
        if (withRed):
            print "errorRedSortedIdx: " + str(errorRedSortedIdx)
while (not centersFound):
if errorBlackSortedIdx[index] not in ListOfIndices and not blackIdxFound:
ListOfIndices.append(errorBlackSortedIdx[index])
print str(index) + " in black " + str(ListOfIndices)
blackIdxFound = True
idxBlack = errorBlackSortedIdx[index]
if errorWhiteSortedIdx[index] not in ListOfIndices and not whiteIdxFound:
ListOfIndices.append(errorWhiteSortedIdx[index])
print str(index) + " in white " + str(ListOfIndices)
whiteIdxFound = True
idxWhite = errorWhiteSortedIdx[index]
if errorYellowSortedIdx[index] not in ListOfIndices and not yellowIdxFound:
ListOfIndices.append(errorYellowSortedIdx[index])
print str(index) + " in yellow " + str(ListOfIndices)
yellowIdxFound = True
idxYellow = errorYellowSortedIdx[index]
if withRed:
if errorRedSortedIdx[index] not in ListOfIndices and not redIdxFound:
ListOfIndices.append(errorRedSortedIdx[index])
redIdxFound = True
print str(index) + "in red" + str(ListOfIndices)
idxRed = errorRedSortedIdx[index]
print "True?: " + str(redIdxFound) + str(yellowIdxFound) + str(whiteIdxFound) + str(blackIdxFound)
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound and redIdxFound
print "centersFound: " + str(centersFound)
else:
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound
index = index + 1
print "End of while loop. Index: " + str(index)
        if (withRed):
            print idxRed, idxWhite, idxYellow, idxBlack
        else:
            print idxWhite, idxYellow, idxBlack
if (withRed):
return idxBlack, idxRed, idxYellow, idxWhite
else:
return idxBlack, idxYellow, idxWhite
def plotDeterminedCenters(self, centerBlack, centerYellow, centerWhite, centerRed):
tupleBlack = tuple([centerBlack[2], centerBlack[1], centerBlack[0]])
tupleWhite = tuple([centerWhite[2], centerWhite[1], centerWhite[0]])
tupleYellow = tuple([centerYellow[2], centerYellow[1], centerYellow[0]])
tupleRed = tuple([centerRed[2], centerRed[1], centerRed[0]])
imageBlack = np.zeros((200, 200, 3), np.uint8)
imageBlack[:] = tupleBlack
imageWhite = np.zeros((200, 200, 3), np.uint8)
imageWhite[:] = tupleWhite
imageYellow = np.zeros((200, 200, 3), np.uint8)
imageYellow[:] = tupleYellow
imageRed = np.zeros((200, 200, 3), np.uint8)
imageRed[:] = tupleRed
f, axarr = plt.subplots(2, 2)
axarr[0, 0].imshow(imageBlack)
axarr[0, 0].axis('off')
axarr[0, 0].set_title("Black")
axarr[0, 1].imshow(imageWhite)
axarr[0, 1].axis('off')
axarr[0, 1].set_title("White")
axarr[1, 0].imshow(imageYellow)
axarr[1, 0].axis('off')
axarr[1, 0].set_title("Yellow")
axarr[1, 1].imshow(imageRed)
axarr[1, 1].axis('off')
axarr[1, 1].set_title("Red")
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
def main():
# define and parse command line arguments
parser = argparse.ArgumentParser(
description='Perform kMeans with n initial centers.')
parser.add_argument('img_path', help='path to the image')
parser.add_argument('n_centers', help='numbers of initial centers')
parser.add_argument('--resize', default='0.1',
help='factor of downsampling the input image. DEFAULT = 0.1')
parser.add_argument('--blur', default='median',
                        help="blur algorithm. 'median' or 'gaussian'. DEFAULT = median")
parser.add_argument('--blur_kernel', default='5',
help='size of kernel for blurring. DEFAULT = 5')
parser.add_argument('--output_dir', default='./output_images',
help='directory for the output images. DEFAULT = ./output_images')
args = parser.parse_args()
# check if file exists
if not os.path.isfile(args.img_path):
print('file not found')
sys.exit(2)
# check if dir exists, create if not
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# check resize factor
    if not (0 < float(args.resize) <= 1):
        print('resize factor must be between 0 and 1')
sys.exit(2)
# check blur alg
if not (args.blur == "median" or args.blur == "gaussian"):
print('blur alg must be median or gaussian')
sys.exit(2)
# check kernel size
print "kernel: " + str(args.blur_kernel)
if (int(args.blur_kernel) % 2 == 0):
print('kernel size must be odd')
sys.exit(2)
# create instance of kMeans
print("all arguments have been read.")
inputImage = cv2.imread(args.img_path, cv2.IMREAD_UNCHANGED)
CB = simpleColorBalanceClass()
CB.thresholdAnalysis(inputImage, 1)
imageBalanced = CB.applyTrafo(inputImage)
KM = kMeanClass(imageBalanced, args.n_centers, args.blur, args.resize, args.blur_kernel)
cv2.namedWindow('input', flags=cv2.WINDOW_NORMAL)
cv2.imshow('input', inputImage)
cv2.namedWindow('balanced', flags=cv2.WINDOW_NORMAL)
cv2.imshow('balanced', imageBalanced)
cv2.waitKey(0)
cv2.destroyAllWindows()
KM.applyKM()
idxBlack, idxRed, idxYellow, idxWhite = KM.determineColor(True, KM.trained_centers)
trained_centers = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxRed],
KM.trained_centers[idxYellow], KM.trained_centers[idxWhite]])
print "the trained centers are: " + str(trained_centers)
KM.plotDeterminedCenters(KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
KM.trained_centers[idxWhite], KM.trained_centers[idxRed])
trained_centers_woRed = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
KM.trained_centers[idxWhite]])
true_centers = np.vstack([[70, 50, 60], [50, 70, 240], [60, 240, 230], [250, 250, 250]])
outlierIndex, outlierCenter = detectOutlier(trained_centers, true_centers)
true_centers_woOutlier = np.delete(true_centers, outlierIndex, 0)
trained_centers_woOutlier = np.delete(trained_centers, outlierIndex, 0)
print "outlier center is: " + str(outlierCenter)
print("transform instance will be created!")
T = calcTransform(3, trained_centers_woOutlier, true_centers_woOutlier)
T.calcTransform()
# corr_img1 = scaleandshift2(KM.input_image, [1, 1, 1], [KM.shiftB, KM.shiftG, KM.shiftR])
corrected_img = scaleandshift2(KM.input_image, T.scale, T.shift)
corrected_image_cv2 = np.clip(
corrected_img, 0, 255).astype(np.uint8)
cv2.namedWindow('corrected', flags=cv2.WINDOW_NORMAL)
cv2.imshow('corrected', corrected_image_cv2)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
sys.exit(main())
"""
def batchExtraction(image, batchSideLength):
xSize, ySize, zSize = image.shape
xSizeNew = int(xSize / batchSideLength)
ySizeNew = int(ySize / batchSideLength)
newImage = np.zeros((xSizeNew,ySizeNew,zSize))
for i in range(xSizeNew):
for j in range(ySizeNew):
# create indices for the batches
xlow = i*batchSideLength
xhigh = (i+1)*batchSideLength
ylow = j*batchSideLength
yhigh = (j+1)*batchSideLength
if(i == (xSizeNew-1) ):
xhigh = xSize - 1
if(j == (ySizeNew - 1)):
yhigh = ySize -1
# average the batches
newImage[i, j, 0] = np.mean(image[xlow:xhigh, ylow:yhigh, 0])
newImage[i, j, 1] = np.mean(image[xlow:xhigh, ylow:yhigh, 1])
newImage[i, j, 2] = np.mean(image[xlow:xhigh, ylow:yhigh, 2])
return newImage
input_img = cv2.imread("test_images/pic3.jpg", cv2.IMREAD_UNCHANGED)
#input_img_converted = getimgdatapts(input_img)
#print(input_img_converted.shape)
width, height, channels = input_img.shape
trial = cv2.resize(input_img, (0, 0), fx=0.1, fy=0.1)
print(trial.shape)
# blur image using gaussian:
blurG = cv2.GaussianBlur(trial, (5,5), 0)
# blur image using median:
blurM = cv2.medianBlur(trial, 5)
# plot both blurred images
blurBoth = np.concatenate((blurG, blurM), axis=1)
# apply kmeans on blurred image:
# number of centers for kmeans
n_centers = 6
kmc = KMeans(n_clusters=n_centers, init='k-means++', max_iter=20)
trial_converted = getimgdatapts(blurM)
kmc.fit(trial_converted)
trained_centers = kmc.cluster_centers_
labels = kmc.labels_
# print centers and counts
labelcount = Counter()
for i in np.arange(n_centers):
labelcount[i] = np.sum(labels == i)
print(labelcount)
print(trained_centers)
print(kmc.cluster_centers_[1]/255)
str0 = tuple([kmc.cluster_centers_[0,2],kmc.cluster_centers_[0,1],kmc.cluster_centers_[0,0]])
str1 = tuple([kmc.cluster_centers_[1,2],kmc.cluster_centers_[1,1],kmc.cluster_centers_[1,0]])
str2 = tuple([kmc.cluster_centers_[2,2],kmc.cluster_centers_[2,1],kmc.cluster_centers_[2,0]])
str3 = tuple([kmc.cluster_centers_[3,2],kmc.cluster_centers_[3,1],kmc.cluster_centers_[3,0]])
str4 = tuple([kmc.cluster_centers_[4,2],kmc.cluster_centers_[4,1],kmc.cluster_centers_[4,0]])
str5 = tuple([kmc.cluster_centers_[5,2],kmc.cluster_centers_[5,1],kmc.cluster_centers_[5,0]])
print(str1)
image0 = np.zeros((200, 200, 3), np.uint8)
image0[:] = str0
image1 = np.zeros((200, 200, 3), np.uint8)
image1[:] = str1
image2 = np.zeros((200, 200, 3), np.uint8)
image2[:] = str2
image3 = np.zeros((200, 200, 3), np.uint8)
image3[:] = str3
image4 = np.zeros((200, 200, 3), np.uint8)
image4[:] = str4
image5 = np.zeros((200, 200, 3), np.uint8)
image5[:] = str5
labelArray = kmc.labels_
num0 = np.sum(labelArray==0)
num1 = np.sum(labelArray==1)
num2 = np.sum(labelArray==2)
num3 = np.sum(labelArray==3)
num4 = np.sum(labelArray==4)
num5 = np.sum(labelArray==5)
f, axarr = plt.subplots(3, 2)
axarr[0,0].imshow(image0)
axarr[0,0].axis('off')
axarr[0,0].set_title(str(num0))
axarr[0,1].imshow(image1)
axarr[0,1].axis('off')
axarr[0,1].set_title(str(num1))
axarr[1,0].imshow(image2)
axarr[1,0].axis('off')
axarr[1,0].set_title(str(num2))
axarr[1,1].imshow(image3)
axarr[1,1].axis('off')
axarr[1,1].set_title(str(num3))
axarr[2,0].imshow(image4)
axarr[2,0].axis('off')
axarr[2,0].set_title(str(num4))
axarr[2,1].imshow(image5)
axarr[2,1].axis('off')
axarr[2,1].set_title(str(num5))
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
for i in range(kmc.n_clusters):
print(np.sum(labelArray==i))
"""
```
#### File: anti_instagram/sandbox/outlierEstimation.py
```python
from anti_instagram.calcLstsqTransform import *
import numpy as np
import sys
def detectOutlier(trainedCenters, trueCenters): # YWRB
n_centers, n_channels = trainedCenters.shape
# trueCenters = np.vstack([[50, 240, 240], [240, 240, 240], [60, 60, 240], [60, 60, 60]]) # YWRB
print n_centers
errors = np.zeros(n_centers)
errorArrayTotal = np.zeros(n_centers)
# leave one trained center out and estimate transform with the rest of the centers
for i in range(n_centers):
# leave out i-th trained center
trainedCentersTemp = np.vstack([trainedCenters[0:i, :], trainedCenters[i+1:n_centers, :]])
trueCenterstemp = np.vstack([trueCenters[0:i, :], trueCenters[i+1:n_centers, :]])
# calculate transform with the other centers
T = calcTransform(n_centers-1, trainedCentersTemp, trueCenterstemp)
T.calcTransform()
print "the transform is: shift - " + str(T.shift) + ", scale - " + str(T.scale)
# print "left out " + str(leaveOut) + ", new centers: " + str(tempArray)
# transformedCenters = np.zeros((tempArray.shape))
errorArray = np.zeros(n_centers)
# estimate the error of the transformed trained centers wrt. the true centers
for j in range(n_centers):
tempTrafoCenter = transformOneCenter(trainedCenters[j, :], T.shift, T.scale)
tempTrueCenter = trueCenters[j]
errorArray[j] = estimateError(tempTrafoCenter, tempTrueCenter)
errorArrayTotal[i] = np.sum(errorArray)
print "error of trafo: " + str(errorArrayTotal[i])
errorArraySortedIdx = np.argsort(errorArrayTotal)
return errorArraySortedIdx[0], trainedCenters[errorArraySortedIdx[0]] # return first element.
# this set of centers leads to the lowest error and the left out center is therefore the outlier.
def estimateError(trained_center, truecenter):
return np.linalg.norm(truecenter - trained_center)
def transformOneCenter(center, shift, scale):
center = np.array(center)
shift = np.array(shift)
scale = np.array(scale)
transformedCenter = np.zeros(center.shape)
n_channels, = center.shape
# print n_channels
for i in range(n_channels):
transformedCenter[i] = center[i]*scale[i] + shift[i]
return transformedCenter
def main():
center1 = [50, 50, 50]
center2 = [40, 230, 230]
center3 = [50, 50, 230]
center4 = [250, 250, 250]
centers = np.vstack([center2, center4, center3, center1])
print "shape of the n x 3 centers: " + str(centers.shape)
trueCenters = np.vstack([[50, 240, 240], [240, 240, 240], [60, 60, 240], [60, 60, 60]]) # YWRB
print "shape of the n x 3 centers: " + str(trueCenters.shape)
print "the centers: " + str(centers)
print(detectOutlier(centers, trueCenters))
print(np.delete(centers, 0))
if __name__ == '__main__':
main()
sys.exit()
```
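The leave-one-out logic above works as follows: for each trained center, fit the transform on the remaining centers and measure how well that transform maps all centers to their targets; the exclusion that yields the lowest total error marks the excluded center as the outlier. Below is a compact self-contained sketch of the same idea; a simple per-channel least-squares scale/shift fit stands in for `calcTransform`, whose implementation lives in `calcLstsqTransform.py` and is not shown here.

```python
# Self-contained sketch of the leave-one-out outlier test used above.
# A per-channel least-squares scale/shift fit stands in for calcTransform.
import numpy as np

def fit_channel(trained, true):
    A = np.column_stack([trained, np.ones_like(trained)])
    scale_shift, _, _, _ = np.linalg.lstsq(A, true, rcond=None)
    return scale_shift  # (scale, shift) for one channel

def leave_one_out_outlier(trained_centers, true_centers):
    n = trained_centers.shape[0]
    total_errors = np.zeros(n)
    for i in range(n):
        keep = [k for k in range(n) if k != i]
        # fit scale/shift per channel on the remaining centers
        params = [fit_channel(trained_centers[keep, c], true_centers[keep, c]) for c in range(3)]
        scale = np.array([p[0] for p in params])
        shift = np.array([p[1] for p in params])
        transformed = trained_centers * scale + shift
        total_errors[i] = np.linalg.norm(transformed - true_centers, axis=1).sum()
    return int(np.argmin(total_errors))  # index of the suspected outlier

# same toy data as in main() above (YWRB order)
centers = np.array([[40., 230., 230.], [250., 250., 250.], [50., 50., 230.], [50., 50., 50.]])
true = np.array([[50., 240., 240.], [240., 240., 240.], [60., 60., 240.], [60., 60., 60.]])
print(leave_one_out_outlier(centers, true))
```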
#### File: anti_instagram/sandbox/wb2trafo.py
```python
from simpleColorBalance import *
import cv2
import argparse
import os
import sys
import numpy as np
from anti_instagram.AntiInstagram_rebuild import *
def blurImg(img, method, kernel):
# blur image using median:
if method == 'median':
blurred_image = cv2.medianBlur(img, kernel)
# blur image using gaussian:
elif method == 'gaussian':
blurred_image = cv2.GaussianBlur(img, (kernel, kernel), 0)
# no blur
elif method == 'none':
blurred_image = img.copy()
return blurred_image
def averageColor(img):
avg_color_per_row = np.average(img, axis=0)
avg_color = np.average(avg_color_per_row, axis=0)
return avg_color
def getShift(input_avg, output_avg):
shift = [0, 0, 0]
for i in range(3):
shift[i] = output_avg[i] - input_avg[i]
return shift
def getScale(input_avg, output_avg):
scale = [0, 0, 0]
for i in range(3):
scale[i] = output_avg[i] / input_avg[i]
return scale
def main():
# define and parse command line arguments
parser = argparse.ArgumentParser(
description='lorem Ipsum.')
parser.add_argument('img_path', help='path to the image')
parser.add_argument('--blur', default='median',
                        help="blur algorithm. 'median' or 'gaussian'. DEFAULT = median")
parser.add_argument('--blur_kernel', default='5',
help='size of kernel for blurring. DEFAULT = 5')
parser.add_argument('--output_dir', default='./output_images',
help='directory for the output images. DEFAULT = ./output_images')
parser.add_argument('--percentage', default='1',
help='percentage for WB. DEFAULT = 1')
args = parser.parse_args()
# check if file exists
if not os.path.isfile(args.img_path):
print('file not found')
sys.exit(2)
# check if dir exists, create if not
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# check blur alg
if not (args.blur == "median" or args.blur == "gaussian" or args.blur == "none"):
print('blur alg must be none, median or gaussian')
sys.exit(2)
# check kernel size
if (int(args.blur_kernel) % 2 == 0):
print('kernel size must be odd')
sys.exit(2)
# read image
image_cv = cv2.imread(args.img_path, cv2.IMREAD_UNCHANGED)
# blur if desired
image_blurred = blurImg(image_cv, args.blur, int(args.blur_kernel))
image_cb = simplest_cb(image_blurred, int(args.percentage))
avg_input = averageColor(image_cv)
print('average color of input image:\n' + str(avg_input))
avg_cb = averageColor(image_cb)
print('average color of output image:\n' + str(avg_cb))
shift = getShift(avg_input, avg_cb)
print('shift:\n' + str(shift))
scale = getScale(avg_input, avg_cb)
print('scale:\n' + str(scale))
ai = AntiInstagram()
ai.setScaleShift(scale, [0, 0, 0])
scaled_image = ai.applyTransform(image_cv)
ai.setScaleShift([1, 1, 1], shift)
shifted_image = ai.applyTransform(image_cv)
compare = np.concatenate((image_cv, image_cb), axis=1)
compare2 = np.concatenate((scaled_image, shifted_image), axis=1)
cv2.namedWindow('input vs ColorBalance', flags=cv2.WINDOW_NORMAL)
cv2.imshow('input vs ColorBalance', compare)
cv2.namedWindow('scaled vs shifted', flags=cv2.WINDOW_NORMAL)
cv2.imshow('scaled vs shifted', compare2)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
sys.exit(main())
```
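`wb2trafo.py` derives a scale/shift pair from the average colors before and after color balancing: per channel, `shift = avg_out - avg_in` and `scale = avg_out / avg_in`. A short worked example follows; the average colors are made up for illustration.

```python
# Worked example of getShift / getScale with made-up average colors (BGR).
avg_in = [90.0, 120.0, 150.0]     # average color of the input image
avg_out = [100.0, 130.0, 140.0]   # average color after color balancing

shift = [avg_out[i] - avg_in[i] for i in range(3)]   # [10.0, 10.0, -10.0]
scale = [avg_out[i] / avg_in[i] for i in range(3)]   # [1.111..., 1.083..., 0.933...]
print(shift, scale)
```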
#### File: line_detector/src/line_detector_node.py
```python
from anti_instagram.AntiInstagram import AntiInstagram
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D, FSMState)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.timekeeper import TimeKeeper
import cv2
import rospy
import threading
import time
from line_detector.line_detector_plot import color_segment, drawLines
import numpy as np
class LineDetectorNode(object):
def __init__(self):
self.node_name = rospy.get_name()
# Thread lock
self.thread_lock = threading.Lock()
# Constructor of line detector
self.bridge = CvBridge()
self.active = True
self.stats = Stats()
# Only be verbose every 10 cycles
self.intermittent_interval = 100
self.intermittent_counter = 0
# color correction
self.ai = AntiInstagram()
# these will be added if it becomes verbose
self.pub_edge = None
self.pub_colorSegment = None
self.detector = None
self.verbose = None
self.updateParams(None)
# Publishers
self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
# FSM
self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
self.sub_fsm_mode = rospy.Subscriber("~fsm_mode", FSMState, self.cbMode, queue_size=1)
rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))
rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
def updateParams(self, _event):
old_verbose = self.verbose
self.verbose = rospy.get_param('~verbose', True)
# self.loginfo('verbose = %r' % self.verbose)
if self.verbose != old_verbose:
self.loginfo('Verbose is now %r' % self.verbose)
self.image_size = rospy.get_param('~img_size')
self.top_cutoff = rospy.get_param('~top_cutoff')
if self.detector is None:
c = rospy.get_param('~detector')
assert isinstance(c, list) and len(c) == 2, c
# if str(self.detector_config) != str(c):
self.loginfo('new detector config: %s' % str(c))
self.detector = instantiate(c[0], c[1])
# self.detector_config = c
if self.verbose and self.pub_edge is None:
self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)
#FSM
def cbSwitch(self, switch_msg):
self.active = switch_msg.data
#FSM
def cbMode(self, mode_msg):
self.fsm_state = mode_msg.state # String of current FSM state
def cbImage(self, image_msg):
self.stats.received()
if not self.active:
return
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
        # Returns right away
def cbTransform(self, transform_msg):
self.ai.shift = transform_msg.s[0:3]
self.ai.scale = transform_msg.s[3:6]
self.loginfo("AntiInstagram transform received")
def loginfo(self, s):
rospy.loginfo('[%s] %s' % (self.node_name, s))
def intermittent_log_now(self):
return self.intermittent_counter % self.intermittent_interval == 1
def intermittent_log(self, s):
if not self.intermittent_log_now():
return
self.loginfo('%3d:%s' % (self.intermittent_counter, s))
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
self.stats.skipped()
# Return immediately if the thread is locked
return
try:
self.processImage_(image_msg)
finally:
# Release the thread lock
self.thread_lock.release()
def processImage_(self, image_msg):
self.stats.processed()
if self.intermittent_log_now():
self.intermittent_log(self.stats.info())
self.stats.reset()
tk = TimeKeeper(image_msg)
self.intermittent_counter += 1
# Decode from compressed image with OpenCV
try:
image_cv = image_cv_from_jpg(image_msg.data)
except ValueError as e:
self.loginfo('Could not decode image: %s' % e)
return
tk.completed('decoded')
# Resize and crop image
hei_original, wid_original = image_cv.shape[0:2]
if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
# image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
interpolation=cv2.INTER_NEAREST)
image_cv = image_cv[self.top_cutoff:,:,:]
tk.completed('resized')
# apply color correction: AntiInstagram
image_cv_corr = self.ai.applyTransform(image_cv)
image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
tk.completed('corrected')
# Set the image to be detected
self.detector.setImage(image_cv_corr)
# Detect lines and normals
white = self.detector.detectLines('white')
yellow = self.detector.detectLines('yellow')
red = self.detector.detectLines('red')
tk.completed('detected')
# SegmentList constructor
segmentList = SegmentList()
segmentList.header.stamp = image_msg.header.stamp
# Convert to normalized pixel coordinates, and add segments to segmentList
arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
if len(white.lines) > 0:
lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
if len(yellow.lines) > 0:
lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
if len(red.lines) > 0:
lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
len(yellow.lines), len(red.lines)))
tk.completed('prepared')
# Publish segmentList
self.pub_lines.publish(segmentList)
tk.completed('--pub_lines--')
# VISUALIZATION only below
if self.verbose:
# Draw lines and normals
image_with_lines = np.copy(image_cv_corr)
drawLines(image_with_lines, white.lines, (0, 0, 0))
drawLines(image_with_lines, yellow.lines, (255, 0, 0))
drawLines(image_with_lines, red.lines, (0, 255, 0))
tk.completed('drawn')
# Publish the frame with lines
image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
tk.completed('pub_image')
# if self.verbose:
colorSegment = color_segment(white.area, red.area, yellow.area)
edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
self.pub_edge.publish(edge_msg_out)
self.pub_colorSegment.publish(colorSegment_msg_out)
tk.completed('pub_edge/pub_segment')
self.intermittent_log(tk.getall())
def onShutdown(self):
self.loginfo("Shutdown.")
def toSegmentMsg(self, lines, normals, color):
segmentMsgList = []
for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
segment = Segment()
segment.color = color
segment.pixels_normalized[0].x = x1
segment.pixels_normalized[0].y = y1
segment.pixels_normalized[1].x = x2
segment.pixels_normalized[1].y = y2
segment.normal.x = norm_x
segment.normal.y = norm_y
segmentMsgList.append(segment)
return segmentMsgList
class Stats():
def __init__(self):
self.nresets = 0
self.reset()
def reset(self):
self.nresets += 1
self.t0 = time.time()
self.nreceived = 0
self.nskipped = 0
self.nprocessed = 0
def received(self):
if self.nreceived == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node received first image.')
self.nreceived += 1
def skipped(self):
self.nskipped += 1
def processed(self):
if self.nprocessed == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node processing first image.')
self.nprocessed += 1
def info(self):
delta = time.time() - self.t0
if self.nreceived:
skipped_perc = (100.0 * self.nskipped / self.nreceived)
else:
skipped_perc = 0
def fps(x):
return '%.1f fps' % (x / delta)
m = ('In the last %.1f s: received %d (%s) processed %d (%s) skipped %d (%s) (%1.f%%)' %
(delta, self.nreceived, fps(self.nreceived),
self.nprocessed, fps(self.nprocessed),
self.nskipped, fps(self.nskipped), skipped_perc))
return m
if __name__ == '__main__':
rospy.init_node('line_detector',anonymous=False)
line_detector_node = LineDetectorNode()
rospy.on_shutdown(line_detector_node.onShutdown)
rospy.spin()
```
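In `processImage_` above, detected line endpoints live in the resized-and-cropped image; `arr_cutoff` undoes the vertical crop and `arr_ratio` divides by the configured image size so that segments are published in coordinates normalized to [0, 1]. A small numeric check of that mapping follows; the 120x160 image size and 40-pixel cutoff are example values, not necessarily the values configured via `~img_size` and `~top_cutoff`.

```python
# Numeric check of the segment normalization done in processImage_.
# img_size = (height, width) and top_cutoff are example values only.
import numpy as np

img_size = (120, 160)   # (H, W) after resize; set via ~img_size in the real node
top_cutoff = 40         # set via ~top_cutoff in the real node

arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
arr_ratio = np.array((1. / img_size[1], 1. / img_size[0], 1. / img_size[1], 1. / img_size[0]))

line = np.array([80, 10, 120, 30])              # (x1, y1, x2, y2) in the cropped image
normalized = (line + arr_cutoff) * arr_ratio    # -> [0.5, 0.4166..., 0.75, 0.5833...]
print(normalized)
```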
#### File: include/obst_avoid/detector.py
```python
import argparse
import cv2
import numpy as np
from numpy.linalg import inv
from numpy import linalg as LA
from os.path import basename, expanduser, isfile, join, splitext
import socket
from matplotlib import pyplot as plt
import time
from skimage import measure
import rospy
from sensor_msgs.msg import CompressedImage
from geometry_msgs.msg import PoseArray, Point, Pose, Quaternion
from duckietown_utils import d8_compressed_image_from_cv_image, logger, rgb_from_ros, yaml_load, get_duckiefleet_root
from duckietown_utils import get_base_name, load_camera_intrinsics, load_homography, load_map, rectify
from duckietown_utils import load_map, load_camera_intrinsics, load_homography, rectify
class Detector():
'''class for detecting obstacles'''
def __init__(self, robot_name=''):
# Robot name
self.robot_name = robot_name
        # Array which saves known obstacles to track them better
self.track_array = np.int_([])
# Load camera calibration parameters
self.intrinsics = load_camera_intrinsics(robot_name)
self.H = load_homography(self.robot_name)
self.inv_H = inv(self.H)
# define where to cut the image, color range,...
self.crop = 150 # default value but is overwritten in init_homo
# self.lower_yellow = np.array([20,100,150]) #more restrictive -> if you can be sure for "no reddish yellow"
self.lower_yellow = np.array([15, 100, 200])
self.upper_yellow = np.array([35, 255, 255])
self.lower_orange = np.array([0, 100, 100])
self.upper_orange = np.array([15, 255, 255])
self.lower_white = np.array([0, 0, 150])
self.upper_white = np.array([255, 25, 255])
self.ref_world_point_x = 1.7 # this is the reference world point where we crop the img
self.major_intertia_thres = 10 # if you want to detect very little ducks might lower it to 10, not smaller,..
self.img_width = 0 # to be set in init_inv_homography
self.img_height = 0 # to be set in init_inv_homography
self.maximum_height = 0 # to be set in ground2bird_view_pixel_init
self.maximum_left = 0
self.factor = 1.0 # to be set in ground2bird_view_pixel_init
self.obst_thres = 20 # to be set in init_inv_homography, this is default was 35
self.minimum_tracking_distance = 60
        self.min_consec_tracking = 2  # number of times you must have seen a critical object before adding it as an obstacle
self.new_track_array = np.array([])
self.M = self.init_inv_homography()
self.inv_M = inv(self.M)
center_world_coords = np.float32([[0.0], [0.0], [1.0]])
center_pixels = self.ground2bird_view_pixel(center_world_coords)
self.center_x = int(center_pixels[0])
self.center_y = int(center_pixels[1])
# initializes where the robot is in the bird view image
def init_inv_homography(self):
reference_world_point = np.float32([[self.ref_world_point_x], [0.0], [1.0]]) # adaptive cropping is dangerous
real_pix_of_ref_point = self.ground2real_pic_pixel(reference_world_point)
image_height = 480 # default height of image
self.crop = int(real_pix_of_ref_point[1])
x0 = 0 # take full width of image
x1 = 640 # take full width of image
y0 = 0 # take top of cropped image!
y1 = image_height - self.crop # complete bottom
x_center = 320
y_center = image_height - self.crop
pts1 = np.float32([[x0, y0], [x0, y1], [x1, y1], [x1, y0]])
pts1_h = np.float32(
[[x0, y0 + self.crop, 1], [x0, y1 + self.crop, 1], [x1, y1 + self.crop, 1], [x1, y0 + self.crop, 1]])
        # add the crop offset to be able to calc real world coordinates correctly!!!
pts2_h = self.real_pic_pixel2ground(np.transpose(pts1_h))
bird_view_pixel = self.ground2bird_view_pixel_init(pts2_h)
# ATTENTION: bird view pixel with (x,y)
self.img_width = int(np.max(bird_view_pixel[0]))
self.img_height = int(np.max(bird_view_pixel[1]))
self.obst_thres = self.obst_thres * (self.img_height / 218.0) # adaptive threshold
# print self.obst_thres
return cv2.getPerspectiveTransform(pts1, np.transpose(bird_view_pixel))
def process_image(self, image):
obst_list = PoseArray()
# FILTER CROPPED IMAGE
# Convert BGR to HSV
hsv = cv2.cvtColor(image[self.crop:, :, :], cv2.COLOR_RGB2HSV)
# Threshold the HSV image to get only yellow colors
im_test = cv2.warpPerspective(hsv, self.M, (self.img_width, self.img_height))
mask1 = cv2.inRange(im_test, self.lower_yellow, self.upper_yellow)
mask2 = cv2.inRange(im_test, self.lower_orange, self.upper_orange)
mask = np.bitwise_or((mask1 / 2), mask2)
# mask = mask1/2 #to only test yellow
# mask = mask2 #to only test orange
# yellow objects have value 127, orange 255
if (np.sum(mask != 0) != 0): # there were segment detected then
# SEGMENT IMAGE
segmented_image = self.segment_img(mask)
props = measure.regionprops(segmented_image, mask)
no_elements = np.max(segmented_image)
# apply filter on elements-> only obstacles remain and mark them in original picture
# in the future: might be separated in 2 steps 1)extract objects 2)visualisation
obst_list = self.object_filter(props, no_elements, im_test)
return obst_list
def segment_img(self, image):
# returns segmented image on Grayscale where all interconnected pixels have same number
return measure.label(image)
def object_filter(self, props, no_elements, image):
# for future: filter has to become adaptive to depth
obst_list = PoseArray()
obst_list.header.frame_id = self.robot_name
self.new_track_array = np.array([])
for k in range(1, no_elements + 1): # iterate through all segmented numbers
# first only keep large elements then eval their shape
if (props[k - 1]['area'] > self.obst_thres): # skip all those who are too small
# Coordinates in the bord view
top = props[k - 1]['bbox'][0]
bottom = props[k - 1]['bbox'][2]
left = props[k - 1]['bbox'][1]
right = props[k - 1]['bbox'][3]
# calc center in top view image
total_width = right - left
total_height = bottom - top
# yellow: 127, orange: 255
color_info = props[k - 1]['max_intensity']
# to take small duckies into account:(color_info == 127 and props[k-1]['inertia_tensor_eigvals'][0]>10)
if ((color_info == 127 and props[k - 1]['inertia_tensor_eigvals'][0] > self.major_intertia_thres) or \
(color_info == 255 and \
props[k - 1]['inertia_tensor_eigvals'][0] / props[k - 1]['inertia_tensor_eigvals'][1] > 50)):
obst_object = Pose()
if (color_info == 127): # means: yellow object:
# old fill weight tracker:
# new_position = np.array([[left+0.5*total_width],[bottom],[props[k-1]['inertia_tensor_eigvals'][0]],[props[k-1]['inertia_tensor_eigvals'][1]],[-1],[0],[0],[0]])
# new leightweight version:
new_position = np.array(
[[left + 0.5 * total_width], [bottom], [props[k - 1]['inertia_tensor_eigvals'][0]], [0]])
# Checks if there is close object from frame before
checker = self.obst_tracker(new_position)
else:
checker = True
# Checks if there is close object from frame before
if (checker):
point_calc = np.zeros((3, 2), dtype=np.float)
point_calc = self.bird_view_pixel2ground(
np.array([[left + 0.5 * total_width, left], [bottom, bottom]]))
obst_object.position.x = point_calc[0, 0] # obstacle coord x
# if (point_calc[0, 0] < 0.35):
# print "DANGEROUS OBSTACLE:"
# print point_calc[0:2, 0]
obst_object.position.y = point_calc[1, 0] # obstacle coord y
# calculate radius:
obst_object.position.z = point_calc[1, 1] - point_calc[1, 0] # this is the radius!
                        # determine whether the obstacle is out of bounds:
if (obst_object.position.y > 0): # obstacle is left of me
line1 = np.array([measure.profile_line(image, (self.center_y, self.center_x),
(bottom, right), linewidth=1, order=1,
mode='constant')])
else:
line1 = np.array([measure.profile_line(image, (self.center_y, self.center_x),
(bottom, left), linewidth=1, order=1,
mode='constant')])
# bottom,left
line1 = cv2.inRange(line1, self.lower_white, self.upper_white)
if (np.sum(line1 == 255) > 3):
obst_object.position.z = -1 * obst_object.position.z # means it is out of bounds!
# fill in the pixel boundaries of bird view image!!!
obst_object.orientation.x = top
obst_object.orientation.y = bottom
obst_object.orientation.z = left
obst_object.orientation.w = right
obst_list.poses.append(obst_object)
# explanation: the published coordinates are expressed in the axle-centered frame,
# with x pointing in the direction of travel and y to the left, in meters [m]
# cv2.rectangle(orig_img,(np.min(C[1]),np.min(C[0])),(np.max(C[1]),np.max(C[0])),(0,255,0),3)
# actual bounding box: np.min of width and height, if they passed the test
# print abc
self.track_array = self.new_track_array
return obst_list
def obst_tracker(self, new_position):
# input: new_position in the 2D bird-view image, checked against the obstacles tracked in the previous frame
# side effect: new_position is appended to self.new_track_array, which becomes the track_array of the next frame
# output: True if the new position matches a previously tracked obstacle within minimum_tracking_distance
if self.track_array.size != 0:
distances = -(self.track_array[0:2, :] - new_position[0:2, :]) # only interested in depth change
distances_norms = LA.norm(distances, axis=0)
distance_min = np.amin(distances_norms)
distance_min_index = np.argmin(distances_norms)
y_distance = distances[1, distance_min_index]
if (y_distance < -5): # CHANGE OF DEPTH in PIXELS
# print "DISTANCE CRITAERIA"
distance_min = 2 * self.minimum_tracking_distance
else:
distance_min = 2 * self.minimum_tracking_distance
# FINAL EVALUATION:
if (distance_min < self.minimum_tracking_distance): # only then modify the entry!
#major_mom_change = abs(1 - abs(self.track_array[2, distance_min_index] / new_position[2, :]))
# LEFT OUT FOR EFFICIENCY, BUT ATTENTION: NOW ONE ELEMENT CAN CLAIM TO BE CLOSE TO MULTIPLE OBSTACLES,...
# new_position[4,:] = distance_min_index #show where we reference to
# new_position[5,:] = distance_min
# self.track_array[6,distance_min_index] = self.track_array[6,distance_min_index] +1 #indicate someone refers to this
# if (self.track_array[6,distance_min_index]>1):
# self.track_array[6,distance_min_index] = self.track_array[6,distance_min_index] - 1 #decr again
# #print "!!!!!!!!!MULTIPLE REFERENCES!!!!!!!!!!!!!!!!!!" #we must get active
# competitor_idx = np.argmax(self.new_track_array[4,:]==distance_min_index)
# if (self.new_track_array[4,competitor_idx]<new_position[5,:]): #competitor is closer and can stay
# distance_min = 2*self.minimum_tracking_distance
# new_position[4,:] = -1 #track lost!
# else: #current point is closer
# distance_min = 2*self.minimum_tracking_distance
# self.new_track_array[4,competitor_idx] = -1 #track lost
# self.new_track_array[7,competitor_idx] = 0 #track lost
# new_position[7,:]= self.track_array[7,distance_min_index]+1
# else:
new_position[3, :] = self.track_array[3, distance_min_index] + 1
if ((new_position[2, :] < 100) and new_position[3, :] < self.min_consec_tracking):  # not seen 2 times yet
# print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
distance_min = 2 * self.minimum_tracking_distance
if self.new_track_array.size == 0:
self.new_track_array = new_position
else:
self.new_track_array = np.append(self.new_track_array, new_position, axis=1)
return (distance_min < self.minimum_tracking_distance)
def real_pic_pixel2ground(self, real_pic_pixel):
# input: pixel coordinates of real picture in homogeneous coords (3byN)
# output: real world coordinates (z-component is equal to 1!!!)
# taking pixel coordinates with (column,row) <-> (x,y) and returning real world coordinates!!! (3byN)
point_calc = np.zeros(np.shape(real_pic_pixel), dtype=np.float32)
point_calc = np.dot(self.H, real_pic_pixel) # calculating realWorldcoords
point_calc = np.concatenate(([(point_calc[0, :]) / point_calc[2, :], (point_calc[1, :]) / point_calc[2, :]],
np.ones((1, np.shape(real_pic_pixel)[1]))), axis=0)
return point_calc
def ground2real_pic_pixel(self, ground):
# input: real world coordinates (z-component is equal to 1!!!)
# output: pixel coordinates of real picture in homogeneous coords (3byN)
# taking real world coordinates and returning (column,row) <-> (x,y) of real_pic_pixels! (2byN)
point_calc = np.zeros(np.shape(ground), dtype=np.float32)
point_calc = np.dot(self.inv_H, ground) # calculating realWorldcoords
return ([(point_calc[0, :]) / point_calc[2, :], (point_calc[1, :]) / point_calc[2, :]])
def ground2bird_view_pixel_init(self, ground):
# input: real world coordinate (3byN)
# output: bird view pixel (column,row) <-> (x,y)
# this is initialisation function to set the class parameters!!!!
# goal: find correct factor:
min_width = np.min([ground[1, :]])
max_width = np.max([ground[1, :]])
total_width = max_width - min_width
# this width should be 640 pixels, since the image is 640 pixels wide
self.factor = 640.0 / total_width
ground = np.float32((ground[0:2, :] / ground[2, :] * self.factor))
self.maximum_height = np.max([ground[0, :]])
self.maximum_left = np.max([ground[1, :]])
return np.flipud((np.float32((np.float32([[self.maximum_height], [self.maximum_left]]) - ground))))
def ground2bird_view_pixel(self, ground):
# input: real world coordinate (3byN)
# output: bird view pixel (column,row) <-> (x,y) (2byN)
ground = np.float32((ground[0:2, :] / ground[2, :] * self.factor))
return np.flipud((np.float32((np.float32([[self.maximum_height], [self.maximum_left]]) - ground))))
def bird_view_pixel2ground(self, bird_view_pixel):
# input: bird view pixel (column,row) <-> (x,y) (2byN)
# output: real world coordinate (3byN)
bird_view_pixel = np.flipud(bird_view_pixel)
bird_view_pixel = np.float32((np.float32([[self.maximum_height], [self.maximum_left]])) - bird_view_pixel)
return np.concatenate((bird_view_pixel / self.factor, np.ones((1, np.shape(bird_view_pixel)[1]))), axis=0)
def bird_view_pixel2real_pic_pixel(self, bird_view_pixel):
# input: bird view pixel (column,row) <-> (x,y) (2byN)
# output: real pic pixel (2byN)!!! uncropped!!!
points = np.transpose(np.float32(bird_view_pixel))
trans_points = np.float32(cv2.perspectiveTransform(np.array([points]), self.inv_M))
return np.concatenate(
(np.reshape(trans_points[:, :, 0], (1, -1)), np.reshape(trans_points[:, :, 1] + self.crop, (1, -1))),
axis=0)
```
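A minimal, self-contained sketch of the bird-view mapping used above: once `factor`, `maximum_height` and `maximum_left` are fixed by the initialisation step, `ground2bird_view_pixel` and `bird_view_pixel2ground` are exact inverses. The calibration numbers below are made up for illustration and are not taken from the node.

```python
import numpy as np

# assumed calibration values, purely illustrative
factor = 1500.0          # pixels per meter in the bird view
maximum_height = 900.0   # scaled x-extent of the visible ground patch
maximum_left = 320.0     # scaled y-extent to the left

def ground2bird_view_pixel(ground):
    # ground: 3xN homogeneous ground coordinates (z == 1)
    g = np.float32(ground[0:2, :] / ground[2, :] * factor)
    return np.flipud(np.float32([[maximum_height], [maximum_left]]) - g)

def bird_view_pixel2ground(pix):
    # pix: 2xN bird-view pixels (column, row)
    p = np.float32([[maximum_height], [maximum_left]]) - np.flipud(np.float32(pix))
    return np.concatenate((p / factor, np.ones((1, pix.shape[1]))), axis=0)

ground = np.array([[0.4, 0.6], [0.05, -0.1], [1.0, 1.0]])
roundtrip = bird_view_pixel2ground(ground2bird_view_pixel(ground))
assert np.allclose(ground, roundtrip)
```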
#### File: include/obst_avoid/visualizer.py
```python
import argparse
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import CompressedImage
from geometry_msgs.msg import PoseArray, Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from obst_avoid.detector import Detector
from duckietown_utils import d8_compressed_image_from_cv_image, logger, rgb_from_ros, yaml_load, get_duckiefleet_root
from duckietown_utils import get_base_name, load_camera_intrinsics, load_homography, load_map, rectify
from duckietown_utils import load_map, load_camera_intrinsics, load_homography, rectify
class Visualizer():
'''class for visualizing detected obstacles'''
def __init__(self, robot_name=''):
# Robot name
self.robot_name = robot_name
self.detector = Detector(robot_name=self.robot_name)
# create a detector object to get access to all the transformations
def visualize_marker(self, obst_list):
marker_list=MarkerArray()
size = obst_list.poses.__len__()
for i in range(0,size):
marker = Marker()
marker.type = marker.CYLINDER
marker.header.frame_id=self.robot_name
marker.frame_locked=False
marker.scale.z = 1.0
marker.color.a = 1.0
marker.color.r = 0.0
marker.color.g = 0.0
marker.color.b = 0.0
if (obst_list.poses[i].position.z<0):
marker.color.g = 1.0
else:
marker.color.r = 1.0
marker.pose.orientation.w = 1.0
marker.lifetime = rospy.Time(7.0)
# each marker, unless replaced earlier by one with the same id, will disappear once its lifetime expires
marker.id = i
marker.scale.x = abs(obst_list.poses[i].position.z) #since is negative if not relevant
#marker.scale.x = 0.2
marker.scale.y = abs(obst_list.poses[i].position.z)
#marker.scale.y = 0.2
marker.pose.position.x = obst_list.poses[i].position.x
marker.pose.position.y = obst_list.poses[i].position.y
marker.pose.position.z = 0
marker_list.markers.append(marker)
#print marker_list.markers.__len__()
return marker_list
def visualize_image(self, image,obst_list):
size = obst_list.poses.__len__()
for i in range(0,size):
top = obst_list.poses[i].orientation.x
bottom = obst_list.poses[i].orientation.y
left = obst_list.poses[i].orientation.z
right = obst_list.poses[i].orientation.w
points = np.float32([[left,left,right,right],[top,bottom,bottom,top]])
pts = self.detector.bird_view_pixel2real_pic_pixel(points)
if (obst_list.poses[i].position.z<0):
cv2.polylines(image,np.int32([np.transpose(pts)]),True,(0,255,0),3)
else:
cv2.polylines(image,np.int32([np.transpose(pts)]),True,(255,0,0),3)
return d8_compressed_image_from_cv_image(image[:,:,::-1])
```
#### File: site-packages/contracts/backported.py
```python
import sys
from inspect import ArgSpec
import six
if sys.version_info[0] >= 3: # pragma: no cover
from inspect import getfullargspec
unicode = str
else: # pragma: no cover
from collections import namedtuple
FullArgSpec = namedtuple('FullArgSpec', 'args varargs varkw defaults'
' kwonlyargs kwonlydefaults annotations')
from inspect import getargspec as _getargspec
def getargspec(function):
# print 'hasattr im_func', hasattr(function, 'im_func')
if hasattr(function, 'im_func'):
# print('this is a special function : %s' % function)
# For methods or classmethods drop the first
# argument from the returned list because
# python supplies that automatically for us.
# Note that this differs from what
# inspect.getargspec() returns for methods.
# NB: We use im_func so we work with
# instancemethod objects also.
x = _getargspec(function.im_func)
new_args = x.args[1:]
spec = ArgSpec(args=new_args, varargs=x.varargs,
keywords=x.keywords, defaults=x.defaults)
return spec
# print 'calling normal %s' % function
return _getargspec(function)
def getfullargspec(function):
spec = getargspec(function)
fullspec = FullArgSpec(args=spec.args, varargs=spec.varargs,
varkw=spec.keywords,
defaults=spec.defaults, kwonlyargs=[],
kwonlydefaults=None,
annotations={})
return fullspec
# Backport inspect.getcallargs from Python 2.7 to 2.6
if sys.version_info[:2] == (2, 7):
# noinspection PyUnresolvedReferences
from inspect import getcallargs
else: # pragma: no cover
inPy3k = sys.version_info[0] == 3
from inspect import ismethod
def getcallargs(func, *positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'.
"""
args, varargs, varkw, defaults, \
kwonlyargs, kwonlydefaults, annotations = getfullargspec(func)
if kwonlyargs:
raise ValueError("I'm sorry, I don't have the logic to use kwonlyargs. "
"Perhapse you can help PyContracts and implement this? Thanks.")
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple
# parameter unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, six.string_types):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg, subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg, six.string_types):
return arg in arg2value
return arg in assigned_tuple_params
if not inPy3k:
im_self = getattr(func, 'im_self', None)
else:
im_self = getattr(func, '__self__', None)
if ismethod(func) and im_self is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (im_self,) + positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
for arg, value in zip(args, positional):
assign(arg, value)
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos - num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
for arg in args:
if isinstance(arg, six.string_types) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
if defaults: # fill in any missing values with the defaults
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value
```
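The fallback above reproduces the behaviour of `inspect.getcallargs` from newer Pythons; a short sketch of what that mapping looks like, using the standard-library version for illustration:

```python
from inspect import getcallargs

def pose(x, y, theta=0.0, *rest, **extra):
    return (x, y, theta)

# every argument name, including the * and ** collectors, maps to its bound value
bound = getcallargs(pose, 1.0, 2.0, 0.5, 'spare', frame='map')
assert bound == {'x': 1.0, 'y': 2.0, 'theta': 0.5,
                 'rest': ('spare',), 'extra': {'frame': 'map'}}
```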
#### File: site-packages/contracts/enabling.py
```python
from . import logger
import os
class Switches:
# default to ENV variable
disable_all = os.environ.get('DISABLE_CONTRACTS', False)
def disable_all():
""" Disables all contracts checks. """
# print('disable_all()')
Switches.disable_all = True
logger.info('All contracts checking disabled.')
def enable_all():
"""
Enables all contracts checks.
Can be overridden by an environment variable.
"""
# print('enable_all()')
if not os.environ.get('DISABLE_CONTRACTS', False):
Switches.disable_all = False
logger.info('All contracts checking enabled.')
def all_disabled():
# print('all_Disabled? %s' % Switches.disable_all)
""" Returns true if all contracts checks are disabled. """
return Switches.disable_all
```
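These switches back the `disable_all()` / `enable_all()` helpers exported by the package; a hedged usage sketch, assuming PyContracts is installed and exposes them at the top level as recent releases do:

```python
import contracts
from contracts import contract, ContractNotRespected

@contract(x='int,>0')
def halve(x):
    return x // 2

try:
    halve(-4)              # violates '>0' while checking is enabled
except ContractNotRespected:
    pass

contracts.disable_all()    # e.g. in production, or via DISABLE_CONTRACTS=1
assert halve(-4) == -2     # checks are skipped, the call goes through

contracts.enable_all()     # no-op if DISABLE_CONTRACTS is set in the environment
```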
#### File: contracts/library/comparison.py
```python
import math
from pyparsing import Or
from ..interface import Contract, ContractNotRespected, RValue, eval_in_context
from ..syntax import W, add_contract, O, Literal, isnumber, rvalue
class CheckOrder(Contract):
conditions = {
'=': (False, True, False), # smaller, equal, larger flags
'==': (False, True, False),
'!=': (True, False, True),
'>': (False, False, True),
'>=': (False, True, True),
'<': (True, False, False),
'<=': (True, True, False)
}
def __init__(self, expr1, glyph, expr2, where=None):
Contract.__init__(self, where)
assert isinstance(expr1, RValue) or expr1 is None
assert isinstance(expr2, RValue)
self.expr1 = expr1
self.glyph = glyph
self.expr2 = expr2
self.smaller, self.equal, self.larger = CheckOrder.conditions[glyph]
def check_contract(self, context, value, silent):
if self.expr1 is None:
val1 = value
else:
# val1 = context.eval(self.expr1, self)
val1 = eval_in_context(context, self.expr1, self)
# val2 = context.eval(self.expr2, self)
val2 = eval_in_context(context, self.expr2, self)
# Check if we only need to check equality
# in that case, we don't care for the type
# FIXME: add support for != here
pure_equality = (
(self.smaller, self.equal, self.larger) == (False, True, False)
or
(self.smaller, self.equal, self.larger) == (True, False, True))
if pure_equality:
# but we want them to be either numbers or same type
if (not (isnumber(val1) and isnumber(val2))) and \
(not isinstance(val1, val2.__class__)):
msg = ("I won't let you compare two different types if they "
"are not numbers (%s,%s)" % (type(val1), type(val2)))
raise ContractNotRespected(self, msg, (val1, val2), context)
ok = (val1 == val2) ^ (not self.equal)
else:
# We potentially want < or >. They must be numbers.
for val in [val1, val2]:
if not isnumber(val):
msg = ('I can only compare the order of numbers, not %r.' %
val.__class__.__name__)
raise ContractNotRespected(self, msg, (val1, val2),
context)
if math.isnan(val1) or math.isnan(val2):
msg = ('I cannot compare NaN (checking: %s %s %s)'
% (val1, self.glyph, val2))
raise ContractNotRespected(self, msg, (val1, val2), context)
if val1 < val2:
ok = self.smaller
elif val1 > val2:
ok = self.larger
else:
assert val1 == val2
ok = self.equal
if not ok:
error = ('Condition %s %s %s not respected' %
(val1, self.glyph, val2))
raise ContractNotRespected(contract=self, error=error,
value=value, context=context)
def __str__(self):
if self.expr1 is not None:
return '%s%s%s' % (self.expr1, self.glyph, self.expr2)
else:
return '%s%s' % (self.glyph, self.expr2)
def __repr__(self):
return 'CheckOrder(%r,%r,%r)' % (self.expr1, self.glyph, self.expr2)
@staticmethod
def parse_action(s, loc, tokens):
expr1 = tokens.get('expr1', None)
glyph = "".join(tokens['glyph'])
expr2 = tokens['expr2']
where = W(s, loc)
return CheckOrder(expr1, glyph, expr2, where=where)
comparisons_expr = {}
for glyph in CheckOrder.conditions:
if glyph == '!=':
# special case: ! must be followed by =
glyph_expression = Literal('!') - Literal('=')
glyph_expression.setName('!=')
else:
glyph_expression = Literal(glyph)
# 2015-05: not sure why this doesn't work and the alternative with + does
# expr = O(rvalue('expr1')) + glyph_expression('glyph') - rvalue('expr2')
expr = O(rvalue('expr1')) + glyph_expression('glyph') + rvalue('expr2')
expr.setParseAction(CheckOrder.parse_action)
add_contract(expr)
comparisons_expr[glyph] = expr
comparison_expr = Or(exprs=list(comparisons_expr.values()))
```
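The comparison grammar above is what drives contract strings such as `'>0'`; a small usage sketch through the public `check` helper, assuming PyContracts is installed:

```python
from contracts import check, ContractNotRespected

check('>0', 5)           # passes: 5 > 0
check('int,>=0', 0)      # combined with a type clause

try:
    check('>0', -1)
except ContractNotRespected as e:
    # the error reports the failed condition, e.g. that -1 > 0 is not respected
    print(e)
```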
#### File: contracts/library/compositions.py
```python
from ..interface import Contract, ContractNotRespected, add_prefix
from ..pyparsing_utils import myOperatorPrecedence
from ..syntax import ParsingTmp, W, opAssoc, simple_contract
from .extensions import Extension
from .suggester import create_suggester
NOT_GLYPH = '!'
AND_GLYPH = ','
OR_GLYPH = '|'
class Logical(object):
def __init__(self, glyph, precedence):
self.glyph = glyph
self.precedence = precedence
def __str__(self):
s = self.glyph.join(self._convert(x) for x in self.clauses)
return s
def _convert(self, x):
if isinstance(x, Logical) and x.precedence < self.precedence:
return '(%s)' % x
return '%s' % x
class OR(Logical, Contract):
def __init__(self, clauses, where=None):
assert isinstance(clauses, list)
assert len(clauses) >= 2
Contract.__init__(self, where)
Logical.__init__(self, OR_GLYPH, 1)
self.clauses = clauses
def _check_quick(self, context, value):
""" Returns True if this checks out. """
# first make a quick pass
for c in self.clauses:
try:
# try with fake context
c._check_contract(context.copy(), value, silent=True)
# if ok, do with main context
c._check_contract(context, value, silent=True)
return True
except ContractNotRespected as e:
pass
return False
def check_contract(self, context, value, silent):
orig = context.copy()
if self._check_quick(context, value):
return
else:
if silent:
msg = '(Error description suppressed.)'
raise ContractNotRespected(contract=self, error=msg,
value=value, context=context)
# otherwise need to do it again with detailed error messages
self.get_error(orig, value)
def get_error(self, context, value):
""" This assumes that we are going to fail """
exceptions = []
for c in self.clauses:
try:
# try with fake context
c._check_contract(context.copy(), value, silent=False)
# if ok, do with main context
c._check_contract(context, value, silent=False)
assert False, "We should not be here."
except ContractNotRespected as e:
exceptions.append((c, e))
else:
msg = self._format_exceptions(exceptions)
raise ContractNotRespected(contract=self, error=msg,
value=value, context=context)
def _format_exceptions(self, exceptions):
msg = ('Could not satisfy any of the %d clauses in %s.'
% (len(self.clauses), self))
for i, ex in enumerate(exceptions):
c, e = ex
msg += '\n ---- Clause #%d: %s\n' % (i + 1, c)
msg += add_prefix('%s' % e, ' | ')
msg += '\n ------- (end clauses) -------'
return msg
def __repr__(self):
s = 'OR(%r)' % self.clauses
return s
@staticmethod
def parse_action(string, location, tokens):
l = list(tokens[0])
clauses = [l.pop(0)]
while l:
glyph = l.pop(0) # @UnusedVariable
assert glyph == OR_GLYPH
operand = l.pop(0)
clauses.append(operand)
where = W(string, location)
return OR(clauses, where=where)
class And(Logical, Contract):
def __init__(self, clauses, where=None):
assert isinstance(clauses, list)
assert len(clauses) >= 2, clauses
Contract.__init__(self, where)
Logical.__init__(self, AND_GLYPH, 2)
self.clauses = clauses
def check_contract(self, context, value, silent):
for c in self.clauses:
c._check_contract(context, value, silent)
def __repr__(self):
s = 'And(%r)' % self.clauses
return s
@staticmethod
def parse_action(string, location, tokens):
l = list(tokens[0])
clauses = [l.pop(0)]
while l:
glyph = l.pop(0) # @UnusedVariable
assert glyph == AND_GLYPH
operand = l.pop(0)
clauses.append(operand)
where = W(string, location)
return And(clauses, where=where)
class Not(Logical, Contract):
def __init__(self, clauses, where=None):
assert isinstance(clauses, list)
assert len(clauses) == 1, clauses
Contract.__init__(self, where)
Logical.__init__(self, NOT_GLYPH, 3)
self.clauses = clauses
def check_contract(self, context, value, silent):
clause = self.clauses[0]
try:
clause._check_contract(context, value, silent)
except ContractNotRespected:
pass
else:
msg = "Shouldn't have satisfied the clause %s." % clause
raise ContractNotRespected(contract=self, error=msg,
value=value, context=context)
@staticmethod
def parse_action(string, location, tokens):
l = list(tokens[0])
assert l.pop(0) == NOT_GLYPH
where = W(string, location)
return Not(l, where=where)
def __repr__(self):
s = 'Not(%r)' % self.clauses
return s
def __str__(self):
return self.glyph + self._convert(self.clauses[0])
suggester = create_suggester(get_options=lambda: ParsingTmp.keywords +
list(Extension.registrar.keys()))
baseExpr = simple_contract | suggester
baseExpr.setName('Simple contract (recovering)')
op = myOperatorPrecedence
# op = operatorPrecedence
composite_contract = op(baseExpr, [
(NOT_GLYPH, 1, opAssoc.RIGHT, Not.parse_action),
(AND_GLYPH, 2, opAssoc.LEFT, And.parse_action),
(OR_GLYPH, 2, opAssoc.LEFT, OR.parse_action),
])
composite_contract.setName('NOT/OR/AND contract')
or_contract = op(baseExpr, [
(OR_GLYPH, 2, opAssoc.LEFT, OR.parse_action),
])
or_contract.setName('OR contract')
```
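The `!`, `,` and `|` glyphs defined here compose arbitrary clauses; a hedged sketch of how they read from the user side:

```python
from contracts import check, ContractNotRespected

check('int|str', 'hello')     # OR: either clause may match
check('int,>0,<10', 5)        # AND: every clause must hold

try:
    check('!str', 'oops')     # NOT: the value must not satisfy the clause
except ContractNotRespected:
    print('rejected, as expected')
```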
#### File: contracts/library/extensions.py
```python
from ..interface import Contract, ContractNotRespected, describe_value
from ..syntax import (Combine, Word, W, alphas, alphanums, oneOf,
ParseException, ZeroOrMore, S, rvalue,
delimitedList, Optional)
from pyparsing import ParseFatalException
class Extension(Contract):
registrar = {}
def __init__(self, identifier, where=None, args=tuple(), kwargs=None):
assert identifier in Extension.registrar
self.contract = Extension.registrar[identifier]
self.identifier = identifier
self.args = args
self.kwargs = kwargs or {}
Contract.__init__(self, where)
def __str__(self):
inside = []
if self.args:
inside.extend(map(str, self.args))
if self.kwargs:
ks = sorted(self.kwargs)
inside.extend(["%s=%s" % (k, self.kwargs[k]) for k in ks])
s = self.identifier
if inside:
return self.identifier + "(" + ",".join(inside) + ")"
else:
return s
def __repr__(self):
if self.args or self.kwargs:
return ("Extension(%r, args=%r, kwargs=%r)" %
(self.identifier, self.args, self.kwargs))
return "Extension(%r)" % self.identifier
def check_contract(self, context, value, silent):
context['args'] = tuple(a.eval(context) for a in self.args)
context['kwargs'] = dict((k, v.eval(context)) for
k, v in self.kwargs.items())
self.contract._check_contract(context, value, silent)
@staticmethod
def parse_action(s, loc, tokens):
identifier = tokens[0]
args = tuple()
kwargs = {}
if len(tokens) == 2:
args, kwargs = tokens[1]
args = tuple(args)
if not identifier in Extension.registrar:
raise ParseException('Unknown extension contract %r' % identifier)
# from contracts.library.separate_context import SeparateContext
contract_ext = Extension.registrar[identifier]
if isinstance(contract_ext, CheckCallable):
callable_thing = contract_ext.callable
test_args = ('value',) + args
from contracts.inspection import check_callable_accepts_these_arguments, InvalidArgs
try:
check_callable_accepts_these_arguments(callable_thing, test_args, kwargs)
except InvalidArgs as e:
msg = 'The callable %s cannot accept these arguments ' % callable_thing
msg += 'args = %s, kwargs = %s ' % (test_args, kwargs)
msg += '%s' % e
raise ParseFatalException(msg)
where = W(s, loc)
return Extension(identifier, where, args, kwargs)
# We want to be pickable so we do not save self.contract
# which might point to a lambda
def __getstate__(self):
return {'identifier': self.identifier,
'args': self.args,
'kwargs': self.kwargs}
def __setstate__(self, d):
self.identifier = d['identifier']
self.contract = Extension.registrar[self.identifier]
self.args = d['args']
self.kwargs = d['kwargs']
class CheckCallable(Contract):
def __init__(self, callable):
self.callable = callable
Contract.__init__(self, where=None)
def check_contract(self, context, value, silent):
allowed = (ValueError, AssertionError)
args = context.get('args', tuple())
kwargs = context.get('kwargs', {})
try:
result = self.callable(value, *args, **kwargs)
except allowed as e: # failed
raise ContractNotRespected(self, str(e), value, context)
if result in [None, True]:
# passed
pass
elif result == False:
msg = ('Value does not pass criteria of %s() (module: %s).' %
(get_callable_name(self.callable),
get_callable_module(self.callable)))
raise ContractNotRespected(self, msg, value, context)
else:
msg = ('I expect that %r returns either True, False, None; or '
'raises a ValueError exception. Instead, I got %s.' %
(self.callable, describe_value(value)))
raise ValueError(msg)
def __repr__(self):
""" Note: this contract is not representable, but anyway it is
only used by Extension, which serializes using the identifier. """
return 'CheckCallable(%r)' % self.callable
def __str__(self):
""" Note: this contract is not representable, but anyway it is only
used by Extension, which serializes using the identifier. """
return get_callable_name(self.callable)
def get_callable_name(c):
""" Get a displayable name for the callable even if __name__
is not available. """
try:
return c.__name__ + '()'
except:
return str(c)
def get_callable_module(c):
try:
return c.__module__
except:
return '(No __module__ attr)'
def describe_callable(c):
return get_callable_name(c) + ' module: %s' % get_callable_module(c)
class CheckCallableWithSelf(Contract):
def __init__(self, callable): # @ReservedAssignment
self.callable = callable
Contract.__init__(self, where=None)
def check_contract(self, context, value, silent):
args = context.get('args', tuple())
kwargs = context.get('kwargs', {})
if not 'self' in context:
msg = ('You can only call this contract in the context of '
' a function call to a regular method.')
raise ContractNotRespected(self, msg, value, context)
args = (context['self'], value) + args
allowed = (ValueError, AssertionError)
try:
result = self.callable(*args, **kwargs)
except allowed as e: # failed
raise ContractNotRespected(self, str(e), value, context)
if result in [None, True]:
# passed
pass
elif result == False:
msg = ('Value does not pass criteria of %s.' %
describe_callable(self.callable))
raise ContractNotRespected(self, msg, value, context)
else:
msg = ('I expect that %r returns either True, False, None; or '
'raises a ValueError exception. Instead, I got %s.' %
(self.callable, describe_value(value)))
raise ValueError(msg)
def __repr__(self):
""" Note: this contract is not representable, but anyway it is only
used by Extension, which serializes using the identifier. """
return 'CheckCallableWithSelf(%r)' % self.callable
def __str__(self):
""" Note: this contract is not representable, but anyway it is only
used by Extension, which serializes using the identifier. """
return 'function %s()' % get_callable_name(self.callable)
w = Word('_' + alphanums)
arg = rvalue.copy()
kwarg = w + ZeroOrMore(' ') + S('=') + ZeroOrMore(' ') + rvalue
kwarg.setParseAction(lambda s, loc, tokens: {tokens[0]: tokens[1]})
def build_args_kwargs(s, loc, tokens):
return (tuple(t for t in tokens if not isinstance(t, dict)),
dict((k, v) for t in tokens if isinstance(t, dict)
for k, v in t.items()))
arglist = delimitedList(kwarg | arg)
arglist.setParseAction(build_args_kwargs)
identifier_expression = (Combine(oneOf(list(alphas)) + Word('_' + alphanums)) +
Optional(S('(') + arglist + S(')')))
identifier_contract = identifier_expression.copy().setParseAction(
Extension.parse_action)
```
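`Extension.registrar` is what `new_contract` fills in when it is handed a callable; a hedged sketch of registering and composing a custom identifier:

```python
from contracts import new_contract, check, ContractNotRespected

# a callable extension: return False (or raise ValueError) to signal failure
@new_contract
def even(x):
    return x % 2 == 0

check('list(even)', [2, 4, 6])   # the new identifier composes like a keyword

try:
    check('even', 3)
except ContractNotRespected:
    pass
```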
#### File: contracts/library/files.py
```python
import io
import sys
from ..interface import Contract, ContractNotRespected
from ..syntax import (add_contract, add_keyword, Keyword, W)
inPy2 = sys.version_info[0] == 2
if inPy2:
file_type = (file, io.IOBase)
else:
file_type = io.IOBase
class File(Contract):
def __init__(self, where=None):
Contract.__init__(self, where)
def check_contract(self, context, value, silent):
if not isinstance(value, file_type):
error = 'Expected a file, got %r.' % value.__class__.__name__
raise ContractNotRespected(contract=self, error=error,
value=value, context=context)
def __str__(self):
return 'file'
def __repr__(self):
return 'File()'
@staticmethod
def parse_action(s, loc, _):
where = W(s, loc)
return File(where=where)
file_contract = Keyword('file')
file_contract.setParseAction(File.parse_action)
add_contract(file_contract)
add_keyword('file')
```
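A quick sketch of the `file` keyword in use (any object derived from `io.IOBase` passes):

```python
from contracts import check, ContractNotRespected

with open(__file__) as f:
    check('file', f)                  # an open file object satisfies the contract

try:
    check('file', 'not-a-file.txt')   # a plain path string does not
except ContractNotRespected:
    pass
```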
#### File: contracts/library/tuple.py
```python
from ..interface import Contract, ContractNotRespected
from ..syntax import(add_contract, W, contract_expression, O, S, ZeroOrMore,
Group, add_keyword, Keyword)
from .compositions import or_contract
class Tuple(Contract):
def __init__(self, length=None, elements=None, where=None):
Contract.__init__(self, where)
self.length = length
self.elements = elements
assert elements is None or isinstance(elements, list)
if elements:
for e in elements:
assert isinstance(e, Contract)
def check_contract(self, context, value, silent):
if not isinstance(value, tuple):
error = 'Expected a tuple, got %r.' % value.__class__.__name__
raise ContractNotRespected(contract=self, error=error,
value=value, context=context)
if self.length is not None:
self.length._check_contract(context, len(value), silent)
if self.elements is not None:
if len(value) != len(self.elements):
error = ('Expected a tuple of length '
'%s, got %r of length %s.' %
(len(self.elements), value, len(value)))
raise ContractNotRespected(contract=self, error=error,
value=value, context=context)
for i in range(len(value)):
self.elements[i]._check_contract(context, value[i], silent)
def __repr__(self):
return 'Tuple(%r,%r)' % (self.length, self.elements)
def __str__(self):
s = 'tuple'
if self.length is not None:
s += '[%s]' % self.length
def rep(x):
from .compositions import And
if isinstance(x, And):
return "(%s)" % x
else:
return "%s" % x
if self.elements is not None:
s += '(%s)' % ",".join(rep(x) for x in self.elements)
return s
@staticmethod
def parse_action(s, loc, tokens):
where = W(s, loc)
length = tokens.get('length', [None])[0]
# elements = tokens.get('elements', [None])[0]
if 'elements' in tokens:
elements = list(tokens['elements'])
else:
elements = None
assert elements is None or length is None
assert length is None or isinstance(length, Contract), ("Wrong type %r"
% length)
if elements:
for e in elements:
assert isinstance(e, Contract), ("Wrong type %s (%r)"
% (type(e), e))
return Tuple(length, elements, where=where)
# if you use contract instead of simple_contract, it will be matched as And
inside = (S('(') - contract_expression - S(')')) | or_contract
inside.setName('Any contract for tuple elements (use parenthesis for AND)')
elements = (Group(S('(') - inside - ZeroOrMore(S(',')
- inside) - S(')'))('elements'))
elements.setName('Tuple elements contract.')
length = Group(S('[') - contract_expression - S(']'))('length')
length.setName('Tuple length contract.')
tuple_contract = Keyword('tuple') - O(length | elements)
tuple_contract.setName('tuple contract')
add_contract(tuple_contract.setParseAction(Tuple.parse_action))
add_keyword('tuple')
```
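A hedged sketch of the three `tuple` forms that the parse action above accepts:

```python
from contracts import check, ContractNotRespected

check('tuple', (1, 2))                 # any tuple
check('tuple[2]', (1, 2))              # length contract
check('tuple(int,str)', (3, 'abc'))    # one contract per element

try:
    check('tuple(int,str)', (3, 4))    # second element is not a str
except ContractNotRespected:
    pass
```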
#### File: contracts/library/variables.py
```python
import six
from ..interface import Contract, ContractNotRespected, RValue, describe_value
from ..syntax import (W, oneOf, FollowedBy, NotAny)
class BindVariable(Contract):
def __init__(self, variable, allowed_types, where=None):
assert isinstance(variable, six.string_types) and len(variable) == 1
assert allowed_types, '%r' % allowed_types
Contract.__init__(self, where)
self.variable = variable
self.allowed_types = allowed_types
def check_contract(self, context, value, silent):
if self.variable in context:
expected = context[self.variable]
if not (expected == value):
# TODO: add where it was bound
error = (
'Expected value for %r was: %s\n'
' instead I received: %s' %
(self.variable, describe_value(expected),
describe_value(value)))
raise ContractNotRespected(contract=self, error=error,
value=value, context=context)
else:
# first use: bind the variable
if not isinstance(value, self.allowed_types):
error = ('Variable %r can only bind to %r, not %r.' %
(self.variable, self.allowed_types,
value.__class__.__name__))
raise ContractNotRespected(self, error, value, context)
context[self.variable] = value
def __str__(self):
return self.variable
def __repr__(self):
# XXX: invalid if tuple
return 'BindVariable(%r,%s)' % (self.variable,
self.allowed_types.__name__)
@staticmethod
def parse_action(allowed_types):
def parse(s, loc, tokens):
where = W(s, loc)
variable = tokens[0]
assert len(variable) == 1, \
('Wrong syntax, matched %r as variable in %r.'
% (variable, s))
# print ('Matched %r as variable in %r.' % (variable, s))
return BindVariable(variable, allowed_types, where=where)
return parse
class VariableRef(RValue):
def __init__(self, variable, where=None):
assert isinstance(variable, six.string_types)
self.where = where
self.variable = variable
def eval(self, context): # @ReservedAssignment
var = self.variable
if not var in context:
raise ValueError('Unknown variable %r.' % var)
return context[var]
def __repr__(self):
return "VariableRef(%r)" % self.variable
def __str__(self):
return "%s" % self.variable
@staticmethod
def parse_action(s, loc, tokens):
where = W(s, loc)
return VariableRef(tokens[0], where=where)
alphabetu = 'A B C D E F G H I J K L M N O P Q R S T U W V X Y Z '
alphabetl = 'a b c d e f g h i j k l m n o p q r s t u w v x y z '
# Special case: allow an expression like AxBxC
nofollow = 'a b c d e f g h i j k l m n o p q r s t u w v y z'
# also do not commit if part of word (SEn, a_2)
nofollow += ' A B C D E F G H I J K L M N O P Q R S T U W V X Y Z '
nofollow += ' 0 1 2 3 4 5 6 7 8 9 _'
# but recall 'axis_angle'
int_variables = (oneOf(alphabetu.split())
+ FollowedBy(NotAny(oneOf(nofollow.split()))))
misc_variables = (oneOf(alphabetl.split())
+ FollowedBy(NotAny(oneOf(nofollow.split() + ['x']))))
int_variables_ref = int_variables.copy().setParseAction(
VariableRef.parse_action)
misc_variables_ref = misc_variables.copy().setParseAction(
VariableRef.parse_action)
#int_variables = oneOf(alphabetu.split()) + FollowedBy(White() ^ 'x')
# These must be followed by whitespace; punctuation
#misc_variables = oneOf(alphabet.lower()) + FollowedBy(White())
nofollow = 'a b c d e f g h i j k l m n o p q r s t u w v y z '
nofollow += ' * - + / '
nofollow += ' A B C D E F G H I J K L M N O P Q R S T U W V X Y Z '
nofollow += ' 0 1 2 3 4 5 6 7 8 9 _'
int_variables2 = (oneOf(alphabetu.split())
+ FollowedBy(NotAny(oneOf(nofollow.split()))))
misc_variables2 = (oneOf(alphabetl.split())
+ FollowedBy(NotAny(oneOf(nofollow.split() + ['x']))))
int_variables_contract = int_variables2.setParseAction(
BindVariable.parse_action(int))
misc_variables_contract = misc_variables2.setParseAction(
BindVariable.parse_action(object))
```
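Upper-case letters bind integers and lower-case letters bind arbitrary objects, which is what makes size expressions like `array[HxWx3]` work; a small sketch of the binding behaviour:

```python
from contracts import check, ContractNotRespected

# N binds to the list length on first use and must stay consistent afterwards
check('list[N],N>0', [10, 20, 30])

try:
    # the two lists have different lengths, so N cannot bind consistently
    check('tuple(list[N],list[N])', ([1, 2], [1, 2, 3]))
except ContractNotRespected:
    pass
```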
#### File: contracts/testing/array_extended_test.py
```python
try:
import numpy
except ImportError:
pass
else:
import unittest
from contracts import decorate, new_contract, ContractNotRespected
new_contract('rgb', 'array[HxWx3],H>0,W>0')
new_contract('rgba', 'array[HxWx4],H>0,W>0')
def blend_function(image1, image2, bug=False):
"""
Blends two RGB or RGBA images together.
:param image1: The first image to blend.
:type image1: (rgb|rgba),array[HxWx*]
:param image2: The second image to blend.
:type image2: (rgb|rgba),array[HxWx*]
:param bug: Introduce a bug to check the contracts.
:type bug: bool
:return: The blended image.
:rtype: rgb,array[HxWx3]
"""
H, W = image1.shape[0], image1.shape[1]
if bug:
# if we want to show a bug, return a different shape
W += 1
result = numpy.zeros((H, W, 3), 'uint8')
# put here the actual function
image2
return result
im_float = numpy.zeros((10, 10, 3), dtype='float32')
rgb_small = numpy.zeros((10, 10, 3), dtype='uint8')
rgb_large = numpy.zeros((20, 20, 3), dtype='uint8')
rgba_small = numpy.zeros((10, 10, 4), dtype='uint8')
rgba_large = numpy.zeros((20, 20, 4), dtype='uint8')
class ArrayTest(unittest.TestCase):
def setUp(self):
self.blend = decorate(blend_function)
def test_correct_behavior(self):
self.blend(rgb_small, rgb_small)
self.blend(rgb_small, rgba_small)
self.blend(rgba_small, rgb_small)
self.blend(rgb_large, rgba_large)
self.blend(rgba_large, rgb_large)
def test_incorrect1(self):
self.assertRaises(ContractNotRespected, self.blend, None, None)
def test_incorrect2(self):
self.assertRaises(ContractNotRespected, self.blend,
None, rgb_small)
def test_incorrect3(self):
self.assertRaises(ContractNotRespected, self.blend,
rgb_small, None)
def test_incorrect4(self):
self.assertRaises(ContractNotRespected, self.blend,
rgb_small, rgb_large)
def test_incorrect5(self):
self.assertRaises(ContractNotRespected, self.blend,
rgb_small, rgb_large)
def test_incorrect6(self):
# check that rtype checking works, introduce a bug
self.assertRaises(ContractNotRespected, self.blend, rgb_small,
rgb_small, bug=True)
```
#### File: contracts/testing/friendliness_statistics.py
```python
from contracts.test_registrar import (good_examples, semantic_fail_examples,
contract_fail_examples)
from contracts import parse, ContractSyntaxError
def get_all_strings():
all_strings = (good_examples + semantic_fail_examples +
contract_fail_examples)
for contract, _, _ in all_strings:
if isinstance(contract, list):
for c in contract:
yield c
else:
yield contract
# went from 89.9% to 95.7%
def main():
examples = get_all_strings()
differences = run_joker(examples)
diff = list(differences)
unfriendliness = sum(diff) / len(diff)
friendliness = 100 - 100 * unfriendliness
print("Friendliness: %.2f%% " % friendliness)
def replace_one(s, i, c):
assert i >= 0 and i < len(s)
return s[:i] + c + s[i + 1:]
assert replace_one('python', 1, 'a') == 'pathon'
s = 'python'
for i in range(len(s)):
s2 = replace_one(s, i, '~')
s3 = replace_one(s2, i, s[i])
assert s == s3, 'i=%d %r -> %r -> %r' % (i, s, s2, s3)
def run_joker(examples):
for s in examples:
# make sure we can parse it
parse(s)
# now alter one letter
for i in range(len(s)):
s2 = replace_one(s, i, '~')
try:
parse(s2)
except ContractSyntaxError as e:
detected = e.where.col - 1
displacement = i - detected
# if displacement < 0:
# print displacement
# print e
assert displacement >= 0
value = displacement * 1.0 / len(s)
if displacement > 0:
print(e)
# assert False
yield value
if __name__ == '__main__':
main()
```
#### File: contracts/testing/test_particulars.py
```python
from contracts import parse
from contracts.interface import Where, ContractSyntaxError
from contracts.library import * # @UnusedWildImport
from contracts.syntax import ParseFatalException, ParseException
import unittest
def expression_fails(expression, string, all=True): # @ReservedAssignment
try:
c = expression.parseString(string, parseAll=all)
except ParseException:
pass
except ParseFatalException:
pass
else:
raise Exception('Expression: %s\nparsed to: %s\n(%r)' %
(string, c, c))
def expression_parses(expression, string, all=True): # @ReservedAssignment
try:
expression.parseString(string, parseAll=all)
except ParseException as e:
where = Where(string, e.loc)
msg = 'Error in parsing string: %s' % e
raise ContractSyntaxError(msg, where=where)
except ParseFatalException as e:
where = Where(string, e.loc)
msg = 'Fatal error in parsing string: %s' % e
raise ContractSyntaxError(msg, where=where)
class TestParticular(unittest.TestCase):
def test_variables(self):
for s in ['a', 'b', 'c', 'd', 'x', 'y']:
self.assertEqual(parse(s), BindVariable(s, object))
U = s.upper()
self.assertEqual(parse(U), BindVariable(U, int))
def test_variable_parseable(self):
for s in ['a', 'b', 'c', 'd', 'x', 'y']:
expression_fails(int_variables_contract, s)
expression_parses(misc_variables_contract, s)
U = s.upper()
expression_parses(int_variables_contract, U)
expression_fails(misc_variables_contract, U)
def test_partial(self):
expression_parses(int_variables_contract, 'A', all=False)
expression_fails(int_variables_contract, 'A A', all=True)
expression_parses(int_variables_contract, 'A', all=True)
expression_fails(int_variables_contract, 'A*', all=False)
class TestBindingVsRef(unittest.TestCase):
def test_binding_vs_ref(self):
self.assertEqual(parse('list[N]'), List(BindVariable('N', int), None))
def test_binding_vs_ref2(self):
self.assertEqual(parse('N'), BindVariable('N', int))
```
#### File: contracts/useful_contracts/numpy_specific.py
```python
from contracts import new_contract
import numpy as np
from contracts.interface import describe_value, describe_type
__all__ = ['finite']
@new_contract
def finite(x):
return np.isfinite(x).all()
new_contract('np_scalar_uint', 'np_uint8|np_uint16|np_uint32|np_uint64')
new_contract('np_scalar_int', 'np_int8|np_int16|np_int32|np_int64')
new_contract('np_scalar_float', 'np_float32|np_float64')
new_contract('np_scalar_type', 'np_scalar_int|np_scalar_uint|np_scalar_float')
@new_contract
def np_zeroshape_array(x):
# scalars = [
# np.int, # Platform integer (normally either int32 or int64)
# np.int8, # Byte (-128 to 127)
# np.int16, # Integer (-32768 to 32767)
# np.int32, # Integer (-2147483648 to 2147483647)
# np.int64, # Integer (9223372036854775808 to 9223372036854775807)
# np.uint8, # Unsigned integer (0 to 255)
# np.uint16, # Unsigned integer (0 to 65535)
# np.uint32, # Unsigned integer (0 to 4294967295)
# np.uint64, # Unsigned integer (0 to 18446744073709551615)
# np.float, # Shorthand for float64.
# np.float16, # Half precision float: sign bit, 5 bits exponent, 10 bits mantissa
# np.float32, # Single precision float: sign bit, 8 bits exponent, 23 bits mantissa
# np.float64, # Double precision float: sign bit, 11 bits exponent, 52 bits mantissa
# np.complex, # Shorthand for complex128.
# np.complex64, # Complex number, represented by two 32-bit floats (real and imaginary components)
# np.complex128
# ]
#
# if isinstance(x, tuple(scalars)):
# return
#
#
if not isinstance(x, np.ndarray):
msg = 'Not an array: %s %s ' % (type(x), describe_type(x))
raise ValueError(msg)
if not x.shape == ():
msg = 'Not a scalar: %s' % describe_value(x)
raise ValueError(msg)
new_contract('np_scalar', 'np_zeroshape_array|np_scalar_type')
```
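A hedged sketch of how an extension like `finite` is used from a decorated function; to keep it self-contained, the extension is re-registered here under a different, hypothetical name (`all_finite`), so it does not depend on how this module is imported:

```python
import numpy as np
from contracts import contract, new_contract, ContractNotRespected

# stand-in mirroring the 'finite' extension defined above
@new_contract
def all_finite(x):
    return np.isfinite(x).all()

@contract(x='array[N],N>0,all_finite')
def mean_of(x):
    return float(x.mean())

mean_of(np.array([1.0, 2.0, 3.0]))

try:
    mean_of(np.array([1.0, np.inf]))   # np.isfinite(...).all() is False here
except ContractNotRespected:
    pass
```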
#### File: geometry/manifolds/embedding_relations.py
```python
from geometry.poses import SE3_from_SE2
from geometry.poses_embedding import SE2_from_SO2, SO2_project_from_SE2, \
SE3_from_SO3, SO3_project_from_SE3, se2_from_so2, so2_project_from_se2, \
se3_from_so3, so3_project_from_se3, se2_project_from_se3, se3_from_se2, \
SE2_project_from_SE3, R2_project_from_SE2, SE2_from_R2, R3_project_from_SE3, \
SE3_from_R3
from geometry.rotations import SO2_from_angle, angle_from_SO2
from geometry.rotations_embedding import SO2_project_from_SO3, SO3_from_SO2, \
so2_project_from_so3, so3_from_so2
from geometry.spheres import normalize_pi
from geometry.spheres_embedding import S1_project_from_S2, S2_from_S1, \
S1_project_from_R2, S2_project_from_R3
import numpy as np
from .differentiable_manifold import DifferentiableManifold
from .euclidean import R1, R2, R3
from .special_euclidean_algebra import se3, se2
from .special_euclidean_group import SE2, SE3
from .special_orthogonal_algebra import so2, so3
from .special_orthogonal_group import SO2, SO3
from .sphere import S1, S2
from .torus import T1, T2, T3
from .translation_algebra import tran3, tran2, tran1
from .translation_group import Tran2, Tran3
def embedding(small, big, embed_in, project_from, desc=None):
DifferentiableManifold.embedding(small, big, embed_in, project_from,
itype='user', desc=desc)
def isomorphism(A, B, a_to_b, b_to_a, desc=None):
DifferentiableManifold.isomorphism(A, B, a_to_b, b_to_a,
itype='user', desc=desc)
def identity(x):
return x
def tran1_project_from_tran2(b):
return np.array([[0, b[0, -1]],
[0, 0]])
def tran2_from_tran1(b):
return np.array([[0, 0, b[0, -1]],
[0, 0, 0],
[0, 0, 0]])
def tran2_project_from_se2(b):
return np.array([[0, 0, b[0, -1]],
[0, 0, b[1, -1]],
[0, 0, 0]])
def tran3_project_from_se3(b):
return np.array([[0, 0, 0, b[0, -1]],
[0, 0, 0, b[1, -1]],
[0, 0, 0, b[2, -1]],
[0, 0, 0, 0]])
embedding(R1, R2, lambda a: np.array([a[0], 0]),
lambda b: np.array([b[0]]))
embedding(R2, R3, lambda a: np.array([a[0], a[1], 0]),
lambda b: b[0:2])
embedding(SO2, SO3, SO3_from_SO2, SO2_project_from_SO3)
embedding(so2, so3, so3_from_so2, so2_project_from_so3)
embedding(SO2, SE2, SE2_from_SO2, SO2_project_from_SE2)
embedding(SO3, SE3, SE3_from_SO3, SO3_project_from_SE3)
embedding(so3, se3, se3_from_so3, so3_project_from_se3)
embedding(so2, se2, se2_from_so2, so2_project_from_se2)
embedding(se2, se3, se3_from_se2, se2_project_from_se3)
embedding(SE2, SE3, SE3_from_SE2, SE2_project_from_SE3)
embedding(S1, S2, S2_from_S1, S1_project_from_S2)
embedding(S1, R2, identity, S1_project_from_R2)
embedding(S2, R3, identity, S2_project_from_R3)
embedding(R2, SE2, SE2_from_R2, R2_project_from_SE2)
embedding(R3, SE3, SE3_from_R3, R3_project_from_SE3)
def T1_from_S1(a):
return np.array([np.cos(float(a)), np.sin(float(a))])
def S1_from_T1(b):
return normalize_pi(np.array([np.arctan2(b[1], b[0])]))
def SO2_from_T1(x):
return SO2_from_angle(x[0])
def T1_from_SO2(y):
return np.array([angle_from_SO2(y)])
isomorphism(T1, S1, T1_from_S1, S1_from_T1)
isomorphism(T1, SO2, SO2_from_T1, T1_from_SO2)
embedding(T1, T2, lambda a: np.array([a[0], 0]),
lambda b: np.array([b[0]]))
embedding(T2, T3, lambda a: np.array([a[0], a[1], 0]),
lambda b: b[0:2])
# TODO: more tori?
embedding(T1, R1, identity, lambda x: T1.normalize(x))
embedding(T2, R2, identity, lambda x: T2.normalize(x))
embedding(T3, R3, identity, lambda x: T3.normalize(x))
embedding(tran1, tran2, tran2_from_tran1, tran1_project_from_tran2)
embedding(tran2, tran3, lambda b: np.array([[0, 0, 0, b[0, -1]],
[0, 0, 0, b[1, -1]],
[0, 0, 0, 0],
[0, 0, 0, 0]]),
lambda b: np.array([[0, 0, b[0, -1]],
[0, 0, b[1, -1]],
[0, 0, 0]]))
embedding(tran2, se2, identity, tran2_project_from_se2)
embedding(tran3, se3, identity, tran3_project_from_se3)
embedding(Tran2, SE2, identity,
lambda b: np.array([[1, 0, b[0, 2]],
[0, 1, b[1, 2]],
[0, 0, 1]]))
embedding(Tran3, SE3, identity,
lambda b: np.array([[1, 0, 0, b[0, -1]],
[0, 1, 0, b[1, -1]],
[0, 0, 1, b[2, -1]],
[0, 0, 0, 1]]))
```
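Each embed/project pair is expected to satisfy `project(embed(x)) == x`; a standalone numpy check of that round trip for the `tran1`/`tran2` pair defined above:

```python
import numpy as np

def tran2_from_tran1(b):
    return np.array([[0, 0, b[0, -1]],
                     [0, 0, 0],
                     [0, 0, 0]])

def tran1_project_from_tran2(b):
    return np.array([[0, b[0, -1]],
                     [0, 0]])

a = np.array([[0.0, 0.7],
              [0.0, 0.0]])   # an element of the algebra tran(1)
assert np.allclose(tran1_project_from_tran2(tran2_from_tran1(a)), a)
```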
#### File: geometry/manifolds/matrix_lie_group_tangent.py
```python
from contracts import contract
from .differentiable_manifold import DifferentiableManifold
from .matrix_lie_group import MatrixLieGroup
__all__ = ['MatrixLieGroupTangent']
class MatrixLieGroupTangent(DifferentiableManifold):
''' This class represents the tangent bundle of a matrix Lie group
using a tuple (base, v0), where v0 is in the algebra.
Compare with the generic TangentBundle that uses the representation
(base, vel) where vel is tangent at base (it holds that vel=base*v0).
(MatrixLieGroup has different representation)
'''
# TODO: the tangent bundle of a matrix Lie group has more properties than
# this.
# TODO: create tests for all of this
def __init__(self, base_group):
assert isinstance(base_group, MatrixLieGroup)
self.base = base_group
dimension = 2 * base_group.get_dimension()
DifferentiableManifold.__init__(self, dimension=dimension)
def __str__(self):
return "T%se" % self.base
@contract(x='tuple[2]')
def belongs(self, x):
self.base.belongs(x[0])
self.base.get_algebra().belongs(x[1])
def belongs_ts(self, bv):
# TODO: implement
raise ValueError('Not supported')
def project_ts(self, bv): # TODO: test
# TODO: implement
raise ValueError('Not supported')
@contract(a='belongs', b='belongs', returns='>=0')
def distance(self, a, b):
# TODO: implement
raise ValueError('Not supported')
@contract(base='belongs', p='belongs', returns='belongs_ts')
def logmap(self, base, p):
raise ValueError('Not supported')
@contract(bv='belongs_ts', returns='belongs')
def expmap(self, bv):
raise ValueError('Not supported')
@contract(returns='list(belongs)')
def interesting_points(self):
# TODO: write this
return []
@contract(a='belongs')
def friendly(self, a):
'''
Returns a friendly description string for a point on the manifold.
'''
v = self.base.get_algebra().vector_from_algebra(a[1])
return "V(%s,%s)" % (self.base.friendly(a[0]), v.tolist())
```
#### File: geometry/manifolds/matrix_linear_space.py
```python
from abc import abstractmethod
from contracts import check, contract
from geometry.manifolds.differentiable_manifold import DifferentiableManifold
from geometry.utils.numpy_backport import assert_allclose
import numpy as np
__all__ = ['MatrixLinearSpace']
class MatrixLinearSpace(DifferentiableManifold):
@contract(dimension='int,>0')
def __init__(self, dimension, shape):
''' Note dimension is the intrinsic dimension. '''
# TODO: give basis?
self.shape = shape
DifferentiableManifold.__init__(self, dimension=dimension)
def zero(self):
''' Returns the zero element for this algebra. '''
return np.zeros(self.shape)
def norm(self, v):
''' Return the norm of a vector in the algebra.
This is used in :py:class:`MatrixLieGroup` to measure
distances between points in the Lie group.
'''
return np.linalg.norm(v, 2)
# Manifolds methods
def distance(self, a, b):
return self.norm(a - b)
@contract(bv='belongs_ts')
def expmap(self, bv):
base, vel = bv
return base + vel
@contract(base='belongs', p='belongs', returns='belongs_ts')
def logmap(self, base, p):
return base, p - base
@contract(x='array')
def belongs(self, x):
if x.shape != self.shape:
raise ValueError('Expected shape %r, not %r.' %
(self.shape, x.shape))
# TODO: make contract
assert np.all(np.isreal(x)), "Expected real vector"
proj = self.project(x)
assert_allclose(proj, x, atol=1e-8) # XXX: tol
def belongs_ts(self, bv):
# formatm('bv', bv)
check('tuple(shape(x),shape(x))', bv, x=self.shape)
base, vel = bv
self.belongs(base)
self.belongs(vel)
@abstractmethod
def project(self, v): # @UnusedVariable
''' Projects a vector onto this Lie Algebra. '''
def project_ts(self, bv):
base, vel = bv
return base, self.project(vel)
```
#### File: geometry/manifolds/point_set.py
```python
from contracts import contract
from geometry.manifolds.differentiable_manifold import DifferentiableManifold
import numpy as np
# TODO: do some testing
class PointSet(object):
""" A set of points on a differentiable manifold. """
@contract(manifold=DifferentiableManifold)
def __init__(self, manifold, points=[]):
self.points = list(points)
self.manifold = manifold
def __len__(self):
return len(self.points)
def get_points(self):
""" returns an iterable """
return list(self.points)
def add(self, p):
self.points.append(p)
def is_closer_than(self, p, min_dist):
# quick check: check only last
d_last = self.manifold.distance(p, self.points[-1])
if d_last <= min_dist:
return True
return self.minimum_distance(p) <= min_dist
def distances_to_point(self, p):
return np.array([self.manifold.distance(p, p0) for p0 in self.points])
def minimum_distance(self, p):
dists = self.distances_to_point(p)
return np.min(dists)
def average(self):
""" Returns the average point """
# TODO: generalize
return self.manifold.riemannian_mean(self.points)
def closest_index_to(self, p):
dists = self.distances_to_point(p)
return np.argmin(dists)
def centroid_index(self):
avg = self.average()
closest = self.closest_index_to(avg)
return closest
def centroid(self):
""" REturns the point which is closest to the average """
i = self.centroid_index()
return self.points[i]
```
#### File: geometry/manifolds/product_manifold.py
```python
from contracts import contract
import numpy as np
from .differentiable_manifold import DifferentiableManifold
__all__ = ['ProductManifold']
class ProductManifold(DifferentiableManifold):
@contract(components='seq[>=2,N]($DifferentiableManifold)',
weights='None|array[N](>0)')
def __init__(self, components, weights=None):
dim = sum([m.dimension for m in components])
DifferentiableManifold.__init__(self, dimension=dim)
self.components = components
if weights is None:
weights = np.ones(len(components))
self.weights = weights
@contract(a='seq')
def belongs(self, a):
if not len(a) == len(self.components): # XXX: what should I throw?
raise ValueError('I expect a sequence of length %d, not %d.' %
                 (len(self.components), len(a)))
for x, m in zip(a, self.components):
m.belongs(x)
def distance(self, a, b):
''' Computes the geodesic distance between two points. '''
distances = [m.distance(x, y) for x, y, m in zip(a, b, self.components)]
distances = np.array(distances)
return (distances * self.weights).sum()
def logmap(self, base, p):
''' Computes the logarithmic map from the base point to the target point. '''
raise ValueError('Not implemented') # FIXME: finish this
def expmap(self, bv):
raise ValueError('Not implemented') # FIXME: finish this
def project_ts(self, bv):
raise ValueError('Not implemented') # FIXME: finish this
def __repr__(self):
return 'P(%s)' % "x".join([str(x) for x in self.components])
```
#### File: geometry/manifolds/square.py
```python
from contracts import contract
from geometry.manifolds import DifferentiableManifold, RandomManifold
import numpy as np
__all__ = ['Square', 'Sq', 'Sq1', 'Sq2', 'Sq3']
class Square(RandomManifold):
""" A cube/square in [0, 1].
All points in R^n belong to the torus. """
def __init__(self, n):
DifferentiableManifold.__init__(self, dimension=n)
self.n = n
@contract(a='array[N]')
def belongs(self, a):
ok = np.logical_and(a >= 0, a <= 1)
if not np.all(ok):
raise ValueError("Not all are ok in %s" % a)
def distance(self, a, b):
_, vel = self.logmap(a, b)
return np.linalg.norm(vel)
def logmap(self, base, p):
vel = p - base
return base, vel
def expmap(self, bv):
a, vel = bv
b = a + vel
return b
def project_ts(self, bv):
return bv # XXX: more checks
def sample_uniform(self):
return np.random.rand(self.n)
@contract(returns='belongs_ts')
def sample_velocity(self, a): # @UnusedVariable
b = self.sample_uniform()
_, vel = self.logmap(a, b)
return vel
def friendly(self, a):
return 'point(%s)' % a
@contract(returns='list(belongs)')
def interesting_points(self):
interesting = []
interesting.append(np.zeros(self.n))
for i in range(self.n):
z = np.zeros(self.n)
z[i] = 1
interesting.append(z)
return interesting
def __repr__(self):
return 'Sq%s' % self.n
Sq1 = Square(1)
Sq2 = Square(2)
Sq3 = Square(3)
Sq = {1: Sq1, 2: Sq2, 3: Sq3}
```
#### File: geometry/manifolds/torus.py
```python
from contracts import contract
from geometry.spheres import normalize_pi
import numpy as np
from .differentiable_manifold import DifferentiableManifold
__all__ = ['Torus', 'T', 'T1', 'T2', 'T3']
class Torus(DifferentiableManifold):
def __init__(self, n):
DifferentiableManifold.__init__(self, dimension=n)
self.n = n
def belongs(self, a):
ok = np.logical_and(a >= -np.pi, a < np.pi)
if not np.all(ok):
raise ValueError("Not all are ok in %s" % a)
def distance(self, a, b):
b = self.normalize(b - a)
return np.linalg.norm(b)
def logmap(self, base, p):
vel = self.normalize(p - base)
return base, vel
def expmap(self, bv):
a, vel = bv
b = self.normalize(a + vel)
return b
def project_ts(self, bv):
return bv # XXX: more checks
@contract(returns='belongs')
def sample_uniform(self):
return np.random.rand(self.n) * 2 * np.pi - np.pi
def normalize(self, a):
return normalize_pi(a)
def friendly(self, a):
return 'point(%s)' % a
@contract(returns='list(belongs)')
def interesting_points(self):
interesting = []
interesting.append(np.zeros(self.n))
for _ in range(2):
interesting.append(self.sample_uniform())
return interesting
def __repr__(self):
return 'T%s' % self.n
T1 = Torus(1)
T2 = Torus(2)
T3 = Torus(3)
T = {1: T1, 2: T2, 3: T3}
```
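A quick sketch of how the wrap-around metric above behaves. It assumes the `geometry` package that ships these manifolds is importable and that `T1` is exported as listed in `__all__`; only methods defined in this file are used.
```python
import numpy as np
from geometry.manifolds import T1

a = np.array([3.0])    # just below +pi
b = np.array([-3.0])   # just above -pi
# b - a is normalized into [-pi, pi), so the geodesic wraps around the circle:
print(T1.distance(a, b))         # ~0.28 rather than ~6.0
base, vel = T1.logmap(a, b)
print(T1.expmap((base, vel)))    # lands back on b (up to angle normalization)
```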
#### File: geometry/manifolds/translation_group.py
```python
from contracts import contract
from geometry.poses import extract_pieces, pose_from_rotation_translation, \
rotation_translation_from_pose
from geometry.utils.numpy_backport import assert_allclose
import numpy as np
from .differentiable_manifold import DifferentiableManifold
from .euclidean import R
from .matrix_lie_group import MatrixLieGroup
from .translation_algebra import tran
__all__ = ['TranG', 'Tran', 'Tran1', 'Tran2', 'Tran3']
class TranG(MatrixLieGroup):
'''
The translation subgroup of SE(n).
'''
@contract(n='1|2|3')
def __init__(self, n):
algebra = tran[n]
MatrixLieGroup.__init__(self, n=n + 1, algebra=algebra, dimension=n)
self.En = R[n]
DifferentiableManifold.isomorphism(self, algebra,
self.algebra_from_group,
self.group_from_algebra,
itype='lie')
def __repr__(self):
# return 'Tran(%s)' % (self.n - 1)
return 'Tr%s' % (self.n - 1)
def belongs(self, x):
# TODO: explicit
R, t, zero, one = extract_pieces(x) # @UnusedVariable
assert_allclose(R, np.eye(self.n - 1))
assert_allclose(zero, 0, err_msg='I expect the lower row to be 0.')
assert_allclose(one, 1, err_msg='Bottom-right must be 1.')
@contract(returns='belongs')
def sample_uniform(self):
t = self.En.sample_uniform()
return pose_from_rotation_translation(np.eye(self.n - 1), t)
def friendly(self, a):
t = rotation_translation_from_pose(a)[1]
return 'Tran(%s)' % (self.En.friendly(t))
def logmap(self, base, p):
return base, p - base
def expmap(self, bv):
base, vel = bv
return base + vel
def algebra_from_group(self, g):
a = np.zeros((self.n, self.n))
a[:-1, -1] = g[:-1, -1]
return a
def group_from_algebra(self, a):
g = np.eye(self.n)
g[:-1, -1] = a[:-1, -1]
return g
def interesting_points(self):
points = []
for t in self.En.interesting_points():
p = pose_from_rotation_translation(np.eye(self.n - 1), t)
points.append(p)
return points
Tran1 = TranG(1)
Tran2 = TranG(2)
Tran3 = TranG(3)
Tran = {1: Tran1, 2: Tran2, 3: Tran3}
```
#### File: site-packages/geometry/mds_algos.py
```python
import itertools
from contracts import check_multiple
from contracts import contract
from geometry import logger, eigh
from geometry.formatting import formatm
from geometry.procrustes import best_similarity_transform
from geometry.spheres import project_vectors_onto_sphere
from geometry.utils.numpy_backport import assert_allclose
import numpy as np
@contract(S='array[KxN]', returns='array[NxN](>=0)')
def euclidean_distances(S):
''' Computes the euclidean distance matrix for the given points. '''
K, N = S.shape
D = np.zeros((N, N))
for i in range(N):
p = S[:, i]
pp = np.tile(p, (N, 1)).T
assert pp.shape == (K, N)
d2 = ((S - pp) * (S - pp)).sum(axis=0)
d2 = np.maximum(d2, 0)
D[i, :] = np.sqrt(d2)
return D
def double_center(P):
n = P.shape[0]
grand_mean = P.mean()
row_mean = np.zeros(n)
col_mean = np.zeros(n)
for i in range(n):
row_mean[i] = P[i, :].mean()
col_mean[i] = P[:, i].mean()
R = row_mean.reshape(n, 1).repeat(n, axis=1)
assert R.shape == (n, n)
C = col_mean.reshape(1, n).repeat(n, axis=0)
assert C.shape == (n, n)
B2 = -0.5 * (P - R - C + grand_mean)
if False:
B = np.zeros(P.shape)
for i, j in itertools.product(range(n), range(n)):
B[i, j] = -0.5 * (P[i, j] - col_mean[j] - row_mean[i] + grand_mean)
assert_allclose(B2, B)
return B2
@contract(C='array[NxN]', ndim='int,>0,K', returns='array[KxN]')
def inner_product_embedding_slow(C, ndim):
U, S, V = np.linalg.svd(C, full_matrices=0)
check_multiple([('array[NxN]', U),
('array[N]', S),
('array[NxN]', V)])
coords = V[:ndim, :]
for i in range(ndim):
coords[i, :] = coords[i, :] * np.sqrt(S[i])
return coords
@contract(C='array[NxN]', ndim='int,>1,K', returns='array[KxN]')
def inner_product_embedding(C, ndim):
n = C.shape[0]
if ndim > n:
msg = 'Number of points: %s Dimensions: %s' % (n, ndim)
raise ValueError(msg)
eigvals = (n - ndim, n - 1)
print(n, eigvals)
S, V = eigh(C, eigvals=eigvals)
if S.size >= 2:
assert S[0] <= S[1] # eigh returns in ascending order
if np.any(S < 0):
        msg = 'The cosine matrix eigenvalues are not all positive: \n'
msg += formatm('S', S)
msg += 'I assume it is rounding error and approximate with:\n'
S[S < 0] = 0
msg += formatm('S\'', S)
logger.warning(msg)
assert V.shape == (n, ndim)
assert S.shape == (ndim,)
# check_multiple([('K', ndim),
# ('array[NxK]', V),
# ('array[K]', S)])
coords = V.T
for i in range(ndim):
assert S[i] >= 0
coords[i, :] = coords[i, :] * np.sqrt(S[i])
return coords
def truncated_svd_randomized(M, k):
''' Truncated SVD based on randomized projections. '''
p = k + 5 # TODO: add parameter
Y = np.dot(M, np.random.normal(size=(M.shape[1], p)))
Q, r = np.linalg.qr(Y) # @UnusedVariable
B = np.dot(Q.T, M)
Uhat, s, v = np.linalg.svd(B, full_matrices=False)
U = np.dot(Q, Uhat)
return U.T[:k].T, s[:k], v[:k]
@contract(C='array[NxN]', ndim='int,>0,K', returns='array[KxN]')
def inner_product_embedding_randomized(C, ndim):
'''
Best embedding of inner product matrix based on
randomized projections.
'''
U, S, V = truncated_svd_randomized(C, ndim) # @UnusedVariable.
check_multiple([('K', ndim),
('array[KxN]', V),
('array[K]', S)])
coords = V
for i in range(ndim):
coords[i, :] = coords[i, :] * np.sqrt(S[i])
return coords
@contract(D='distance_matrix,array[MxM](>=0)', ndim='K,int,>=1', returns='array[KxM]')
def mds(D, ndim, embed=inner_product_embedding):
# if D.dtype != np.float64:
# D = D.astype(np.float64)
diag = D.diagonal()
# the diagonal should be zero
if not np.allclose(diag, 0):
        msg = 'The diagonal of the distance matrix should be zero. '
        msg += 'Here are all the entries: %s' % diag.tolist()
        raise ValueError(msg)
# assert_allclose(diag, 0, atol=1e-09)
# Find centered cosine matrix
P = D * D
B = double_center(P)
return embed(B, ndim)
@contract(D='distance_matrix,array[MxM](>=0)', ndim='K,int,>=1', returns='array[KxM]')
def mds_randomized(D, ndim):
''' MDS based on randomized projections. '''
return mds(D, ndim, embed=inner_product_embedding_randomized)
@contract(C='array[NxN]', ndim='int,>0,K', returns='array[KxN],directions')
def spherical_mds(C, ndim, embed=inner_product_embedding):
# TODO: check cosines
coords = embed(C, ndim)
proj = project_vectors_onto_sphere(coords)
return proj
# TODO: spherical_mds_randomized
best_embedding_on_sphere = spherical_mds
@contract(references='array[KxN]', distances='array[N](>=0)')
def place(references, distances):
# TODO: docs
K, N = references.shape
# First do MDS on all data
D = np.zeros((N + 1, N + 1))
D[:N, :N] = euclidean_distances(references)
D[N, N] = 0
D[N, :N] = distances
D[:N, N] = distances
Sm = mds(D, K)
# Only if perfect
# Dm = euclidean_distances(Sm)
# assert_almost_equal(D[:N, :N], Dm[:N, :N])
# new in other frame
R, t = best_similarity_transform(Sm[:, :N], references)
Sm_aligned = np.dot(R, Sm) + t
result = Sm_aligned[:, N]
return result
```
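A minimal round-trip sketch of the classical MDS pipeline above (squared distances, double centering, spectral embedding). It only assumes NumPy plus the functions defined in this file, with randomly generated points standing in for real data.
```python
import numpy as np

# 30 random points in R^3, observed only through their pairwise distances.
S = np.random.randn(3, 30)
D = euclidean_distances(S)

# Recover a 3-D configuration from the distances alone; it matches the
# original one up to a rigid transform, so the distance matrices agree.
S2 = mds(D, ndim=3)
D2 = euclidean_distances(S2)
print(np.abs(D - D2).max())   # should be tiny (numerical tolerance)
```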
#### File: site-packages/geometry/procrustes.py
```python
from contracts import check, contract
import numpy as np
# TODO: write tests
@contract(X='array[KxN],K>=2,K<N', Y='array[KxN]',
returns='array[KxK],orthogonal')
def best_orthogonal_transform(X, Y):
''' Finds the best orthogonal transform R between X and Y,
such that R X ~= Y. '''
YX = np.dot(Y, X.T)
check('array[KxK]', YX)
U, _, V = np.linalg.svd(YX)
best = np.dot(U, V)
return best
@contract(M='array[NxN]', returns='array[NxN],orthogonal')
def closest_orthogonal_matrix(M):
''' Finds the closest orthogonal matrix to M. '''
U, _, V = np.linalg.svd(M)
R = np.dot(U, V)
return R
# TODO: write tests
@contract(X='array[KxN],K>=2,K<N', Y='array[KxN]',
returns='tuple( (array[KxK],orthogonal), array[Kx1])')
def best_similarity_transform(X, Y):
''' Finds the best transform (R,t) between X and Y,
such that R X + t ~= Y. '''
K = X.shape[0]
Xm = X.mean(axis=1).reshape(K, 1)
Ym = Y.mean(axis=1).reshape(K, 1)
X = X - Xm
Y = Y - Ym
# assert_allclose(X.mean(axis=1), 0, atol=1e-8)
# assert_allclose(Y.mean(axis=1), 0, atol=1e-8)
YX = np.dot(Y, X.T)
check('array[KxK]', YX)
U, _, V = np.linalg.svd(YX)
R = np.dot(U, V)
t = Ym - np.dot(R, Xm)
return R, t
```
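A small self-check sketch for `best_similarity_transform`: apply a known rotation and translation to random points and recover them. Only NumPy and the function above are assumed.
```python
import numpy as np

theta = 0.7
R_true = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
t_true = np.array([[1.0], [-2.0]])

X = np.random.randn(2, 50)
Y = np.dot(R_true, X) + t_true

R, t = best_similarity_transform(X, Y)
# Both errors should be near machine precision for exact, noise-free data.
print(np.abs(R - R_true).max(), np.abs(t - t_true).max())
```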
#### File: geometry/unittests/misc_tests.py
```python
from geometry import map_hat, hat_map
from .utils import GeoTestCase, directions_sequence
class UtilsTests(GeoTestCase):
def hat_test(self):
self.check_conversion(directions_sequence(), hat_map, map_hat)
```
#### File: geometry/unittests/random_geometry_tests.py
```python
import unittest
from nose.plugins.attrib import attr
from contracts import check, fail
from contracts.enabling import all_disabled
from geometry import (random_rotation, random_quaternion, random_direction,
random_directions_bounded, any_distant_direction, any_orthogonal_direction,
distribution_radius, geodesic_distance_on_sphere, assert_orthogonal,
rotation_from_axis_angle, default_axis, default_axis_orthogonal,
random_orthogonal_direction, random_directions, assert_allclose)
import numpy as np
from .utils import directions_sequence
N = 20
class GeometryTests(unittest.TestCase):
def is_contracts_active(self):
return not all_disabled()
# TODO: add statistics test
def test_random_quaternions(self):
for i in range(N): #@UnusedVariable
random_quaternion()
def test_random_rotations(self):
for i in range(N): #@UnusedVariable
random_rotation()
def test_random_direction(self):
for i in range(N): #@UnusedVariable
random_direction()
def test_checks(self):
if not self.is_contracts_active():
return
R = np.zeros((10, 10))
fail('rotation_matrix', R)
R = random_rotation()
R[0, 2] += R[0, 1]
fail('rotation_matrix', R)
R = random_rotation()
R *= 2
fail('rotation_matrix', R)
def test_unit_length(self):
if not self.is_contracts_active():
return
check('unit_length', np.array([1]))
check('unit_length', np.array([0, 1]))
fail('unit_length', np.array([0, 2]))
def test_random_directions(self):
N = 20
x = random_directions(N)
assert x.shape == (3, N)
def test_distances(self):
for i in range(N): #@UnusedVariable
s = random_direction()
dist = geodesic_distance_on_sphere
assert_allclose(dist(s, s), 0)
assert_allclose(dist(s, -s), np.pi)
@attr('density')
def random_directions_bounded_test_1():
# TODO: write actual test
r = np.pi / 2
N = 180
random_directions_bounded(ndim=2, radius=r, num_points=N, center=None)
random_directions_bounded(ndim=3, radius=r, num_points=N, center=None)
random_directions_bounded(ndim=2, radius=r, num_points=N,
center=random_direction(2))
random_directions_bounded(ndim=3, radius=r, num_points=N,
center=random_direction(3))
def check_reasonable_radius(r, r2, N): #@UnusedVariable
bounds = [0.8, 1.2] # TODO: make it depend on N
if not (r * bounds[0] <= r2 <= r * bounds[1]):
msg = 'Constructed distribution with radius %f, got %f.' % (r, r2)
assert False, msg
def random_directions_bounded_check(ndim, radius, N):
S = random_directions_bounded(ndim=ndim, radius=radius, num_points=N)
r2 = distribution_radius(S)
check_reasonable_radius(radius, r2, N)
@attr('density')
def random_directions_bounded_test():
radius = [np.pi, np.pi / 2, np.pi / 6]
N = 300
for ndim in [2, 3]:
for r in radius:
yield random_directions_bounded_check, ndim, r, N
def distribution_radius_check(center, radius, N):
angles = (np.random.rand(N) - 0.5) * 2 * radius
angles += center
S = np.vstack((np.cos(angles), np.sin(angles)))
r2 = distribution_radius(S)
check_reasonable_radius(radius, r2, N)
def distribution_radius_test():
radius = [np.pi, np.pi / 2, np.pi / 6]
N = 300
for r in radius:
for i in range(5): #@UnusedVariable
center = np.random.rand() * 2 * np.pi
yield distribution_radius_check, center, r, N
def any_distant_direction_test():
for s in directions_sequence():
z = any_distant_direction(s)
d = geodesic_distance_on_sphere(z, s)
assert d > np.pi / 6
def any_orthogonal_direction_test():
for s in directions_sequence():
for i in range(5): #@UnusedVariable
z = any_orthogonal_direction(s)
assert_orthogonal(z, s)
def random_orthogonal_direction_test():
for s in directions_sequence():
for i in range(5): #@UnusedVariable
z = random_orthogonal_direction(s)
assert_orthogonal(z, s)
def default_axis_orthogonal_test():
z1 = default_axis()
z2 = default_axis_orthogonal()
assert_orthogonal(z1, z2)
def sorted_directions_test():
# TODO
pass
# TODO: write tests for ndim=2
def assert_orthogonal_test():
for s in directions_sequence():
axis = any_orthogonal_direction(s)
angle = np.pi / 2
R = rotation_from_axis_angle(axis, angle)
s2 = np.dot(R, s)
assert_orthogonal(s, s2)
```
#### File: geometry/unittests/utils.py
```python
import unittest
from contracts import contract
from geometry import (random_direction, random_quaternion, random_rotation,
assert_allclose)
import numpy as np
N = 20
def rotations_sequence():
yield np.eye(3)
# TODO: add special values
for i in range(N): # @UnusedVariable
yield random_rotation()
def directions_sequence():
''' Sequence of directions in S^2. '''
yield np.array([1.0, 0.0, 0.0])
yield np.array([0.0, 1.0, 0.0])
yield np.array([0.0, 0.0, 1.0])
yield np.array([-1.0, 0.0, 0.0])
yield np.array([0.0, -1.0, 0.0])
yield np.array([0.0, 0.0, -1.0])
# TODO: add special values
for i in range(N): # @UnusedVariable
yield random_direction(3)
def quaternions_sequence():
# TODO: add special values
for i in range(N): # @UnusedVariable
yield random_quaternion()
def axis_angle_sequence():
# TODO: add special
for i in range(N): # @UnusedVariable
yield random_axis_angle()
@contract(returns='tuple(direction, (float,<3.15))')
def random_axis_angle():
max_angle = np.pi * 0.9
angle = np.random.uniform() * max_angle
axis = random_direction()
return axis, angle
class GeoTestCase(unittest.TestCase):
def check_one(self, x, op1, op2):
def call(function, param):
if isinstance(param, tuple):
return function(*param)
else:
return function(param)
y = call(op1, x)
x2 = call(op2, y)
if isinstance(x, tuple):
for a, b in zip(x, x2):
assert_allclose(a, b)
else:
assert_allclose(x, x2)
def check_conversion(self, sequence, op1, op2):
''' Checks that x = op2(op1(x)) for all x in sequence.
If intermediate results are tuples, they are passed
as distinct parameters. '''
for x in sequence:
# yield self.check_one, x, op1, op2
self.check_one(x, op1, op2)
``` |
{
"source": "Johnson9009/relay",
"score": 2
} |
#### File: python/dli/frontends.py
```python
import tvm
from tvm import relay
def load_relay(model_path):
with open(model_path) as f:
ir_mod = tvm.parser.fromtext(f.read())
return ir_mod, None
def load_tensorflow(model_path, shape_dict):
# pylint: disable=C0415
import tensorflow as tf
try:
# Package "tf.compat.v1" is added from version "r1.13".
tf_compat_v1 = tf.compat.v1
except ImportError:
tf_compat_v1 = tf
with open(model_path, "rb") as f:
graph_def = tf_compat_v1.GraphDef()
graph_def.ParseFromString(f.read())
    # Import the graph definition into the default graph. The conversion to
    # Relay IR works even without this call, but importing here lets
    # TensorFlow diagnose early whether the model is broken.
tf.import_graph_def(graph_def, name="")
return relay.frontend.from_tensorflow(graph_def, shape=shape_dict)
def parse(framework, model_path, shape_dict):
    if framework == "relay":
        ir_mod, params = load_relay(model_path)
    elif framework == "tensorflow":
        ir_mod, params = load_tensorflow(model_path, shape_dict)
    else:
        raise ValueError("Unsupported framework: %s" % framework)
    if params is not None:
        # Use constant nodes (i.e., parameters or weights) to replace their
        # corresponding variable nodes. After importing, all inputs and weights
        # of the NN model become parameters of the Relay "main" function; with
        # this replacement only the model inputs remain parameters, and
        # "params" is no longer needed because it is merged into "ir_mod".
ir_mod["main"] = relay.build_module.bind_params_by_name(ir_mod["main"],
params)
passes = [
relay.transform.RemoveUnusedFunctions(),
]
with tvm.transform.PassContext(opt_level=3):
ir_mod = tvm.transform.Sequential(passes)(ir_mod)
return ir_mod
``` |
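A usage sketch of the `parse` entry point above. The file name, input tensor name, and shape are placeholders rather than values taken from the original project.
```python
# Hypothetical invocation: convert a frozen TensorFlow graph to Relay IR.
shape_dict = {"input": (1, 224, 224, 3)}   # placeholder input name and shape
ir_mod = parse("tensorflow", "model.pb", shape_dict)
print(ir_mod["main"])                      # inspect the resulting Relay function
```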
{
"source": "johnsonandjohnson/nlprov",
"score": 3
} |
#### File: nlprov/nlprov/__init__.py
```python
import spacy
def get_spacy_nlp():
try:
spacy_nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner', 'textcat'])
except OSError:
# We should tell the user explicitly what they need to do.
raise Exception("Please run `python -m spacy download en_core_web_sm` locally.")
return spacy_nlp
``` |
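A minimal sketch of using the helper above, assuming the `en_core_web_sm` model has already been downloaded.
```python
nlp = get_spacy_nlp()
doc = nlp("The models were trained quickly.")
# Parser, NER and textcat are disabled, but tagging and lemmatization still run.
print([token.lemma_ for token in doc])
```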
{
"source": "Johnson-A/UNM_Research",
"score": 2
} |
#### File: mantle_simulation/tests/MultipleRunsBLNK_19.py
```python
from time import clock
from dolfin import *
from math import *
from shutil import copyfile
from os import makedirs
import numpy
"""This version of the code runs a swarm of simulations of various viscosities and temperatures per viscosity. Since 5-29, it also includes an adiabatic temperature variance at the LAB."""
def RunJob(Tb,mu_value,path):
set_log_level(ERROR)
runtimeInit = clock() #to time the calculation
tfile = File(path+'/t6t.pvd')
mufile = File(path+"/mu.pvd")
ufile = File(path+'/velocity.pvd')
gradpfile = File(path+'/gradp.pvd')
pfile = File(path+'/pstar.pvd')
parameters = open(path+'/parameters','w',0)
temp_values = [27.+273, Tb+273, 1300.+273, 1305.+273]
rho_0 = 3300.
alpha = 2.5e-5
g = 9.81
kappa = 1.E-6 #not actually used.. (in cian's notes he is using the non dimensional kappa in the equations, which here i have defined as 1 (as kappa/kappa_0) so i think i can get away with this)
dTemp = temp_values[3] - temp_values[0]
b = 12.7
cc = log(128)
#Ep = 0.014057
Ep = b/dTemp
theta = 0.5
h = 1000000.
mu_a = mu_value #this was taken from the blankenbach paper, can change..
kappa_0 = 1.E-6
#above - 'freely chosen' variables
temp_values = [x/dTemp for x in temp_values] #non dimensionalising temp
mu_bot = exp(-Ep*(temp_values[3]*dTemp - 1573) + cc )*mu_a
print mu_a
print mu_bot
Ra = rho_0*alpha*g*dTemp*h**3/(kappa_0*mu_a)
w0 = rho_0*alpha*g*dTemp*h**2/mu_a
tau = h/w0
p0 = mu_a*w0/h
print Ra
print w0
print p0
vslipx = 1.6e-09/w0
dt = 3.E11/tau
t = dt
tEnd = 3.E15/tau #non dimensionalising times. Note these are not in million years,
nx = 50
ny = 50
MeshHeight = 400000./h #nondimensional (nd)
MeshWidth = 1000000./h #nd
LABHeight = 0.75*MeshHeight
for name in dir():
ev = str(eval(name))
if name[0] != '_' and ev[0] != '<':
parameters.write(name+' = '+ev+'\n')
vslip = Constant((1.6e-09/w0, 0.0)) #nondimensional
noslip = Constant((0.0, 0.0))
class PeriodicBoundary(SubDomain):
def inside(self, x, on_boundary):
return abs(x[0]) < DOLFIN_EPS and on_boundary
def map(self, x, y):
y[0] = x[0] - MeshWidth
y[1] = x[1]
pbc = PeriodicBoundary()
class LithosExp(Expression):
def eval(self, values, x):
# values[0] = LABHeight - 0.1*MeshHeight + cos(10*pi*x[0]/MeshWidth)*0.1*MeshHeight
# disc = (0.2 * MeshHeight) ** 2 - (x[0] - 0.5 * MeshWidth) ** 2
height = 0.05
width = 0.2
scale = 0.05
ridge1 = height*(1-tanh((x[0] - (0.5 + width)* MeshWidth)/scale))
ridge2 = height*(1-tanh((x[0] - (0.5 - width)* MeshWidth)/scale))
# hump = 0
# if disc > 0:
# hump = sqrt(disc)
hump = ridge1 - ridge2
values[0] = LABHeight - hump
LAB = LithosExp()
class TempExp(Expression):
def eval(self, value, x):
if x[1] >= LAB(x):
value[0] = temp_values[0] + (temp_values[1]-temp_values[0])*(MeshHeight - x[1])/(MeshHeight - LAB(x))
else:
value[0] = temp_values[3] - (temp_values[3]-temp_values[2])*(x[1])/(LAB(x))
class muExp(Expression):
def __init__(self, muTemp):
self.muTemp = muTemp
def eval(self, value, x):
t1 = numpy.zeros(1, dtype='d')
self.muTemp.eval(t1,x)
value[0] = exp(-Ep*(t1*dTemp - 1573) + cc*x[1]/MeshHeight)*mu_a
#if value[0] > 1e+26:
# value[0] = 1e+26 ###########################################
value[0] = value[0] / mu_a
def ZerothStokes(zerothMu, zerothTemp):
solver = KrylovSolver('tfqmr', 'amg')
R = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
Llll = VectorFunctionSpace(mesh, 'CG', 2)
Q = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
W = R * Q
def top(x, on_boundary):return abs(x[1] - MeshHeight) <= DOLFIN_EPS
def bottom(x, on_boundary):return abs(x[1]) <= DOLFIN_EPS
def left(x, on_boundary): return ((x[1] <= LAB(x)) and (x[0] <= DOLFIN_EPS))
def right(x, on_boundary): return (((x[1])<= LAB(x)) and ((x[0] - MeshWidth) <= DOLFIN_EPS))
noslip = Constant((0.0, 0.0))
bc0 = DirichletBC(W.sub(0), noslip, top)
bc1 = DirichletBC(W.sub(0), vslip, bottom)
bcp1 = DirichletBC(W.sub(1), Constant(0.0), bottom)
bcs = [bc0, bc1, bcp1]
u = Function(W)
v,p = split(u)
u_t = TestFunction(W) #not actually used here
v_t, p_t = TestFunctions(W)
u_a = TrialFunction(W)
################# Form!!! #########
r_v = (inner(sym(grad(v_t)), 2.*zerothMu*sym(grad(v))) \
- div(v_t)*p \
- zerothTemp*v_t[1])*dx
r_p = p_t*div(v)*dx
###############
#not both this and below forms use cian's scaling.
r = r_v + r_p
solve(r == 0, u, bcs)
vel, pre = u.split()
return vel
mesh = RectangleMesh(Point(0.0,0.0),Point(MeshWidth,MeshHeight),nx,ny,'left/right')
Svel = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
Spre = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
Stemp = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
Smu = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
Sgradp = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
S0 = MixedFunctionSpace([Svel,Spre,Stemp])
u = Function(S0)
v,p,T = split(u)
v_t, p_t, T_t = TestFunctions(S0)
du = TrialFunction(S0)
T0 = Function(Stemp)
T0.interpolate(TempExp())
mu = Function(Smu)
mu.interpolate(muExp(T0))
v0 = Function(Svel)
v0 = ZerothStokes(mu, T0)
v_theta = (1. - theta)*v0 + theta*v
T_theta = (1. - theta)*T + theta*T0
r_v = (inner(sym(grad(v_t)), 2.*mu*sym(grad(v))) \
- div(v_t)*p \
- T*v_t[1] )*dx
r_p = p_t*div(v)*dx
r_T = (T_t*((T - T0) \
+ dt*inner(v_theta, grad(T_theta))) \
+ (dt/Ra)*inner(grad(T_t), grad(T_theta)) )*dx
r = r_v + r_p + r_T
def top(x, on_boundary): return x[1] >= MeshHeight - DOLFIN_EPS
def bottom(x, on_boundary): return x[1] <= DOLFIN_EPS
def left(x, on_boundary): return ((x[1] <= LAB(x)) and (x[0] <= DOLFIN_EPS))
def right(x, on_boundary): return (((x[1])<= LAB(x)) and ((x[0] - MeshWidth) <= DOLFIN_EPS))
bcv0 = DirichletBC(S0.sub(0), noslip, top)
bcv1 = DirichletBC(S0.sub(0), vslip, bottom)
bcp0 = DirichletBC(S0.sub(1), Constant(0.0), bottom)
bct0 = DirichletBC(S0.sub(2), Constant(temp_values[0]), top)
bct1 = DirichletBC(S0.sub(2), Constant(temp_values[3]), bottom)
bcs = [bcv0, bcv1, bcp0, bct0, bct1]
t = 0
count = 0
while (t < tEnd):
solve(r == 0, u, bcs)
t += dt
nV, nP, nT = u.split()
if (count % 100 == 0):
pfile << nP
ufile << nV
tfile << nT
mufile << mu
gradpfile << project(grad(nP),Sgradp)
count +=1
assign(T0, nT)
assign(v0, nV)
mu.interpolate(muExp(T0))
print 'Case mu=%g, Tb=%g complete.'%(mu_a, Tb), ' Run time =', clock() - runtimeInit, 's'
if __name__ == "__main__":
Tbs = [800,1000,1300]
#Mus = numpy.linspace(1e19,1e21,5)
Mus = [1e19]
for mu in Mus:
mufolder='mu='+str(mu)
try:
makedirs(mufolder)
except:
pass
for temp in Tbs:
tempfolder='Tb='+str(temp)
workpath=mufolder+'/'+tempfolder
RunJob(temp,mu,workpath)
```
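For reference, a back-of-the-envelope check (plain Python, no FEniCS) of the nondimensional scales that `RunJob` prints for the default sweep value `mu_value = 1e19`.
```python
# Reproduces the Ra, w0 and tau computed inside RunJob.
rho_0, alpha, g, kappa_0, h = 3300., 2.5e-5, 9.81, 1.e-6, 1000000.
dTemp = (1305. + 273) - (27. + 273)                        # 1278 K
mu_a = 1e19

Ra = rho_0 * alpha * g * dTemp * h**3 / (kappa_0 * mu_a)   # ~1.0e8
w0 = rho_0 * alpha * g * dTemp * h**2 / mu_a               # ~1.0e-4 m/s
tau = h / w0                                               # ~9.7e9 s per time unit
print(Ra, w0, tau)
```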
#### File: mantle_simulation/tests/test.py
```python
from dolfin import *
class PeriodicBoundary(SubDomain):
def inside(self, x, on_boundary):
return (near(x[0], 0) and on_boundary)
def map(self, x, y):
y[0] = x[0] - 1.0
y[1] = x[1]
mesh = UnitSquareMesh(10, 10)
pbc = PeriodicBoundary()
V = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
S = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
fluid_vel = Function(V)
v_t = TestFunction(V)
s_t = TestFunction(S)
r_v = inner(grad(v_t), grad(fluid_vel)) * dx
def top(x, on_boundary): return near(x[1], 1.0)
def bottom(x, on_boundary): return near(x[1], 0.0)
bcv1 = DirichletBC(V, (1.0, 1.0), top)
bcv0 = DirichletBC(V, (0.0, 1.0), bottom)
solve(r_v == 0.0, fluid_vel, [bcv0, bcv1])
File('output.pvd') << fluid_vel
print(type(fluid_vel))
```
#### File: mantle_simulation/tests/test_stokes.py
```python
from dolfin import *
mesh = UnitSquareMesh(8,8)
V = VectorFunctionSpace(mesh, 'CG', 1)
u = TrialFunction(V)
v = TestFunction(V)
# Define boundary conditions
tol = 1E-14
def lower_boundary(x, on_boundary):
return on_boundary and abs(x[1]) < tol
def upper_boundary(x, on_boundary):
return on_boundary and abs(x[1]-1) < tol
Gamma_0 = DirichletBC(V, (0.0,0.0), lower_boundary)
Gamma_1 = DirichletBC(V, (1.0,0.0), upper_boundary)
bcs = [Gamma_0, Gamma_1]
a = div(v) * div(u) * dx
L = inner(Constant((0.0, 0.0)), v) * dx  # zero right-hand side as a proper linear form
# Compute solution
t = Function(V)
solve(a==L,t,bcs)
plot(t, title='Velocity', interactive=True)
``` |
{
"source": "johnsoncn/compression",
"score": 3
} |
#### File: compression/models/image_process.py
```python
from PIL import Image
import matplotlib.pyplot as plt
import os
import shutil
def copy_img():
src_dir = '/home/dingchaofan/data/cc_compression/train/'
dst_dir = '/home/dingchaofan/data/cc_compression/source/'
os.mkdir(dst_dir)
sources = os.listdir(src_dir)
# print(sources)
for src in sources:
img = Image.open('/home/dingchaofan/data/cc_compression/train/' + src)
width, height = img.size
# print(width, height)
if width > 1000 and height > 1000:
print(width, height)
shutil.copy(src_dir + src, dst_dir + src)
def image_names():
dst_dir = '/home/dingchaofan/data/cc_compression/source/'
sources = os.listdir(dst_dir)
for i, src in enumerate(sources):
os.rename(src=dst_dir+src, dst=dst_dir+str(i)+'.png')
print(i)
print(os.listdir(dst_dir))
def test():
import glob
res = glob.glob('/home/dingchaofan/data/cc_compression/source/')
print(res)
test()
```
#### File: python/entropy_models/continuous_batched.py
```python
import functools
import tensorflow as tf
from tensorflow_compression.python.distributions import helpers
from tensorflow_compression.python.entropy_models import continuous_base
from tensorflow_compression.python.ops import gen_ops
from tensorflow_compression.python.ops import math_ops
from tensorflow_compression.python.ops import round_ops
__all__ = [
"ContinuousBatchedEntropyModel",
]
@tf.keras.utils.register_keras_serializable(package="tensorflow_compression")
class ContinuousBatchedEntropyModel(continuous_base.ContinuousEntropyModelBase):
"""Batched entropy model for continuous random variables.
This entropy model handles quantization of a bottleneck tensor and helps with
training of the parameters of the probability distribution modeling the
tensor (a shared "prior" between sender and receiver). It also pre-computes
integer probability tables, which can then be used to compress and decompress
bottleneck tensors reliably across different platforms.
A typical workflow looks like this:
- Train a model using an instance of this entropy model as a bottleneck,
passing the bottleneck tensor through it. With `training=True`, the model
computes a differentiable upper bound on the number of bits needed to
compress the bottleneck tensor.
- For evaluation, get a closer estimate of the number of compressed bits
using `training=False`.
- Instantiate an entropy model with `compression=True` (and the same
parameters as during training), and share the model between a sender and a
receiver.
- On the sender side, compute the bottleneck tensor and call `compress()` on
it. The output is a compressed string representation of the tensor. Transmit
the string to the receiver, and call `decompress()` there. The output is the
quantized bottleneck tensor. Continue processing the tensor on the receiving
side.
Entropy models which contain range coding tables (i.e. with
`compression=True`) can be instantiated in three ways:
- By providing a continuous "prior" distribution object. The range coding
tables are then derived from that continuous distribution.
- From a config as returned by `get_config`, followed by a call to
`set_weights`. This implements the Keras serialization protocol. In this
case, the initializer creates empty state variables for the range coding
tables, which are then filled by `set_weights`. As a consequence, this
method requires `stateless=False`.
- In a more low-level way, by directly providing the range coding tables to
`__init__`, for use cases where the Keras protocol can't be used (e.g., when
the entropy model must not create variables).
This class assumes that all scalar elements of the encoded tensor are
statistically independent, and that the parameters of their scalar
distributions do not depend on data. The innermost dimensions of the
bottleneck tensor must be broadcastable to the batch shape of `prior`. Any
dimensions to the left of the batch shape are assumed to be i.i.d., i.e. the
likelihoods are broadcast to the bottleneck tensor accordingly.
A more detailed description (and motivation) of this way of performing
quantization and range coding can be found in the following paper. Please cite
the paper when using this code for derivative work.
> "End-to-end Optimized Image Compression"<br />
> <NAME>, <NAME>, <NAME><br />
> https://openreview.net/forum?id=rJxdQ3jeg
"""
def __init__(self,
prior=None,
coding_rank=None,
compression=False,
stateless=False,
expected_grads=False,
tail_mass=2**-8,
range_coder_precision=12,
dtype=None,
prior_shape=None,
cdf=None,
cdf_offset=None,
cdf_shapes=None,
non_integer_offset=True,
quantization_offset=None,
laplace_tail_mass=0):
"""Initializes the instance.
Args:
prior: A `tfp.distributions.Distribution` object. A density model fitting
the marginal distribution of the bottleneck data with additive uniform
noise, which is shared a priori between the sender and the receiver. For
best results, the distribution should be flexible enough to have a
unit-width uniform distribution as a special case, since this is the
marginal distribution for bottleneck dimensions that are constant. The
distribution parameters may not depend on data (they must be either
variables or constants).
coding_rank: Integer. Number of innermost dimensions considered a coding
unit. Each coding unit is compressed to its own bit string, and the
bits in the __call__ method are summed over each coding unit.
compression: Boolean. If set to `True`, the range coding tables used by
`compress()` and `decompress()` will be built on instantiation. If set
to `False`, these two methods will not be accessible.
stateless: Boolean. If `False`, range coding tables are created as
`Variable`s. This allows the entropy model to be serialized using the
`SavedModel` protocol, so that both the encoder and the decoder use
identical tables when loading the stored model. If `True`, creates range
coding tables as `Tensor`s. This makes the entropy model stateless and
allows it to be constructed within a `tf.function` body, for when the
range coding tables are provided manually. If `compression=False`, then
`stateless=True` is implied and the provided value is ignored.
expected_grads: If True, will use analytical expected gradients during
backpropagation w.r.t. additive uniform noise.
tail_mass: Float. Approximate probability mass which is encoded using an
Elias gamma code embedded into the range coder.
range_coder_precision: Integer. Precision passed to the range coding op.
dtype: `tf.dtypes.DType`. Data type of this entropy model (i.e. dtype of
prior, decompressed values). Must be provided if `prior` is omitted.
prior_shape: Batch shape of the prior (dimensions which are not assumed
i.i.d.). Must be provided if `prior` is omitted.
cdf: `tf.Tensor` or `None`. If provided, is used for range coding rather
than tables built from the prior.
cdf_offset: `tf.Tensor` or `None`. Must be provided along with `cdf`.
cdf_shapes: Shapes of `cdf` and `cdf_offset`. If provided, empty range
coding tables are created, which can then be restored using
`set_weights`. Requires `compression=True` and `stateless=False`.
non_integer_offset: Boolean. Whether to quantize to non-integer offsets
heuristically determined from mode/median of prior. Set this to `False`
if you are using soft quantization during training.
quantization_offset: `tf.Tensor` or `None`. If `cdf` is provided and
`non_integer_offset=True`, must be provided as well.
laplace_tail_mass: Float. If positive, will augment the prior with a
Laplace mixture for training stability. (experimental)
"""
if not (prior is not None) == (dtype is None) == (prior_shape is None):
raise ValueError(
"Either `prior` or both `dtype` and `prior_shape` must be provided.")
if (prior is None) + (cdf_shapes is None) + (cdf is None) != 2:
raise ValueError(
"Must provide exactly one of `prior`, `cdf`, or `cdf_shapes`.")
if not compression and not (
cdf is None and cdf_offset is None and cdf_shapes is None):
raise ValueError("CDFs can't be provided with `compression=False`")
if prior is not None and prior.event_shape.rank:
raise ValueError("`prior` must be a (batch of) scalar distribution(s).")
super().__init__(
coding_rank=coding_rank,
compression=compression,
stateless=stateless,
expected_grads=expected_grads,
tail_mass=tail_mass,
dtype=dtype if dtype is not None else prior.dtype,
laplace_tail_mass=laplace_tail_mass,
)
self._prior = prior
self._non_integer_offset = bool(non_integer_offset)
self._prior_shape = tf.TensorShape(
prior_shape if prior is None else prior.batch_shape)
if self.coding_rank < self.prior_shape.rank:
raise ValueError("`coding_rank` can't be smaller than `prior_shape`.")
with self.name_scope:
if quantization_offset is not None:
# If quantization offset is passed in manually, use it.
pass
elif not self.non_integer_offset:
# If not using the offset heuristic, always quantize to integers.
quantization_offset = None
elif cdf_shapes is not None:
# `cdf_shapes` being set indicates that we are using the `SavedModel`
# protocol. So create a placeholder value.
quantization_offset = tf.zeros(
self.prior_shape_tensor, dtype=self.dtype)
elif cdf is not None:
# CDF is passed in manually. So assume the same about the offsets.
if quantization_offset is None:
raise ValueError(
"When providing `cdf` and `non_integer_offset=True`, must also "
"provide `quantization_offset`.")
else:
assert self._prior is not None
# If prior is available, determine offsets from it using the heuristic.
quantization_offset = helpers.quantization_offset(self.prior)
# Optimization: if the quantization offset is zero, we don't need to
# subtract/add it when quantizing, and we don't need to serialize its
# value. Note that this code will only work in eager mode.
if (tf.executing_eagerly() and
tf.reduce_all(tf.equal(quantization_offset, 0.))):
quantization_offset = None
else:
quantization_offset = tf.broadcast_to(
quantization_offset, self.prior_shape_tensor)
if quantization_offset is None:
self._quantization_offset = None
elif self.compression and not self.stateless:
self._quantization_offset = tf.Variable(
quantization_offset, dtype=self.dtype, trainable=False,
name="quantization_offset")
else:
self._quantization_offset = tf.convert_to_tensor(
quantization_offset, dtype=self.dtype, name="quantization_offset")
if self.compression:
if cdf is None and cdf_shapes is None:
cdf, cdf_offset = self._build_tables(
self.prior, range_coder_precision, offset=quantization_offset)
self._init_compression(cdf, cdf_offset, cdf_shapes)
@property
def prior_shape(self):
"""Batch shape of `prior` (dimensions which are not assumed i.i.d.)."""
return self._prior_shape
@property
def prior_shape_tensor(self):
"""Batch shape of `prior` as a `Tensor`."""
return tf.constant(self.prior_shape.as_list(), dtype=tf.int32)
@property
def non_integer_offset(self):
return self._non_integer_offset
@property
def quantization_offset(self):
if self._quantization_offset is None:
return None
return tf.convert_to_tensor(self._quantization_offset)
@tf.Module.with_name_scope
def __call__(self, bottleneck, training=True):
"""Perturbs a tensor with (quantization) noise and estimates rate.
Args:
bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
least `self.coding_rank` dimensions, and the innermost dimensions must
be broadcastable to `self.prior_shape`.
training: Boolean. If `False`, computes the Shannon information of
`bottleneck` under the distribution `self.prior`, which is a
non-differentiable, tight *lower* bound on the number of bits needed to
compress `bottleneck` using `compress()`. If `True`, returns a somewhat
looser, but differentiable *upper* bound on this quantity.
Returns:
A tuple (bottleneck_perturbed, bits) where `bottleneck_perturbed` is
`bottleneck` perturbed with (quantization) noise, and `bits` is the rate.
`bits` has the same shape as `bottleneck` without the `self.coding_rank`
innermost dimensions.
"""
log_prob_fn = functools.partial(self._log_prob, self.prior)
if training:
log_probs, bottleneck_perturbed = math_ops.perturb_and_apply(
log_prob_fn, bottleneck, expected_grads=self.expected_grads)
else:
bottleneck_perturbed = self.quantize(bottleneck)
log_probs = log_prob_fn(bottleneck_perturbed)
axes = tuple(range(-self.coding_rank, 0))
bits = tf.reduce_sum(log_probs, axis=axes) / (
-tf.math.log(tf.constant(2, dtype=log_probs.dtype)))
return bottleneck_perturbed, bits
@tf.Module.with_name_scope
def quantize(self, bottleneck):
"""Quantizes a floating-point bottleneck tensor.
The tensor is rounded to integer values potentially shifted by offsets (if
`self.quantization_offset is not None`). These offsets can depend on
`self.prior`. For instance, for a Gaussian distribution, when
`self.non_integer_offset == True`, the returned values would be rounded
to the location of the mode of the distribution plus or minus an integer.
The gradient of this rounding operation is overridden with the identity
(straight-through gradient estimator).
Args:
bottleneck: `tf.Tensor` containing the data to be quantized. The innermost
dimensions must be broadcastable to `self.prior_shape`.
Returns:
A `tf.Tensor` containing the quantized values.
"""
return round_ops.round_st(bottleneck, self.quantization_offset)
@tf.Module.with_name_scope
def compress(self, bottleneck):
"""Compresses a floating-point tensor.
Compresses the tensor to bit strings. `bottleneck` is first quantized
as in `quantize()`, and then compressed using the probability tables in
`self.cdf` (derived from `self.prior`). The quantized tensor can later be
recovered by calling `decompress()`.
The innermost `self.coding_rank` dimensions are treated as one coding unit,
i.e. are compressed into one string each. Any additional dimensions to the
left are treated as batch dimensions.
Args:
bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
least `self.coding_rank` dimensions, and the innermost dimensions must
be broadcastable to `self.prior_shape`.
Returns:
A `tf.Tensor` having the same shape as `bottleneck` without the
`self.coding_rank` innermost dimensions, containing a string for each
coding unit.
"""
input_shape = tf.shape(bottleneck)
all_but_last_n_elems = lambda t, n: t[:-n] if n else t
batch_shape = all_but_last_n_elems(input_shape, self.coding_rank)
iid_shape = all_but_last_n_elems(input_shape, self.prior_shape.rank)
offset = self.quantization_offset
if offset is not None:
bottleneck -= offset
symbols = tf.cast(tf.round(bottleneck), tf.int32)
symbols = tf.reshape(symbols, tf.concat([iid_shape, [-1]], 0))
symbols -= self.cdf_offset
handle = gen_ops.create_range_encoder(batch_shape, self.cdf)
handle = gen_ops.entropy_encode_channel(handle, symbols)
return gen_ops.entropy_encode_finalize(handle)
@tf.Module.with_name_scope
def decompress(self, strings, broadcast_shape):
"""Decompresses a tensor.
Reconstructs the quantized tensor from bit strings produced by `compress()`.
It is necessary to provide a part of the output shape in `broadcast_shape`.
Args:
strings: `tf.Tensor` containing the compressed bit strings.
broadcast_shape: Iterable of ints. The part of the output tensor shape
between the shape of `strings` on the left and `self.prior_shape` on the
right. This must match the shape of the input to `compress()`.
Returns:
A `tf.Tensor` of shape `strings.shape + broadcast_shape +
self.prior_shape`.
"""
strings = tf.convert_to_tensor(strings, dtype=tf.string)
broadcast_shape = tf.convert_to_tensor(broadcast_shape, dtype=tf.int32)
decode_shape = tf.concat(
[broadcast_shape, [tf.reduce_prod(self.prior_shape_tensor)]], 0)
output_shape = tf.concat(
[tf.shape(strings), broadcast_shape, self.prior_shape_tensor], 0)
handle = gen_ops.create_range_decoder(strings, self.cdf)
handle, symbols = gen_ops.entropy_decode_channel(
handle, decode_shape, self.cdf_offset.dtype)
sanity = gen_ops.entropy_decode_finalize(handle)
tf.debugging.assert_equal(sanity, True, message="Sanity check failed.")
symbols += self.cdf_offset
symbols = tf.reshape(symbols, output_shape)
outputs = tf.cast(symbols, self.dtype)
offset = self.quantization_offset
if offset is not None:
outputs += offset
return outputs
def get_config(self):
"""Returns the configuration of the entropy model.
Returns:
A JSON-serializable Python dict.
"""
config = super().get_config()
config.update(
prior_shape=tuple(map(int, self.prior_shape)),
# Since the prior is never passed when using the `SavedModel` protocol,
# we can reuse this flag to indicate whether the offsets need to be
# loaded from a variable.
non_integer_offset=self.quantization_offset is not None,
)
return config
``` |
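A hedged end-to-end sketch of the workflow described in the class docstring above. It assumes `tensorflow_compression` exposes this class and a `NoisyNormal` prior from its distributions package; the shapes are arbitrary.
```python
import tensorflow as tf
import tensorflow_compression as tfc

# Scalar prior shared a priori between sender and receiver (assumed available).
prior = tfc.NoisyNormal(loc=0., scale=1.)
em = tfc.ContinuousBatchedEntropyModel(prior, coding_rank=1, compression=True)

y = tf.random.normal((4, 16))           # a batch of bottleneck vectors
y_tilde, bits = em(y, training=True)    # differentiable rate estimate for training
strings = em.compress(y)                # one bit string per coding unit, shape (4,)
y_hat = em.decompress(strings, (16,))   # quantized reconstruction, shape (4, 16)
```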
{
"source": "Johnsoneer/The-Quote-Book",
"score": 2
} |
#### File: migrations/versions/2156b753a963_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2156b753a963'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('people_quoted',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('first_name', sa.String(length=32), nullable=True),
sa.Column('last_name_initial', sa.String(length=1), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('is_verified', sa.Boolean(), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('signup_datetime', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_signup_datetime'), 'users', ['signup_datetime'], unique=False)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('quotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('submitted_by_id', sa.Integer(), nullable=True),
sa.Column('primary_person_quoted_id', sa.Integer(), nullable=True),
sa.Column('submitted_datetime', sa.DateTime(), nullable=True),
sa.Column('context', sa.String(length=140), nullable=True),
sa.ForeignKeyConstraint(['primary_person_quoted_id'], ['people_quoted.id'], ),
sa.ForeignKeyConstraint(['submitted_by_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_quotes_submitted_datetime'), 'quotes', ['submitted_datetime'], unique=False)
op.create_table('phrases',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('quote_id', sa.Integer(), nullable=True),
sa.Column('person_quoted_id', sa.Integer(), nullable=True),
sa.Column('phrase_text', sa.String(length=500), nullable=True),
sa.ForeignKeyConstraint(['person_quoted_id'], ['people_quoted.id'], ),
sa.ForeignKeyConstraint(['quote_id'], ['quotes.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('phrases')
op.drop_index(op.f('ix_quotes_submitted_datetime'), table_name='quotes')
op.drop_table('quotes')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_signup_datetime'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('people_quoted')
# ### end Alembic commands ###
``` |
{
"source": "johnsongeorge-w/python-binaryaudit",
"score": 2
} |
#### File: python-binaryaudit/binaryaudit/db.py
```python
from sqlalchemy.engine.url import URL
from sqlalchemy import MetaData, create_engine
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
from envparse import env
class wrapper:
'''
'''
def __init__(self, dbconfig, logger) -> None:
'''
'''
self.logger = logger
self._connection_timeout = 600
env.read_envfile(dbconfig)
def _acquire_session(self):
'''
'''
return self._session()
def _flush_session(self, session) -> None:
'''
'''
session.commit()
def _release_session(self, session) -> None:
'''
'''
session.close()
def _initialize_db_connection(self) -> None:
'''
'''
connection_url = URL(
drivername="mssql+pyodbc",
username=env.str('User'),
            password=env.str('Password'),
host=env.str('Server'),
database=env.str('Database'),
query={"driver": "ODBC Driver 17 for SQL Server"}
)
db_engine = create_engine(
connection_url,
pool_recycle=self._connection_timeout
)
self.logger.note("Initializing database connection")
metadata = MetaData()
metadata.reflect(
db_engine,
only=[
"abi_checker_baseline_tbl",
"abi_checker_product_tbl",
"abi_checker_transaction_details_tbl",
"abi_checker_transaction_main_tbl"
],
)
db_map = automap_base(metadata=metadata)
db_map.prepare()
self.abi_checker_baseline_tbl = db_map.classes.abi_checker_baseline_tbl
self.abi_checker_product_tbl = db_map.classes.abi_checker_product_tbl
self.abi_checker_transaction_details_tbl = db_map.classes.abi_checker_transaction_details_tbl
self.abi_checker_transaction_main_tbl = db_map.classes.abi_checker_transaction_main_tbl
self._session = sessionmaker(bind=db_engine, expire_on_commit=False)
def initialize_db(self) -> None:
'''
'''
self._initialize_db_connection()
def is_db_connected(self) -> bool:
'''
'''
if self._session:
return True
return False
def get_product_id(self, distroname, derivativename) -> int:
'''
'''
product_id = 0
session = self._acquire_session()
record = (
session.query(
self.abi_checker_product_tbl).filter_by(
DistroName=distroname,
DerivativeName=derivativename
).one_or_none()
)
if record is None:
prd_record = self.abi_checker_product_tbl(
DistroName=distroname,
DerivativeName=derivativename
)
session.add(prd_record)
self._flush_session(session)
record = (
session.query(
self.abi_checker_product_tbl).filter_by(
DistroName=distroname,
DerivativeName=derivativename
).one_or_none())
self._release_session(session)
product_id = record.ProductID
return product_id
class orchestrator:
'''
ABI checker orchestrator class for the trigger the abi checker functionality.
'''
def __init__(self, distroname, derivative, build_id, telemetery, logger, db_config="db_config"):
'''
'''
self.logger = logger
self.distroname = distroname
self.derivative = derivative
self.build_id = build_id
self.product_id = 0
self.enable_telemetry = telemetery
self.db_config = db_config
# Instantiate the db connection to upload results to DB
if self.enable_telemetry == 'y':
self.db_conn = wrapper(self.db_config, self.logger)
self.db_conn.initialize_db()
def initalize_product_id(self) -> None:
'''
'''
if self.db_conn.is_db_connected:
product_id = self.db_conn.get_product_id(
self.distroname,
self.derivative
)
self.logger.note("Product_id: %s" % product_id)
else:
self.logger.debug("Not connected")
``` |
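A hypothetical wiring of the `orchestrator` above. The distro and derivative names, build id, and logger are placeholders (the logger only needs the `note` and `debug` methods used in this module), and `db_config` must point to an env file providing the Server/Database/User/Password keys read above.
```python
import logging

class _Logger:
    """Tiny stand-in exposing the interface this module expects."""
    def note(self, msg): logging.info(msg)
    def debug(self, msg): logging.debug(msg)

orch = orchestrator("somedistro", "base", build_id=42,
                    telemetery="y", logger=_Logger(), db_config="db_config")
orch.initalize_product_id()   # looks up or creates the ProductID row
```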
{
"source": "JohnsonHansC/umadbro",
"score": 3
} |
#### File: JohnsonHansC/umadbro/Logging.py
```python
__author__ = '<NAME>', '<NAME>'
import urllib2
import platform
import tempfile
class Loggy:
def __init__(self): # Creation of log file when object is made.
self.log_file = open(tempfile.gettempdir() + '/umadbro.log', "a+")
self.close()
# Name: log
# Purpose: Log commands for debugging purposes.
# Param: str(command) that's attempting to be executed
# Return: Nothing
def log(self, command):
self.log_file = open(tempfile.gettempdir() + '/umadbro.log', "a+")
self.log_file.write(command + "\n")
self.close()
# Name: post_log
# Purpose: send an HTTP PUT message to umadbro server
# to figure out what aren't working and docs to add.
# Return: Nothing
def post_log(self):
self.log_file = open(tempfile.gettempdir() + '/umadbro.log', "r")
tmp_contents = self.log_file.read()
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request('http://umadbro.pw', str(tmp_contents))
request.add_header('Content-Type', 'text/umad_requests')
        request.get_method = lambda: 'PUT'
        opener.open(request)
        self.close()
# name: close
# Purpose: close file after writing/reading has occurred.
# Return: Nothing
def close(self): # Cleaning up the file
self.log_file.close()
``` |
{
"source": "johnsonice/IMF_Textmining",
"score": 3
} |
#### File: Python/7_sentiment_analysis/2_sentiment.py.py
```python
import os
os.chdir('d:/usr-profiles/chuang/Desktop/Dev/textmining/2_imf_docs/1_use_xmls/process_search_docs')
import pandas as pd
#import csv
from util import read_keywords,find_exact_keywords,construct_rex
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize,sent_tokenize
from nltk.stem import WordNetLemmatizer
import copy
#%%
## Merge both xml and txt pd together ##
def combine_df(xml_results,txt_results):
"""
Pass in xml and txt result files
return combined and clearned df
"""
df_xml = pd.read_csv(xml_results)
df_txt = pd.read_csv(txt_results)
df = pd.concat([df_xml,df_txt],ignore_index=True) ## append dataframes together
df_headers = df.columns.values.tolist()
df_headers.remove('Unnamed: 0') ## delete the first column
meta_list = ['doc_id','para_id','context','para_word_count']
var_list = [v for v in df_headers if v not in meta_list ]
ordered_vars =meta_list + var_list
## check for duplicates
    assert len(ordered_vars) == len(set(ordered_vars)), 'duplicate in var names' ## should return True
df = df[ordered_vars] ## clean dataframe with both txt and xml
return df
## Load sentiment keywords ##############
def get_sent_keys(sentiment_path):
"""
pass in pos and neg keywords list
"""
sent_list = read_keywords(sentiment_path)
pos_list = [p[0] for p in sent_list[1:] if len(p[0])>0]
neg_list = [p[1] for p in sent_list[1:] if len(p[1])>0]
return pos_list, neg_list
## tokenize lementize and remove stop words
def _process_text(text,stopw,lemmatizer):
tokens = word_tokenize(text.lower())
tokens = [lemmatizer.lemmatize(t) for t in tokens]
tokens = [t for t in tokens if t not in stopw]
return tokens
def check_negation(negation_check,negations):
res = [t for t in negation_check if t in negations]
if len(res)> 0 :
return True
else:
return False
def get_negation_check(tokens,idx,ran=3):
t_len = len(tokens)
nidx = [idx-ran,idx+ran]
if nidx[0]<0:nidx[0] = 0
    if nidx[1] > t_len + 1: nidx[1] = t_len + 1
negation_check = tokens[nidx[0]:nidx[1]]
return negation_check
def get_pos_count(text,pos_list,neg_list,negations,stopw,lemmatizer,search_keys,search_rex,by='sent'):
if by == 'sent':
sentences = sent_tokenize(text)
        s_list = [s for s in sentences if len(find_exact_keywords(s, search_keys, search_rex)) > 0]
reduced_para = ' '.join(s_list)
if len(reduced_para)==0: return 0,0,0,''
text = reduced_para
tokens = _process_text(text,stopw,lemmatizer)
total_words = len(tokens)
pos = 0
neg = 0
for idx,t in enumerate(tokens):
if t in pos_list:
negation_check = get_negation_check(tokens,idx,3)
if check_negation(negation_check,negations):
neg+=1
else:
pos+=1
elif t in neg_list:
negation_check = get_negation_check(tokens,idx,3)
if check_negation(negation_check,negations):
pos+=1
else:
neg+=1
return pos,neg,total_words,text
#%%
xml_results = 'data/xml_results.csv'
txt_results = 'data/txt_results.csv'
sentiment_path = 'sentiment_words_modified.csv'
negations = ['not','no','nobody','nobody','none','never','neither','cannot',"can't"]
stopw = stopwords.words('english')
stopw = [s for s in stopw if s not in negations]
lemmatizer = WordNetLemmatizer()
df = combine_df(xml_results,txt_results)
pos_list, neg_list = get_sent_keys(sentiment_path)
search_keys = copy.deepcopy(df.columns.values.tolist())[4:]
rex = construct_rex(search_keys)
#%%
# test
#test = df['context'][:10].apply(get_pos_count,args=(pos_list,neg_list,negations,stopw,lemmatizer,search_keys,rex))
#%%
df['sentiment'] = df['context'].apply(get_pos_count,args=(pos_list,neg_list,negations,stopw,lemmatizer,search_keys,rex))
df[['sentiment_pos','sentiment_neg','total_words_no_stop','reduced_context']] = df['sentiment'].apply(pd.Series)
#%%
df_headers = df.columns.values.tolist()
meta_list = ['doc_id','para_id','context','reduced_context','para_word_count','sentiment_pos','sentiment_neg','total_words_no_stop']
var_list = [v for v in df_headers if v not in meta_list ]
var_list.remove('sentiment')
ordered_var_list = meta_list + var_list
df = df[ordered_var_list]
df.to_csv('data/sentiment.csv',encoding='utf-8')
``` |
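A toy illustration of the negation-window logic in `get_pos_count`, with made-up word lists and a made-up search term; it relies on the `util` helpers behaving as they are used above and on the module-level `negations`, `stopw` and `lemmatizer` objects.
```python
toy_pos = ['improve']
toy_neg = ['decline']
toy_keys = ['growth']
toy_rex = construct_rex(toy_keys)

pos, neg, n_words, reduced = get_pos_count(
    "growth did not improve this year.",
    toy_pos, toy_neg, negations, stopw, lemmatizer, toy_keys, toy_rex, by='sent')
# 'improve' sits within three tokens of 'not', so it is counted as negative;
# expected output (if the keyword matcher picks up 'growth'): 0 1
print(pos, neg)
```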
{
"source": "johnsonjo4531/deno",
"score": 2
} |
#### File: deno/tools/cargo_publish_others.py
```python
import os
import sys
import argparse
from util import run, root_path
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", action="store_true")
args = parser.parse_args()
cargo_publish = ["cargo", "publish"]
if args.dry_run:
cargo_publish += ["--dry-run"]
# Publish the deno_typescript crate.
os.chdir(os.path.join(root_path, "deno_typescript"))
run(cargo_publish)
# Publish the deno_cli crate.
os.chdir(os.path.join(root_path, "cli"))
run(cargo_publish)
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "johnsonkee/graduate_design",
"score": 3
} |
#### File: graduate_design/advgan/Cadvgan.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow import keras
from tensorflow.keras import layers
import time
import pdb
from IPython import display
import pandas
# the input is picture x
def make_generator_model():
"""
:return: tensor:[None,784]
"""
model = tf.keras.Sequential()
    # the images have been flattened to 784 and concatenated with one-hot labels.
model.add(layers.Dense(28*28, use_bias=False, input_shape=(794,)))
model.add(layers.Reshape((28,28,1)))
assert model.output_shape == (None, 28, 28, 1) # Note: None is the batch size
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 14, 14, 64) # Note:None is the size of batch
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 7, 7, 128)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2D(256, (5, 5), strides=(1, 1), padding='same', use_bias=False,
input_shape=[28, 28, 1]))
assert model.output_shape == (None, 7, 7, 256)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
assert model.output_shape == (None, 7, 7, 128)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 14, 14, 64)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
assert model.output_shape == (None, 28, 28, 1)
return model
def make_discriminator_model():
model = tf.keras.Sequential()
    # the images have been flattened to 784 and concatenated with one-hot labels.
model.add(layers.Dense(28*28, use_bias=False, input_shape=(794,)))
model.add(layers.Reshape((28,28,1)))
assert model.output_shape == (None, 28, 28, 1) # Note: None is the batch size
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
def classifier_loss(preds, target, is_targeted=False):
# if it is targeted attack
    # one option is to convert the integer target labels to one-hot vectors
# normal loss:
"""
if is_targeted:
return cross_entropy(preds, target)
return -cross_entropy(preds, target)
"""
# C&W loss
real = tf.reduce_sum(target * preds, 1)
other = tf.reduce_max((1 - target) * preds - (target * 10000), 1)
if is_targeted:
return tf.reduce_sum(tf.maximum(0.0, other - real))
return tf.reduce_sum(tf.maximum(0.0, real - other))
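# Worked illustration of the C&W-style margin above, with made-up logits:
# preds = [2.0, 0.5, -1.0] and one-hot target = [1, 0, 0] give
#   real  = 2.0   (logit of the labelled class)
#   other = 0.5   (best logit among the remaining classes)
# so the untargeted loss is max(0, real - other) = 1.5, pushing the generator
# to make some other class overtake the true one; the targeted variant instead
# rewards the chosen target class overtaking all others.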
def perturb_loss(perturbation, thresh=0.3):
zeros = tf.zeros((tf.shape(perturbation)[0]))
# norm-2
#return tf.reduce_mean(
# tf.maximum(zeros, tf.norm(tf.reshape(perturbation, (tf.shape(perturbation)[0], -1)), axis=1) - thresh))
# norm-inf
return tf.reduce_mean(
tf.maximum(zeros,
tf.norm(tf.reshape(perturbation, (tf.shape(perturbation)[0], -1)), ord=np.inf, axis=1) - thresh))
def total_loss(f_loss, gan_loss, perturb_loss, alpha=1.0, beta=5.0):
"""
"""
total_loss = f_loss + alpha * gan_loss + beta * perturb_loss
return total_loss
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images, labels, alpha, beta):
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# pdb.set_trace()
perturbation = generator(tf.concat([images,labels],1), training=True)
generated_images = images + tf.reshape(perturbation,[-1,784])
real_output = discriminator(tf.concat([images,labels],1), training=True)
fake_output = discriminator(tf.concat([generated_images,labels],1), training=True)
preds = classifier(tf.reshape(generated_images,[-1,28,28,1]))
class_loss = classifier_loss(preds, labels, is_targeted=False)
gen_loss = generator_loss(fake_output)
pert_loss = perturb_loss(perturbation, thresh=0.3)
all_loss = total_loss(class_loss, gen_loss, pert_loss, alpha=alpha, beta=beta)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(all_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
return all_loss, disc_loss
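# Added commentary (not in the original script): only the generator is optimised
# against the combined objective (C&W classifier loss + GAN loss + perturbation
# hinge), while the discriminator is updated with the standard real/fake
# cross-entropy alone.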
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4,4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
# plt.show()
def train(dataset, labels, epochs):
record_loss = []
for epoch in range(epochs):
start = time.time()
for image_batch, label_batch in zip(dataset, labels):
all_loss, disc_loss = train_step(image_batch, label_batch,alpha=1,beta=2)
print("gen_loss:{},disc_loss:{}".format(all_loss.numpy(),disc_loss.numpy()))
record_loss.append([epoch+1,all_loss.numpy(),disc_loss.numpy()])
# Produce images for the GIF as we go
display.clear_output(wait=True)
# generate_and_save_images(generator,epoch + 1,test_images[1])
# Save the model every 15 epochs
if (epoch + 1) % 15 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
generator.save("generator_cgan_a1_b2.h5")
discriminator.save("discriminator_cgan_a1_b2.h5")
df = pandas.DataFrame(record_loss, columns=["epoch", "gen_loss", "disc_loss"])
if not os.path.exists("log"):
os.makedirs("log")
df.to_csv("log/Cadvgan_loss.csv",index=False)
print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
display.clear_output(wait=True)
# Generate after the final epoch
# generate_and_save_images(generator,epochs)
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    # grow GPU memory allocation on demand instead of reserving it all up front
tf.config.gpu.set_per_process_memory_growth(enabled=True)
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
test_images = (test_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
train_images = train_images.reshape(-1, 784)
train_labels = keras.utils.to_categorical(train_labels).reshape(-1, 10)
BUFFER_SIZE = 60000
BATCH_SIZE = 256
# Batch and shuffle the data
# no shuffle
train_images_dataset = tf.data.Dataset.from_tensor_slices(train_images).batch(BATCH_SIZE)
train_labels_dataset = tf.data.Dataset.from_tensor_slices(train_labels).batch(BATCH_SIZE)
# shuffle
# train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
use_model = False
if use_model:
generator = keras.models.load_model("generator_cgan.h5")
discriminator = keras.models.load_model("discriminator_cgan.h5")
else:
generator = make_generator_model()
discriminator = make_discriminator_model()
classifier_path = "./models/CNN_mnist.h5"
classifier = keras.models.load_model(classifier_path)
classifier.evaluate(test_images, test_labels, verbose=1)
# https://github.com/caogang/wgan-gp/blob/master/gan_mnist.py Line163
generator_optimizer = tf.keras.optimizers.Adam(learning_rate = 1e-4,beta_1 = 0.5,beta_2 = 0.9)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate = 1e-4,beta_1 = 0.5,beta_2 = 0.9)
checkpoint_dir = './training_checkpoints_cgan'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 700
train(train_images_dataset,train_labels_dataset,EPOCHS)
```
#### File: graduate_design/cleverhans_tutorials/mymodel.py
```python
from distutils.version import LooseVersion
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
from keras.layers import Conv2D
else:
from keras.layers import Convolution2D
import math
def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
    """
    Defines the right convolutional layer according to the
    version of Keras that is installed.
    :param filters: (required integer) the dimensionality of the output
                    space (i.e. the number of output filters in the
                    convolution)
    :param kernel_shape: (required tuple or list of 2 integers) specifies
                         the dimensions of the convolution window
    :param strides: (required tuple or list of 2 integers) specifies the
                    strides of the convolution along the width and height
    :param padding: (required string) can be either 'valid' (no padding around
                    input or feature map) or 'same' (pad to ensure that the
                    output feature map size is identical to the layer input)
    :param input_shape: (optional) give input shape if this is the first
                        layer of the model
    :return: the Keras layer
    """
    # NOTE: the body was missing in this copy; reconstructed here (assumption)
    # to dispatch on the installed Keras version, as the docstring describes.
    if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
        if input_shape is not None:
            return Conv2D(filters=filters, kernel_size=kernel_shape,
                          strides=strides, padding=padding,
                          input_shape=input_shape)
        return Conv2D(filters=filters, kernel_size=kernel_shape,
                      strides=strides, padding=padding)
    if input_shape is not None:
        return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
                             subsample=strides, border_mode=padding,
                             input_shape=input_shape)
    return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
                         subsample=strides, border_mode=padding)
def modelB(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
    :param img_rows: number of rows in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
input_shape=input_shape),
Activation('relu'),
conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
Activation('relu'),
conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
Activation('relu'),
Flatten(),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
model.add(Activation('softmax'))
return model
def modelA(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
    :param img_rows: number of rows in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [Flatten(input_shape=input_shape),
Dense(nb_filters),
Activation('relu'),
Dense(nb_filters * 2),
Activation('relu'),
Dense(nb_filters * 4),
Activation('relu'),
Dropout(0.2),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
    if logits:
        logits_tensor = model(input_ph)
    model.add(Activation('softmax'))
    if logits:
        # also return the pre-softmax logits tensor, as the docstring states
        return model, logits_tensor
    return model
def modelC(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
    :param img_rows: number of rows in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
model = keras.Sequential([
keras.layers.Conv2D(input_shape=(28, 28, 1),
kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.MaxPool2D(),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.MaxPool2D(),
keras.layers.Flatten(),
keras.layers.Dense(200, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(200, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
return model
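# Hedged usage sketch (added commentary; the training setup below is an
# assumption, not from the original file): each factory above returns an
# un-compiled Keras Sequential model mapping (batch, 28, 28, 1) MNIST images to
# 10 class probabilities, assuming TensorFlow channels-last image ordering, e.g.
#
#     model = modelC()
#     model.compile(optimizer='adam', loss='categorical_crossentropy',
#                   metrics=['accuracy'])
#     model.fit(x_train, y_train_onehot, batch_size=128, epochs=5)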
```
#### File: graduate_design/image/mnist_natural_adversary.py
```python
import os, sys
import pickle
import numpy as np
import tensorflow as tf
import argparse
from keras.models import load_model
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['font.size'] = 12
import matplotlib.pyplot as plt
plt.style.use('seaborn-deep')
import tflib.mnist
from mnist_wgan_inv import MnistWganInv
from search import iterative_search, recursive_search
import pickle
def save_adversary(adversary, filename):
fig, ax = plt.subplots(1, 2, figsize=(9, 4))
ax[0].imshow(np.reshape(adversary['x'], (28, 28)),
interpolation='none', cmap=plt.get_cmap('gray'))
ax[0].text(1, 5, str(adversary['y']), color='white', fontsize=50)
ax[0].axis('off')
ax[1].imshow(np.reshape(adversary['x_adv'], (28, 28)),
interpolation='none', cmap=plt.get_cmap('gray'))
ax[1].text(1, 5, str(adversary['y_adv']), color='white', fontsize=50)
ax[1].axis('off')
fig.savefig(filename)
plt.close()
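# Added commentary (not in the original script): save_adversary() expects the
# dict produced by the search routines used below, with at least the keys
# 'x'/'y' (the original 784-dim image and its label) and 'x_adv'/'y_adv'
# (the natural adversarial example and the label the classifier assigns it),
# and renders the two images side by side into `filename`.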
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gan_path', type=str, default='./models/model-47999',
help='mnist GAN path')
parser.add_argument('--rf_path', type=str, default='./models/mnist_rf_9045.sav',
help='RF classifier path')
parser.add_argument('--lenet_path', type=str, default='./models/mnist_lenet_9871.h5',
help='LeNet classifier path')
parser.add_argument('--classifier', type=str, default='rf',
help='classifier: rf OR lenet')
parser.add_argument('--iterative', action='store_true',
help='iterative search OR recursive')
parser.add_argument('--nsamples', type=int, default=5000,
help='number of samples in each search iteration')
parser.add_argument('--step', type=float, default=0.01,
help='Delta r for search step size')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--output_path', type=str, default='./examples/',
help='output path')
args = parser.parse_args()
if args.classifier == 'rf':
classifier = pickle.load(open(args.rf_path, 'rb'))
def cla_fn(x):
return classifier.predict(np.reshape(x, (-1, 784)))
elif args.classifier == 'lenet':
graph_CLA = tf.Graph()
with graph_CLA.as_default():
classifier = load_model(args.lenet_path)
def cla_fn(x):
with graph_CLA.as_default():
return np.argmax(classifier.predict_on_batch(np.reshape(x, (-1, 1, 28, 28))), axis=1)
else:
sys.exit('Please choose MNIST classifier: rf OR lenet')
graph_GAN = tf.Graph()
with graph_GAN.as_default():
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
sess_GAN = tf.Session(config=config)
model_GAN = MnistWganInv()
saver_GAN = tf.train.Saver(max_to_keep=100)
saver_GAN = tf.train.import_meta_graph('{}.meta'.format(args.gan_path))
saver_GAN.restore(sess_GAN, args.gan_path)
def gen_fn(z):
with sess_GAN.as_default():
with graph_GAN.as_default():
x_p = sess_GAN.run(model_GAN.generate(tf.cast(tf.constant(np.asarray(z)), 'float32')))
return x_p
def inv_fn(x):
with sess_GAN.as_default():
with graph_GAN.as_default():
z_p = sess_GAN.run(model_GAN.invert(x))
return z_p
if args.iterative:
search = iterative_search
else:
search = recursive_search
_, _, test_data = tflib.mnist.load_data()
x_adv = []
y_origin = []
for i in range(500):
x = test_data[0][i+100]
y = test_data[1][i+100]
y_pred = cla_fn(x)[0]
# only modify those samples which are correctly classified
if y_pred != y:
continue
adversary = search(gen_fn, inv_fn, cla_fn, x, y,
nsamples=args.nsamples, step=args.step, verbose=args.verbose)
if args.iterative:
filename = 'mnist_{}_iterative_{}.png'.format(str(i).zfill(4), args.classifier)
else:
filename = 'mnist_{}_recursive_{}.png'.format(str(i).zfill(4), args.classifier)
x_adv.append(adversary["x_adv"])
y_origin.append(adversary["y"])
# save_adversary(adversary, os.path.join(args.output_path, filename))
if i % 50 == 0:
pickle.dump(x_adv,open("../../server_copyin/x_adv500.pkl","wb"))
pickle.dump(y_origin,open("../../server_copyin/y_origin500.pkl","wb"))
``` |
{
"source": "johnsonkobe24/privacyflash-pro",
"score": 2
} |
#### File: privacyflash-pro/tests/test_privacy_practices.py
```python
import unittest
from policygenerator.src.privacy_practices import *
def rm_dsstore_list(dir_list):
    """
    ** Intended for macOS users who sorted files in Finder in any special way **
    Removes from the input list any directory string that contains .DS_Store
    (i.e. drops the paths of .DS_Store files). The list is modified in place
    and also returned.
    :param: list of directories
    :return: list of directories without any .DS_Store paths
    """
    # iterate over a copy so that removing items does not skip elements
    for directory in dir_list[:]:
        if ".DS_Store" in directory:
            dir_list.remove(directory)
    return dir_list
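# Added usage note (commentary only, not from the original tests): the helper
# both mutates and returns its argument, so the tests below can either use the
# return value or rely on the in-place update, e.g.
#   dirs = ["app/Info.plist", "app/.DS_Store"]
#   rm_dsstore_list(dirs)   # -> ["app/Info.plist"]; `dirs` is updated in place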
class TestPrivacyPractices(unittest.TestCase):
def test_retrieve_privacy_practice_data(self):
"""
Tests that retrieve_privacy_practice_data() correctly creates a dictionary
from spec/privacy_practices.yaml
        Compares the output length of the dictionary to
policygenerator.constants.Practices
:retrieve_privacy_practice_data() param: none
:retrieve_privacy_practice_data() return: dic w/ privacy practices enums as keys
"""
data = retrieve_privacy_practice_data()
self.assertEqual(len(data), len(Practices))
def test_search_root_dir(self):
"""
Tests that all of the elements not a part of "Pods" or "Carthage" or
third party frameworks are added to the list returned from
search_root_dir()
Compares the length of the list to the number of items there are in the directory
:search_root_dir() param: root directory (string)
:search_root_dir() return: list of all files in root directory (list)
"""
no_files = 20
test_app_dir = "testappdir/testapp/"
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), test_app_dir)
res_dir = search_root_dir(path)
rm_dsstore_list(res_dir) # remove .DS_Store files
self.assertEqual(len(res_dir), no_files) # check length against predetermined count
def test_get_pod_loc(self):
"""
Tests that get_pod_loc() correctly identifies the number of
        CocoaPods SDKs that match the specification of get_pod_loc()
Tests length of returned list against the number of items in the directory
:get_pod_loc() param: directory of iOS project (string)
        :get_pod_loc() return: list of CocoaPods SDK directories (strings)
"""
no_dir = 2
test_app_dir = "testappdir/testapp/"
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), test_app_dir)
res_dir = get_pod_loc(path)
self.assertEqual(len(res_dir), no_dir) # check length against predetermined count
def test_get_cart_loc(self):
"""
Tests that get_cart_loc() correctly identifies the number of
Carthage sdks, in accordance with the spec of get_cart_loc()
Tests length of returned list against the number of items in the directory
:get_cart_loc() param: directory of iOS project (string)
:get_cart_loc() return: list of Carthage sdk directories (strings)
"""
no_dir = 5
test_app_dir = "testappdir/testapp/"
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), test_app_dir)
res_dir = get_cart_loc(path)
self.assertEqual(len(res_dir), no_dir) # check length against predetermined count
def test_grab_third_party_files(self):
"""
Tests that grab_third_party_files() grabs the correct number of other sdks
used in a given project
Tests length of returned dic against the number of locations given in the list
:grab_third_party_files() param: list of directories of sdks (list)
:grab_third_party_files() return: dic containing sdks as keys and directories
of sdks as lists (dic)
"""
no_dir = 5
test_app_dir = "testappdir/testapp/sdks/"
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), test_app_dir)
res_dir = grab_third_party_files([path])
rm_dsstore_list(res_dir[path]) # remove .DS_Store files
self.assertEqual(len(res_dir[path]), no_dir) # check len against predetermined count
def test_load_third_df(self):
"""
Tests that load_third_df() correctly parses the ad networks dataframe
Tests length of returned dictionary against number of elements in
third_parties.yaml
:load_third_df() param: none
:load_third_df() return: dic of every SDK along with their type (dic)
"""
data = load_third_df()
self.assertEqual(len(data), 300)
def test_locate_entitlements_file(self):
"""
        Tests that locate_entitlements_file identifies the entitlements file in the directory
:locate_entitlements_file() param: root directory (string)
:locate_entitlements_file() return: location of entitlements files (string)
"""
import_dir = os.path.abspath(os.path.dirname(__file__))
test_app_dir = "testappdir/"
entitlements_dir = "testappdir/testapp/Clients/Entitlements/testapp.entitlements"
path = os.path.join(import_dir, test_app_dir)
entitlements = os.path.join(import_dir, entitlements_dir)
res_dir = locate_entitlements_file(path)
self.assertEqual(res_dir, entitlements)
def test_load_data(self):
"""
        Tests that load_data() correctly accumulates and loads all relevant data
utilizing the previous functions in the privacy_practices.py module
        Essentially calls all previous functions one more time to ensure that
load_data() runs properly
:load_data() param: root directory (string)
        :load_data() return: tuple of (fp_info, fp_files, sdk_files, tp_info, entitlements)
"""
test_app_dir = "testappdir/testapp/"
path_dir = os.path.abspath(os.path.dirname(__file__)) # location of '/tests' folder
path = os.path.join(path_dir, test_app_dir) # full root path
(fp_info, fp_files, sdk_files, tp_info, entitlements) = load_data(path)
# Remove .DS_Store files
fp_files = rm_dsstore_list(fp_files)
self.assertEqual(len(fp_info), len(Practices)) # test_retrieve_privacy_practice_data
self.assertEqual(len(fp_files), 20) # test_search_root_dir
self.assertEqual(len(tp_info), 300) # test_load_third_df
self.assertEqual(len(sdk_files), 2 + 5) # test_grab_third_party_files
entitlements_dir = "testappdir/testapp/Clients/Entitlements/testapp.entitlements"
self.assertEqual(entitlements, os.path.join(path_dir, entitlements_dir))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohnsonLee98/hover_1",
"score": 2
} |
#### File: src/model/dist.py
```python
import tensorflow as tf
from tensorpack import *
from tensorpack.models import BatchNorm, BNReLU, Conv2D, MaxPooling, Conv2DTranspose
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from .utils import *
import sys
sys.path.append("..") # adds higher directory to python modules path.
try: # HACK: import beyond current level, may need to restructure
from config import Config
except ImportError:
assert False, 'Fail to import config.py'
"""
Ported from the Naylor code at
https://github.com/PeterJackNaylor/DRFNS/blob/master/src_RealData/
to match our processing framework
"""
class Graph(ModelDesc, Config):
def __init__(self):
super(Graph, self).__init__()
assert tf.test.is_gpu_available()
self.data_format = 'channels_first'
def _get_inputs(self):
return [InputDesc(tf.float32, [None] + self.train_input_shape + [3], 'images'),
InputDesc(tf.float32, [None] + self.train_mask_shape + [None], 'truemap-coded')]
# for node to receive manual info such as learning rate.
def add_manual_variable(self, name, init_value, summary=True):
var = tf.get_variable(name, initializer=init_value, trainable=False)
if summary:
tf.summary.scalar(name + '-summary', var)
return
def _get_optimizer(self):
with tf.variable_scope("", reuse=True):
lr = tf.get_variable('learning_rate')
opt = self.optimizer(learning_rate=lr)
return opt
def _build_graph(self, inputs):
####
def down_conv_block(name, l, channel, nr_blks, stride=1):
with tf.variable_scope(name):
if stride != 1:
assert stride == 2, 'U-Net supports stride 2 down-sample only'
l = MaxPooling('max_pool', l, 2, strides=2)
for idx in range(0, nr_blks):
l = Conv2D('conv_%d' % idx, l, channel, 3,
padding='valid', strides=1, activation=BNReLU)
return l
####
        def up_conv_block(name, l, shortcut, channel, nr_blks, stride=2):
with tf.variable_scope(name):
if stride != 1:
up_channel = l.get_shape().as_list()[1] # NCHW
assert stride == 2, 'U-Net supports stride 2 up-sample only'
l = Conv2DTranspose('deconv', l, up_channel, 2, strides=2)
                l = tf.concat([l, shortcut], axis=1)
for idx in range(0, nr_blks):
l = Conv2D('conv_%d' % idx, l, channel, 3,
padding='valid', strides=1, activation=BNReLU)
return l
####
is_training = get_current_tower_context().is_training
images, truemap_coded = inputs
orig_imgs = images
if self.type_classification:
true_type = truemap_coded[...,1]
true_type = tf.cast(true_type, tf.int32)
true_type = tf.identity(true_type, name='truemap-type')
one_type = tf.one_hot(true_type, self.nr_types, axis=-1)
true_type = tf.expand_dims(true_type, axis=-1)
true_dst = truemap_coded[...,-1]
true_dst = tf.expand_dims(true_dst, axis=-1)
true_dst = tf.identity(true_dst, name='truemap-dst')
#### Xavier initializer
with argscope(Conv2D, activation=tf.identity, use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
bias_initializer=tf.constant_initializer(0.1)), \
argscope([Conv2D, Conv2DTranspose, MaxPooling, BatchNorm], data_format=self.data_format):
i = tf.transpose(images / 255.0, [0, 3, 1, 2])
####
with tf.variable_scope('encoder'):
e0 = down_conv_block('e0', i, 32, nr_blks=2, stride=1)
e1 = down_conv_block('e1', e0, 64, nr_blks=2, stride=2)
e2 = down_conv_block('e2', e1, 128, nr_blks=2, stride=2)
e3 = down_conv_block('e3', e2, 256, nr_blks=2, stride=2)
e4 = down_conv_block('e4', e3, 512, nr_blks=2, stride=2)
c0 = crop_op(e0, (176, 176))
c1 = crop_op(e1, (80, 80))
c2 = crop_op(e2, (32, 32))
c3 = crop_op(e3, (8, 8))
with tf.variable_scope('decoder'):
d3 = up_conv_block('d3', e4, c3, 256, nr_blks=2, stride=2)
d2 = up_conv_block('d2', d3, c2, 128, nr_blks=2, stride=2)
d1 = up_conv_block('d1', d2, c1, 64, nr_blks=2, stride=2)
d0 = up_conv_block('d0', d1, c0, 32, nr_blks=2, stride=2)
####
logi_dst = Conv2D('conv_out_dst', d0, 1, 1, activation=tf.identity)
logi_dst = tf.transpose(logi_dst, [0, 2, 3, 1])
pred_dst = tf.identity(logi_dst, name='predmap-dst')
if self.type_classification:
logi_type = Conv2D('conv_out_type', d0, self.nr_types, 1, activation=tf.identity)
logi_type = tf.transpose(logi_type, [0, 2, 3, 1])
soft_type = tf.nn.softmax(logi_type, axis=-1)
# encoded so that inference can extract all output at once
predmap_coded = tf.concat([soft_type, pred_dst], axis=-1)
else:
predmap_coded = pred_dst
# * channel ordering: type-map, segmentation map
# encoded so that inference can extract all output at once
predmap_coded = tf.identity(predmap_coded, name='predmap-coded')
####
if is_training:
######## LOSS
loss = 0
### regression loss
loss_mse = pred_dst - true_dst
loss_mse = loss_mse * loss_mse
loss_mse = tf.reduce_mean(loss_mse, name='loss_mse')
loss += loss_mse
if self.type_classification:
loss_type = categorical_crossentropy(soft_type, one_type)
loss_type = tf.reduce_mean(loss_type, name='loss-xentropy-class')
add_moving_summary(loss_type)
loss += loss_type
wd_loss = regularize_cost('.*/W', l2_regularizer(5.0e-6), name='l2_regularize_loss')
loss += wd_loss
self.cost = tf.identity(loss, name='cost')
add_moving_summary(self.cost)
####
add_param_summary(('.*/W', ['histogram'])) # monitor W
            #### logging visual summaries
orig_imgs = tf.cast(orig_imgs , tf.uint8)
tf.summary.image('input', orig_imgs, max_outputs=1)
orig_imgs = crop_op(orig_imgs, (184, 184), "NHWC")
pred_dst = colorize(pred_dst[...,0], cmap='jet')
true_dst = colorize(true_dst[...,0], cmap='jet')
viz = tf.concat([orig_imgs, true_dst, pred_dst,], 2)
tf.summary.image('output', viz, max_outputs=1)
return
``` |
{
"source": "johnson-li/fast-pandas",
"score": 3
} |
#### File: fast-pandas/tests/test_groupby.py
```python
from unittest import TestCase
from quick_pandas.wrappers.pandas.groupby import *
SIZE = 1000
RANGE = 10
class TestGroupBy(TestCase):
def small_df(self):
a = np.array([1, 1, 3, 3, 3, 1], dtype=float)
b = np.array([1, 1, 2, 2, 10, 1], dtype=float)
c = np.array([11, 22, 33, 44, 55, 66], dtype=float)
d = np.array([111, 222, 333, 444, 555, 666], dtype=float)
e = np.array([1, 1, 3, 3, 3, 1]).astype(str)
return pd.DataFrame({'A': a, 'B': b, 'C': c, 'D': d, 'E': e})
def large_df(self):
return pd.DataFrame({'A': np.random.randint(0, RANGE, SIZE),
'B': np.random.randint(0, RANGE, SIZE),
'C': np.random.rand(SIZE),
'D': np.random.rand(SIZE),
'E': np.random.randint(0, 2, SIZE).astype(str),
})
def test_group(self):
data = [np.array([1, 2, 1, 3, 4]),
np.array(['a', 'b', 'a', 'a', 'c'])]
au8, dts = dtypes.convert_to_uint8(data)
groups = group(au8, dts, 0, list(np.arange(len(data[0]))))
def test_group_by(self):
df1 = self.large_df()
df2 = df1.copy()
by = ['A', 'B', 'E']
targets = ['C', 'D']
res1 = df1.groupby(by=by, sort=True).transform(np.mean)
for b in by:
res1[b] = df1[b]
res2 = group_and_transform(df2, by, targets, np.mean, inplace=False, sort=False)
for t in targets:
self.assertTrue((res1[t].values == res2[t].values).all())
def test_group_by_ext(self):
df1 = self.small_df()
df2 = df1.copy()
by = ['A', 'B', 'E']
targets = ['C', 'D']
res1 = df1.groupby(by=by, sort=True).transform(np.mean)
for b in by:
res1[b] = df1[b]
print(res1)
res2 = quick_pandas.ext.group_by.group_and_transform(df2, by, targets, np.mean, inplace=False, sort=False)
print(res2)
for t in targets:
self.assertTrue((res1[t].values == res2[t].values).all())
def test_transform(self):
array = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
res = quick_pandas.ext.group_by.transform_py([array], [(0, 3), (3, 8), (8, 9)],
np.arange(len(array)).astype(np.int32), np.mean)
self.assertEqual([2, 2, 2, 6, 6, 6, 6, 6, 9], res[0].tolist())
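        # Added commentary (not from the original test): the ranges
        # [(0, 3), (3, 8), (8, 9)] split the array into [1, 2, 3], [4, 5, 6, 7, 8]
        # and [9], whose means are 2, 6 and 9, which is exactly the broadcasted
        # result asserted above.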
``` |
{
"source": "johnson-li/fast_pandas",
"score": 3
} |
#### File: fast_pandas/tests/test_sort.py
```python
from unittest import TestCase
import quick_pandas.sort
from quick_pandas.sort import *
class TestSort(TestCase):
def __init__(self, *args, **kwargs):
super(TestSort, self).__init__(*args, **kwargs)
# def test_radix_sort(self):
# for array_range in [1, 10, 100, 1000, 10000, 100000]:
# for array_length in [1, 10, 100, 1000, 10000, 100000]:
# array = np.random.randint(-array_range, array_range, (array_length,))
# radix_sort(array)
# for i in range(1, len(array)):
# self.assertGreaterEqual(array[i], array[i - 1])
def test_radix_argsort_int(self):
quick_pandas.sort.INSERTION_SORT_LIMIT = 0
for array_range in [1, 10, 100, 1000, 10000, 100000]:
for array_length in [1, 10, 100, 1000, 10000, 100000]:
array = np.random.randint(-array_range, array_range, (array_length,))
au8, dts = dtypes.convert_to_uint8([array])
indexes = np.arange(array_length)
ranges = radix_argsort0_int(au8, dts, 0, array, indexes, 0, array_length)
array_sorted = np.sort(array)
for i in range(array_length):
self.assertEqual(array[indexes[i]], array_sorted[i])
pre_max = None
length = 0
for r in ranges:
start, end, array_index, uniform = r
length += end - start
max_val = array[indexes[start]]
for i in range(start, end):
if uniform:
self.assertEqual(array[indexes[i]], array[indexes[start]])
if pre_max is not None:
self.assertGreater(array[indexes[i]], pre_max)
if array[indexes[i]] > max_val:
max_val = array[indexes[i]]
pre_max = max_val
self.assertEqual(array_length, length)
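    # Added commentary (not from the original tests): each entry of `ranges`
    # unpacks as (start, end, array_index, uniform), a half-open slice of the
    # index array; when `uniform` is True every element in that slice shares the
    # same key value, which is what the loops above verify.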
def test_radix_argsort_str(self):
quick_pandas.sort.INSERTION_SORT_LIMIT = 0
for array_range in [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000]:
for array_length in [1, 10, 100, 1000, 10000, 10000]:
array = np.random.randint(0, array_range, (array_length,))
array = array.astype(str)
au8, dts = dtypes.convert_to_uint8([array])
indexes = np.arange(array_length)
ranges = radix_argsort0_str(au8, dts, 0, array.view(np.uint8), indexes, 0, array_length, array.itemsize)
array_sorted = np.sort(array)
for i in range(array.shape[0]):
self.assertEqual(array[indexes[i]], array_sorted[i])
pre_max = None
length = 0
for r in ranges:
start, end, array_index, uniform = r
length += end - start
max_val = array[indexes[start]]
for i in range(start, end):
if uniform:
self.assertEqual(array[indexes[i]], array[indexes[start]])
if pre_max is not None:
self.assertGreater(array[indexes[i]], pre_max)
if array[indexes[i]] > max_val:
max_val = array[indexes[i]]
pre_max = max_val
self.assertEqual(array_length, length)
def test_radix_argsort_float64(self):
quick_pandas.sort.INSERTION_SORT_LIMIT = 0
for array_range in [1, 10, 100, 1000, 10000, 100000, 1000000]:
for array_length in [10, 100, 1000, 10000, 10000]:
array = np.random.rand(array_length)
array -= 0.5
array *= array_range
array[0] = np.nan
array[1] = np.inf
array[2] = -np.inf
au8, dts = dtypes.convert_to_uint8([array])
indexes = np.arange(array_length)
ranges = radix_argsort0_float(au8, dts, 0, array.view(np.uint64), indexes, 0, array_length)
array_sorted = np.sort(array)
for i in range(array_length):
if np.isnan(array_sorted[i]):
self.assertTrue(np.isnan(array[indexes[i]]))
else:
self.assertEqual(array[indexes[i]], array_sorted[i])
pre_max = None
length = 0
for r in ranges:
start, end, array_index, uniform = r
length += end - start
max_val = array[indexes[start]]
for i in range(start, end):
if uniform:
if np.isnan(array[indexes[i]]):
self.assertTrue(np.isnan(array[indexes[start]]))
else:
self.assertEqual(array[indexes[i]], array[indexes[start]])
if pre_max is not None:
if not np.isnan(array[indexes[i]]):
self.assertGreater(array[indexes[i]], pre_max)
if array[indexes[i]] > max_val:
max_val = array[indexes[i]]
pre_max = max_val
self.assertEqual(array_length, length)
def test_cmp_mix_single(self):
for dtype in [np.int64, np.float64, np.str]:
array = np.array([121, 12], dtype=dtype)
arrays = [array]
arrays_uint8 = [a.view(np.uint8) for a in arrays]
res = cmp_mix(arrays_uint8, dtypes.get_dtypes(arrays), 0, 0, 1)
self.assertEqual(res, 1)
res = cmp_mix(arrays_uint8, dtypes.get_dtypes(arrays), 0, 1, 0)
self.assertEqual(res, -1)
res = cmp_mix(arrays_uint8, dtypes.get_dtypes(arrays), 0, 0, 0)
self.assertEqual(res, 0)
def test_cmp_mix_multiple(self):
arrays = [
np.array([1, 1]),
np.array([1.1, 1.1]),
np.array(['1.11', '1.1']),
]
arrays_uint8, dts = dtypes.convert_to_uint8(arrays)
res = cmp_mix(arrays_uint8, dts, 0, 0, 1)
self.assertEqual(res, 1)
res = cmp_mix(arrays_uint8, dts, 0, 1, 0)
self.assertEqual(res, -1)
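    # Added commentary (not from the original tests): cmp_mix follows the usual
    # three-way comparator convention; it returns 1 when the entry at the first
    # index sorts after the entry at the second index, -1 when it sorts before,
    # and 0 on a tie. With multiple arrays the comparison is lexicographic in
    # array order, as the multi-array test above exercises.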
def test_insertion_argsort(self):
for array_range in [1, 10, 100, 1000, 10000, 100000]:
for array_length in [1, 10, 100, 1000]:
for dtype in [np.int64, np.float64, np.str]:
array = np.random.randint(-array_range, array_range, (array_length,))
array = array.astype(dtype)
au8, dts = dtypes.convert_to_uint8([array])
indexes = np.arange(array_length)
ranges = insertion_argsort0(au8, dts, 0, indexes, 0, array_length)
res = np.argsort(array)
for i in range(array_length):
a = array[res[i]]
b = array[indexes[i]]
if dtype in [np.float64, np.float32] and np.isnan(a):
self.assertEqual(np.isnan(a), np.isnan(b))
else:
self.assertEqual(a, b)
pre = None
length = 0
for r in ranges:
start, end, array_index, uniform = r
for i in range(start + 1, end):
self.assertEqual(array[indexes[start]], array[indexes[i]])
                        if pre is not None:
self.assertGreater(array[indexes[start]], pre)
pre = array[indexes[start]]
length += end - start
self.assertEqual(array_length, length)
def test_range(self):
@njit()
def outer():
short_rgs = []
long_rgs = [(0, 16)]
while long_rgs:
rg = long_rgs.pop(0)
for r in inner(rg):
if r[1] - r[0] > 2:
long_rgs.append(r)
else:
short_rgs.append(r)
return short_rgs
@njit()
def inner(rg: List):
mid = (rg[0] + rg[1]) // 2
return [(rg[0], mid), (mid, rg[1])]
self.assertEqual([(i, i + 2) for i in range(0, 16, 2)], outer())
def test_radix_argsort_mix(self):
quick_pandas.sort.INSERTION_SORT_LIMIT = 64
array_size = 1000
array_range = 100
array = np.random.randint(0, array_range, array_size)
int_array = array - array_range // 2
float_array = array / 10
str_array = array.astype(str)
for array in [int_array, float_array, str_array]:
indexes = np.arange(array_size)
au8, dts = dtypes.convert_to_uint8([array])
ranges = radix_argsort0_mix(au8, dts, indexes)
array_sorted = np.sort(array)
for i in range(array_size):
if array.dtype.type in [np.float32, np.float64] and np.isnan(array_sorted[i]):
self.assertTrue(np.isnan(array[indexes[i]]))
else:
self.assertEqual(array[indexes[i]], array_sorted[i])
self.assertEqual(array_size, sum([r[1] - r[0] for r in ranges]))
def test_radix_argsort_multiarray(self):
arrays = [np.array(['1', '11', '12', '1', '12']), np.array([1, 11, 12, 1, 12]),
np.array([1.1, 2.2, 3.3, 1.1, 1.1])]
au8, dts = dtypes.convert_to_uint8(arrays)
indexes = np.arange(5)
ranges = radix_argsort0_mix(au8, dts, indexes)
print(ranges)
for array in arrays:
print(array[indexes])
for i in range(4):
self.assertEqual([(0, 2, 0, False), (2, 3, 0, False), (3, 4, 0, False), (4, 5, 0, False)][i], ranges[i])
# self.assertEqual(['1', '1', '11', '12', '12'], arrays[0][indexes])
# self.assertEqual([1, 1, 11, 12, 12], arrays[1][indexes])
# self.assertEqual([1.1, 1.1, 2.2, 1.1, 3.3], arrays[2][indexes])
``` |
{
"source": "johnson-li/python_profiler",
"score": 2
} |
#### File: python_profiler/viewer/main.py
```python
import calendar
import datetime
import gzip
import inspect
import json
import os
import subprocess
import time
import uuid
from flask import Flask, request, send_file, send_from_directory, render_template, redirect, url_for
import stack_profiler_viewer
app = Flask(__name__)
@app.route("/")
def hello():
return redirect(url_for('help_page'))
@app.route("/help")
def help_page():
return render_template('help.html')
@app.route('/static/<path:path>')
def send_js(path):
return send_from_directory('static', path)
@app.route('/generic_profiler/detail', methods=['GET'])
def generic_profiler():
date = datetime.datetime.fromtimestamp(stack_profiler_viewer.valid_date(request.args.get('date', int(time.time()))))
repo = request.args.get('repo', 'default')
host = request.args.get('host', 'default')
line_num = int(request.args.get('line', 0))
path = '/logdir/{}/{}{:02d}{:02d}/{}.perftree/{}.log'.format(date.year, date.year, date.month, date.day, repo,
host)
if os.path.isfile(path):
with open(path) as f:
for i, line in enumerate(f):
if i == line_num:
_, json_str = line.split(': ', 1)
j = json.loads(json_str)
parameter = j.pop('parameter', None)
return render_template('tree.html', json_str=json.dumps(j), parameter=json.dumps(parameter))
else:
path += '.gz'
if os.path.isfile(path):
with gzip.open(path) as f:
for i, line in enumerate(f):
if i == line_num:
_, json_str = line.split(': ', 1)
j = json.loads(json_str)
parameter = j.pop('parameter', None)
return render_template('tree.html', json_str=json.dumps(j), parameter=json.dumps(parameter))
else:
return 'No data'
return 'last'
@app.route("/generic_profiler/thumbnail", methods=['GET'])
def generic_profiler_thumbnail():
date = datetime.datetime.fromtimestamp(stack_profiler_viewer.valid_date(request.args.get('date', int(time.time()))))
repo = request.args.get('repo', 'default')
hosts = request.args.get('hosts', None)
hosts = hosts.split('+') if hosts else []
log_dir = '/logdir/{}/{}{:02d}{:02d}/{}.perftree'.format(date.year, date.year, date.month, date.day, repo)
log_paths = ['{}/{}.log'.format(log_dir, host) for host in hosts] if hosts else \
['{}/{}'.format(log_dir, file_name) for file_name in (os.listdir(log_dir) if os.path.isdir(log_dir) else [])]
if not hosts:
hosts = [path.split('/')[-1].split('.')[0] for path in log_paths]
data_list = []
def handle_file(host, ft):
for line_num, line in enumerate(ft):
case = {'time': line[:15], 'line_num': line_num, 'path': log_path, 'host': host}
line = line[16:]
_, line = line.split(' ', 1)
_, content = line.split(': ', 1)
try:
content_json = json.loads(content)
except Exception as e:
                print(e)
continue
content_json.pop('parameter', None)
case['title'] = content_json.keys()[0]
case['link'] = '/generic_profiler/detail?repo={}&date={}&line={}&host={}'. \
format(repo, date.date(), line_num, host)
data_list.append(case)
for host, log_path in zip(hosts, log_paths):
if os.path.isfile(log_path):
if log_path.endswith('.gz'):
with gzip.open(log_path) as f:
handle_file(host, f)
else:
with open(log_path) as f:
handle_file(host, f)
else:
log_path += '.gz'
if os.path.isfile(log_path):
with gzip.open(log_path, 'r') as f:
handle_file(host, f)
if data_list:
return render_template('thumbnail.html', cases=sorted(data_list, key=lambda x: x['time'], reverse=True))
else:
return 'No data'
def get_stack_profiler_path(start, end, repo):
date_list = [datetime.datetime.fromtimestamp(end)]
date_now = date_list[0].date()
for i in range(1, 4):
date_pre = date_now - datetime.timedelta(days=1)
ts_now = calendar.timegm(date_now.timetuple())
ts_pre = calendar.timegm(date_pre.timetuple())
if ts_pre <= start <= ts_now:
date_list.append(date_pre)
else:
break
date_now = date_pre
    print(date_list)
return ['/logdir/{}/{}{:02d}{:02d}/{}.stack'.format(date.year, date.year, date.month, date.day, repo) for
date in set(date_list)]
@app.route("/stack_profiler", methods=['GET'])
def stack_profiler():
start = stack_profiler_viewer.valid_date(request.args.get('start', int(time.time() - 3600)))
end = stack_profiler_viewer.valid_date(request.args.get('end', int(time.time())))
repo = request.args.get('repo', 'default')
show_others = request.args.get('show_others', False)
formatter = stack_profiler_viewer.FlamegraphFormatter()
output_file = '/tmp/stack_profiler/' + str(uuid.uuid4()) + '.fold'
output_file_svg = '/tmp/stack_profiler/' + str(uuid.uuid4()) + '.svg'
stack_profiler_viewer.fold_data(get_stack_profiler_path(start, end, repo), output_file, start, end, formatter,
show_others)
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
subprocess.call(['{}/flamegraph.pl'.format(path), output_file], stdout=open(output_file_svg, 'w'))
resp = send_file(output_file_svg, as_attachment=False, attachment_filename='stack_profiler.svg')
os.remove(output_file)
os.remove(output_file_svg)
return resp
if __name__ == "__main__":
app.run(host='0.0.0.0')
```
#### File: python_profiler/viewer/stack_profiler_viewer.py
```python
import argparse
import calendar
import collections
import gzip
import inspect
import json
import os
import re
import subprocess
import time
import dateutil.parser
# import bryo.utils.s3util
SOURCE_FILE_FILTER = re.compile('^/nail/srv/')
class CollectorFormatter(object):
"""
Abstract class for output formats
"""
def format(self, stacks):
raise Exception("not implemented")
class PlopFormatter(CollectorFormatter):
"""
Formats stack frames for plop.viewer
"""
def __init__(self, max_stacks=500):
self.max_stacks = max_stacks
def format(self, stacks):
# defaultdict instead of counter for pre-2.7 compatibility
stack_counts = collections.defaultdict(int)
for frames, count in stacks:
stack_counts[tuple([tuple(l) for l in frames])] += count
stack_counts = dict(sorted(stack_counts.iteritems(),
key=lambda kv: -kv[1])[:self.max_stacks])
return repr(stack_counts)
class FlamegraphFormatter(CollectorFormatter):
"""
Creates Flamegraph files
"""
def format(self, stacks):
output = ""
previous = None
previous_count = 0
for stack, count in stacks:
current = self.format_flame(stack)
if current == previous:
previous_count += count
else:
if previous:
output += "%s %d\n" % (previous, previous_count)
previous_count = count
previous = current
output += "%s %d\n" % (previous, previous_count)
return output
@staticmethod
def format_flame(stack):
funcs = map("{0[2]} ({0[0]}:{0[1]})".format, reversed(stack))
return ";".join(funcs)
def handle_file(f, start_ts, end_ts, stacks, show_others):
for line in f:
        # some old versions of the log do not have a count field, so a check is needed
index = line.find(': ')
if index > 0:
line = line[index + 2:]
index1 = line.index(' ')
index2 = line.rfind('&&&')
ts = int(line[:index1])
count = int(line[index2 + 3:]) if index2 > 0 else 1
if start_ts <= ts <= end_ts:
stack = json.loads(line[index1 + 1:index2] if index2 > 0 else line[index1 + 1:])
back_index = next((i for i, v in enumerate(reversed(stack)) if SOURCE_FILE_FILTER.match(v[0])), -1)
front_index = next((i for i, v in enumerate(stack) if SOURCE_FILE_FILTER.match(v[0])), -1)
if back_index >= 0:
stack = stack[:-back_index] if show_others else stack[front_index:-back_index]
if stack[0][0] == '/nail/srv/suso/utils/stack_profiler.py':
continue
stacks.append((stack, count))
def get_stacks(path, start_ts, end_ts, show_others):
stacks = []
if os.path.isdir(path):
file_names = [path + '/' + i for i in os.listdir(path)]
else:
file_names = [path]
for file_name in file_names:
if file_name.endswith('.gz'):
with gzip.open(file_name) as f:
handle_file(f, start_ts, end_ts, stacks, show_others)
else:
with open(file_name) as f:
handle_file(f, start_ts, end_ts, stacks, show_others)
return stacks
def fold_data(input_paths, output_path, start_ts, end_ts, formatter, show_others):
stack_list = []
for input_path in input_paths:
stack_list += get_stacks(input_path, start_ts, end_ts, show_others)
data = formatter.format(stack_list)
if output_path:
f = open(output_path, 'w')
f.write(data)
def get_dir(file_name):
directory = '/tmp/stack-profiler'
if not os.path.exists(directory):
os.makedirs(directory)
return directory + '/' + file_name
def valid_date(val):
if isinstance(val, int) or val.isdigit():
return int(val)
return calendar.timegm(dateutil.parser.parse(val).utctimetuple())
def main():
parser = argparse.ArgumentParser(description='', prog='python generate_stack_profiler_data',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--format", "-f", help="Output format", choices=["plop", "flamegraph"], default="flamegraph")
parser.add_argument("--mode", help="Interval timer mode to use, see `man 2 setitimer`",
choices=["prof", "real", "virtual"], default="prof")
parser.add_argument("--output", help="data output file name", default=get_dir("stack_profiler.folded"))
parser.add_argument("--output-svg", help="svg output file name, works with -s tag",
default=get_dir("stack_profiler.svg"))
parser.add_argument("--input", help="input file name", default="/nail/logs/emma-stack-profiler.sample")
parser.add_argument("--start", '-S', help="start timestamp, or time str that can be parsed by dateutil.parser",
default=None, type=valid_date)
parser.add_argument("--end", '-E', help="end timestamp, or time str that can be parsed by dateutil.parser",
default=long(time.time()), type=valid_date)
parser.add_argument("--upload", "-u", action='store_true', help="upload svg output data to s3, works with -s tag")
parser.add_argument("--delete", "-d", action='store_true', help="delete intermediate files")
parser.add_argument("--svg", "-s", action='store_true',
help="generate flame graph svg if the format is set as flamegraph")
args = parser.parse_args()
if args.format == "plop":
formatter = PlopFormatter()
else:
formatter = FlamegraphFormatter()
fold_data([args.input], args.output, args.start, args.end, formatter, False)
if args.svg:
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
subprocess.call(['{}/flamegraph.pl'.format(path), args.output], stdout=open(args.output_svg, 'w'))
if args.upload and args.svg:
pass
# url = bryo.utils.s3util.put('stack_profiler/stack:{}-{}.svg'.format(args.start if args.start else 0, args.end),
# open(args.output_svg), False)
# print 'uploaded to url:', url
if args.delete:
os.remove(args.output)
if args.svg:
os.remove(args.output_svg)
if __name__ == '__main__':
main()
``` |
{
"source": "Johnson-Lsx/espnet",
"score": 2
} |
#### File: espnet2/bin/enh_asr_train.py
```python
from espnet2.tasks.enh_asr import EnhASRTask
def get_parser():
parser = EnhASRTask.get_parser()
return parser
def main(cmd=None):
r"""Enh-ASR training.
Example:
% python enh_asr_train.py asr --print_config --optim adadelta \
> conf/train.yaml
% python enh_asr_train.py --config conf/train.yaml
"""
EnhASRTask.main(cmd=cmd)
if __name__ == "__main__":
main()
``` |
{
"source": "Johnson-Research-Group/flat-graphene",
"score": 3
} |
#### File: flat-graphene/flatgraphene/hhelp.py
```python
from pathlib import Path
def help():
"""
Forward documentation/doc strings to the terminal
"""
#construct path to help document from current file
help_file_name = 'help_doc.md'
help_file_path = str(Path(__file__).parent) + '/' + help_file_name
#read lines of help file
with open(help_file_path,'r') as f:
lines = f.readlines()
#send lines of help file to terminal
for line in lines:
print(line.strip()) #remove new line character as print adds its own
``` |
{
"source": "Johnson-Research-Group/Pyhesive",
"score": 2
} |
#### File: Pyhesive/pyhesive/_mesh.py
```python
from sys import version_info
import traceback
import logging
import meshio
import pymetis
import collections
import scipy
from scipy import sparse as scp
import numpy as np
from ._utils import flatten,get_log_level,get_log_stream
from ._cell_set import CellSet
from ._partition_interface import PartitionInterface
class Mesh(object):
__slots__ = ("log","cell_data","coords","__dict__")
def __init_logger(self):
slog = logging.getLogger(self.__class__.__name__)
slog.setLevel(get_log_level())
slog.propagate = False
ch = logging.StreamHandler(get_log_stream())
ch.setLevel(get_log_level())
ch.propagate = False
ch.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s")
)
slog.addHandler(ch)
return slog
def __assert_partitioned(self):
assert hasattr(self,"partitions"),"Must partition mesh first"
return
def __init__(self,mesh):
if isinstance(mesh,meshio.Mesh):
try:
self.cell_data = CellSet.from_CellBlock(mesh.cells[-1])
except IndexError as ie:
raise ValueError("input mesh does not contain any cells") from ie
self.coords = mesh.points
elif isinstance(mesh,self.__class__):
self.cell_data = mesh.cell_data
self.coords = mesh.coords
else:
err = "unknown type of input mesh {}".format(type(mesh))
raise ValueError(err)
self.cohesive_cells = None
self.log = self.__init_logger()
self.log.info("number of cells %d, vertices %d",len(self.cell_data.cells),len(self.coords))
self.log.info(
"cell dimension %d, type %s, number of faces per vertex %d",
self.cell_data.dim,self.cell_data.type,len(self.cell_data.face_indices[0])
)
return
@classmethod
def from_file(cls,file_name,format_in=None):
return cls(meshio.read(file_name,format_in))
@classmethod
def from_POD(cls,points,cells):
return cls(meshio.Mesh(points,cells))
def __ne__(self,other):
return not self.__eq__(other)
def __eq__(self,other):
def attr_eq(mine,other):
if type(mine) != type(other):
return False
if isinstance(mine,np.ndarray):
return np.array_equiv(mine,other)
if scp.issparse(mine):
from common import assert_scipy_all_close
try:
assert_scipy_all_close(mine,other)
except AssertionError:
return False
return True
try:
# try the dumb way
return mine == other
except:
pass
if isinstance(mine,(list,tuple)):
for m,o in zip(mine,other):
if not attr_eq(m,o):
return False
return True
      raise NotImplementedError
if id(self) == id(other):
return True
if not isinstance(other,self.__class__):
return NotImplemented
if len(self.__slots__) != len(other.__slots__):
return False
for attr in self.__slots__:
if attr != "__dict__":
try:
my_attr = getattr(self,attr)
other_attr = getattr(other,attr)
except AttributeError:
return False
if not attr_eq(my_attr,other_attr):
return False
self_dict = self.__dict__
other_dict = other.__dict__
if len(self_dict.keys()) != len(other_dict.keys()):
return False
self_items,other_items = sorted(self_dict.items()),sorted(other_dict.items())
for (self_key,self_val),(other_key,other_val) in zip(self_items,other_items):
try:
if (self_key != other_key) or (self_val != other_val):
return False
except ValueError:
if not attr_eq(self_val,other_val):
return False
return True
def __enter__(self):
return self
def __exit__(self,exc_type,exc_value,tb):
if exc_type is not None:
traceback.print_exception(exc_type,exc_value,tb)
return
def write_mesh(self,mesh_file_out,mesh_format_out=None,prune=False,return_mesh=False):
cells = [(self.cell_data.type,self.cell_data.cells)]
if self.cohesive_cells is not None:
self.log.info(
"generated %d cohesive elements of type '%s' and %d duplicated vertices",
len(self.cohesive_cells),self.cohesive_cells.type,len(self.dup_coords)
)
cells.append((self.cohesive_cells.type,self.cohesive_cells.cells))
else:
self.log.info("generated no cohesive elements")
mesh_out = meshio.Mesh(self.coords,cells)
if prune:
mesh_out.remove_orphaned_nodes()
if return_mesh:
return mesh_out
meshio.write(mesh_file_out,mesh_out,file_format=mesh_format_out)
self.log.info("wrote mesh to '%s' with format '%s'",mesh_file_out,mesh_format_out)
return
def partition_mesh(self,num_part=-1):
if num_part == 0:
self.partitions = tuple()
return self
if num_part == -1:
num_part = len(self.cell_data)
elif num_part > len(self.cell_data):
self.log.warning(
"number of partitions %d > num cells %d, using num cells instead",
num_part,len(self.cell_data)
)
self.adjacency_matrix = self.compute_adjacency_matrix()
self.cell_adjacency,bd_faces = self.compute_closure(full_closure=False)
self.bd_set = set(flatten(bd_faces))
if num_part < len(self.cell_data):
ncuts,membership = pymetis.part_graph(num_part,adjacency=self.cell_adjacency)
if ncuts == 0:
raise RuntimeError("no partitions were made by partitioner")
membership = np.array(membership)
self.partitions = tuple(np.argwhere(membership == p).ravel() for p in range(num_part))
else:
membership = np.array([x for x in range(len(self.cell_data))])
self.partitions = tuple(np.array([x]) for x in membership)
n_valid = sum(1 for partition in self.partitions if len(partition))
self.log.info(
"number of partitions requested %d, actual %d, average cells/partition %d",
num_part,n_valid,len(self.cell_data)/n_valid
)
part_count_sum = sum(len(partition) for partition in self.partitions)
if part_count_sum != len(self.cell_data):
err = "Partition cell-count sum {} != global number of cells {}".format(
part_count_sum,len(self.cell_data)
)
raise RuntimeError(err)
return self
def __get_partition_vertex_map(self,partitions=None,cell_set=None):
if hasattr(self,"partition_vertex_map"):
return self.partition_vertex_map
if cell_set is None:
cell_set = self.cell_data
if partitions is None:
      self.__assert_partitioned()
partitions = self.partitions
partition_vertex_map = (np.unique(cell_set.cells[part].ravel()) for part in partitions)
self.partition_vertex_map = collections.Counter(flatten(partition_vertex_map))
return self.partition_vertex_map
def __compute_partition_interface(self,partition,global_adjacency_matrix=None):
self.__get_partition_vertex_map()
boundary_faces = []
cell_set = self.cell_data[partition]
faces_per_cell = len(cell_set.face_indices)
face_dim = len(cell_set.face_indices[0])
if global_adjacency_matrix is None:
adj_mat = self.adjacency_matrix[partition,:][:,partition]
else:
      adj_mat = global_adjacency_matrix[partition,:][:,partition].tolil()
for row_idx,row in enumerate(adj_mat.data):
local_neighbors = [
adj_mat.rows[row_idx][n] for n in (i for i,k in enumerate(row) if k == face_dim)
]
self.log.debug("cell %d locally adjacent to %s",row_idx,local_neighbors)
if len(local_neighbors) != faces_per_cell:
# map local neighbor index to global cell ids
mapped_local_set = set(partition[local_neighbors])
# get the ids of all global neighbors
global_neighbors = self.cell_adjacency[partition[row_idx]]
# equivalent to set(globals).difference(mapped_local_set)
exterior_neighbors = [n for n in global_neighbors if n not in mapped_local_set]
# for all of my exterior neighbors, what faces do we have in common?
vertex_set = set(cell_set.cells[row_idx])
exterior_faces = [
vertex_set.intersection(self.cell_data.cells[c]) for c in exterior_neighbors
]
bd_faces = []
# loop over all possible faces of mine, and finding the indices for the mirrored
# face for the exterior partner cell
for idx,face in enumerate(map(tuple,cell_set.cells[row_idx][cell_set.face_indices])):
try:
exteriorIdx = exterior_faces.index(set(face))
except ValueError:
continue
neighbor_vertices = self.cell_data.cells[exterior_neighbors[exteriorIdx]]
neighbor_indices = np.array([(neighbor_vertices == x).nonzero()[0][0] for x in face])
bd_faces.append((face,exterior_neighbors[exteriorIdx],neighbor_indices))
if self.log.isEnabledFor(logging.DEBUG):
if len(exterior_neighbors):
self.log.debug("cell %d marked on interface with neighbors %s",row_idx,exterior_neighbors)
else:
self.log.debug("cell %d marked locally interior",row_idx)
for face in bd_faces:
self.log.debug("face %s marked on interface",face[0])
boundary_faces.append(bd_faces)
else:
self.log.debug("cell %d marked locally interior",row_idx)
self.log.debug("%d interface face(s)",sum(len(_) for _ in boundary_faces))
try:
#if len(bd_faces):
f,si,sv = zip(*flatten(boundary_faces))
except ValueError:
# ValueError: not enough values to unpack (expected 3, got 0)
f,si,sv = [],[],[]
p = PartitionInterface(
own_faces=np.array(f),mirror_ids=np.array(si),mirror_vertices=np.array(sv)
)
return p
def __get_partition_interface_list(self):
self.__assert_partitioned()
if not hasattr(self,"partition_interfaces"):
self.partition_interfaces = [self.__compute_partition_interface(p) for p in self.partitions]
return self.partition_interfaces
def compute_adjacency_matrix(self,cells=None,format="lil",v2v=False):
def mat_size(a):
if isinstance(a,scp.csr_matrix) or isinstance(a,scp.csc_matrix):
return a.data.nbytes+a.indptr.nbytes+a.indices.nbytes
elif isinstance(a,scp.lil_matrix):
return a.data.nbytes+a.rows.nbytes
elif isinstance(a,scp.coo_matrix):
return a.col.nbytes+a.row.nbytes+a.data.nbytes
return 0
if cells is None:
cells = self.cell_data.cells
ne = len(cells)
element_ids = np.empty((ne,len(cells[0])),dtype=np.intp)
element_ids[:] = np.arange(ne).reshape(-1,1)
cell_dim = len(element_ids[0])
v2c = scp.coo_matrix((
np.ones((ne*cell_dim,),dtype=np.intp),
(cells.ravel(),element_ids.ravel(),)
)).tocsr(copy=False)
if version_info <= (3,5):
c2c = v2c.T @ v2c
else:
c2c = v2c.T.__matmul__(v2c)
self.log.debug("c2c mat size %g kB",mat_size(c2c)/(1024**2))
c2c = c2c.asformat(format,copy=False)
self.log.debug("c2c mat size after compression %g kB",mat_size(c2c)/(1024**2))
if v2v:
v2v = v2c @ v2c.T
self.log.debug("v2v mat size %d bytes",mat_size(v2v))
v2v = v2v.asformat(format,copy=False)
self.log.debug("v2v mat size after compression %d bytes",mat_size(v2v))
return c2c,v2v
else:
return c2c
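  # Added commentary (not part of the original source): compute_adjacency_matrix
  # builds a vertex-to-cell incidence matrix v2c, so c2c = v2c.T @ v2c counts how
  # many vertices every pair of cells shares; two cells are face-adjacent exactly
  # when that count equals the number of vertices per face, which is how
  # compute_closure() and the partitioning routines below use it.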
def compute_closure(self,cell_set=None,adj_mat=None,full_closure=True):
if cell_set is None:
cell_set = self.cell_data
if adj_mat is None:
adj_mat = self.compute_adjacency_matrix(cell_set.cells)
bd_cells,bd_faces = [],[]
faces_per_cell = len(cell_set.face_indices)
face_dim = len(cell_set.face_indices[0])
local_adjacency = {}
for row_idx,row in enumerate(adj_mat.data):
neighbors = (i for i,k in enumerate(row) if k == face_dim)
local_adjacency[row_idx] = list(map(adj_mat.rows[row_idx].__getitem__,neighbors))
self.log.debug("cell %d adjacent to %s",row_idx,local_adjacency[row_idx])
if len(local_adjacency[row_idx]) != faces_per_cell:
if full_closure:
# the cell does not have a neighbor for every face!
self.log.debug("cell %d marked on boundary",row_idx)
bd_cells.append(row_idx)
# for all of my neighbors, what faces do we have in common?
own_vertices = set(cell_set.cells[row_idx])
interior_faces = [
own_vertices.intersection(cell_set.cells[c]) for c in local_adjacency[row_idx]
]
# all possible faces of mine
all_faces = map(tuple,cell_set.cells[row_idx][cell_set.face_indices])
bdf = [face for face in all_faces if set(face) not in interior_faces]
if self.log.isEnabledFor(logging.DEBUG):
for boundary_face in bdf:
self.log.debug("face %s marked on boundary",boundary_face)
assert len(bdf)+len(interior_faces) == faces_per_cell
bd_faces.append(bdf)
else:
self.log.debug("cell %d marked interior",row_idx)
if full_closure:
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug(
"%d interior cell(s), %d boundary cell(s), %d boundary face(s)",
len(cell_set.cells)-len(bd_cells),len(bd_cells),sum(len(_) for _ in bd_faces)
)
return local_adjacency,bd_cells,bd_faces
self.log.debug("%d boundary face(s)",sum(len(_) for _ in bd_faces))
return local_adjacency,bd_faces
def __duplicate_vertices(self,old_vertex_list,global_dict,coords,partition_vertex_map,dup_coords=None):
translation_dict = {}
convertable_vertices = tuple(x for x in old_vertex_list if x not in global_dict)
if len(convertable_vertices):
try:
num_duplicatable_vertices = len(coords)+len(dup_coords)
except TypeError as te:
if "object of type 'NoneType' has no len()" in te.args:
num_duplicatable_vertices = len(coords)
else:
raise te
new_vertex_coords = []
vertex_counts = [partition_vertex_map[x]-1 for x in convertable_vertices]
for vertex,count in zip(convertable_vertices,vertex_counts):
num_duplicatable_vertices_last = num_duplicatable_vertices
num_duplicatable_vertices += count
# At least one other partition must own the boundary vertex
# otherwise the routine generating local interior boundaries is buggy
assert num_duplicatable_vertices > num_duplicatable_vertices_last
translation_dict[vertex] = collections.deque(
range(num_duplicatable_vertices_last,num_duplicatable_vertices)
)
new_vertex_coords.extend([coords[vertex] for _ in range(count)])
self.log.debug("duped vertex %d -> %s",vertex,translation_dict[vertex])
try:
dup_coords.extend(new_vertex_coords)
except AttributeError as ae:
# dup_coords is None
if "'NoneType' object has no attribute 'extend'" in ae.args:
dup_coords = new_vertex_coords.copy()
else:
raise ae
else:
self.log.debug("no vertices to duplicate")
return dup_coords,translation_dict
def __generate_global_conversion(self,partitions,global_conversion_map):
self.dup_coords,dup_coords = None,None
try:
for (idx,part),boundary in zip(enumerate(partitions),self.partition_interfaces):
self.log.debug("partition %d contains (%d) cells %s",idx,len(part),part)
# old vertex IDs
old_vertices = {*flatten(boundary.own_faces)}
# duplicate the vertices, return the duplicates new IDs
dup_coords,local_conversion_map = self.__duplicate_vertices(
old_vertices,global_conversion_map,self.coords,self.partition_vertex_map,dup_coords
)
yield part,boundary,global_conversion_map
global_conversion_map.update(local_conversion_map)
finally:
# fancy trickery to update the coordinates __after__ the final yield has been called
self.dup_coords = np.array(dup_coords)
return
def remap_vertices(self):
self.__assert_partitioned();
source_vertices = []
mapped_vertices = []
vertices_per_face = len(self.cell_data.face_indices[0])
global_conversion_map = dict()
for part,boundary,global_conversion_map in self.__generate_global_conversion(self.partitions,global_conversion_map):
# loop through every cell in the current partition, if it contains vertices that are
# in the global conversion map then renumber using the top of the stack
converted_partition = np.array([
[global_conversion_map[v][0] if v in global_conversion_map else v for v in c] for c in self.cell_data.cells[part]
])
try:
# assign, throws ValueError if partition is empty
self.cell_data.cells[part] = converted_partition
except ValueError:
self.log.debug("no vertices to update")
continue
# for every face in the list of boundary faces, convert the boundary face vertex as
# above
mapped_boundary_faces = [[global_conversion_map[v][0] if v in global_conversion_map else v for v in f] for f in boundary.own_faces]
for mapped_bd_face,boundary_faces,src,idx in zip(mapped_boundary_faces,*boundary):
# we only want to record vertices which have an entire face changed, i.e. not just
# an edge or sole vertex
if vertices_per_face == sum(1 for i,j in zip(mapped_bd_face,boundary_faces) if i != j):
# now find the entries of the interface partner and take the indices
# corresponding to our face. Note that this face is __guaranteed__ to already
# have been handled, otherwise we would not have it in the conversion dict
interface_partner = self.cell_data.cells[src][idx]
source_vertices.append(interface_partner)
mapped_vertices.append(mapped_bd_face)
self.log.debug("updated face %s -> %s",interface_partner,mapped_bd_face)
converted_boundary_vertices = [
vertex for vertex in set(flatten(boundary.own_faces)) if vertex in global_conversion_map
]
if self.log.isEnabledFor(logging.DEBUG):
for vertex in converted_boundary_vertices:
previous_vertex = global_conversion_map[vertex].popleft()
try:
self.log.debug(
"updated mapping %d: %d -> %d",
vertex,previous_vertex,global_conversion_map[vertex][0]
)
except IndexError as ie:
self.log.debug("updated mapping %d: %d -> Empty",vertex,previous_vertex)
else:
for vertex in converted_boundary_vertices:
global_conversion_map[vertex].popleft()
if self.log.isEnabledFor(logging.DEBUG):
for vertex in global_conversion_map:
if len(global_conversion_map[vertex]):
self.log.error(
"vertex %d contains additional unapplied point-maps %s",
vertex,global_conversion_map[vertex]
)
return source_vertices,mapped_vertices
def insert_elements(self):
if self.partitions:
self.__get_partition_interface_list();
source_vertices,mapped_vertices = self.remap_vertices()
cells = np.hstack((mapped_vertices,source_vertices))
self.cohesive_cells = CellSet.from_POD(self.cell_data.cohesive_type,cells)
if self.dup_coords.shape:
self.coords = np.vstack((self.coords,self.dup_coords))
else:
self.log.debug("no coordinates were duplicated!")
if self.log.isEnabledFor(logging.DEBUG):
for element in self.cohesive_cells.cells:
self.log.debug("created new cohesive element %s",element)
return self
def verify_cohesive_mesh(self):
if len(self.cohesive_cells):
cohesive_set = set(frozenset(cell) for cell in self.cohesive_cells.cells)
self.log.debug(
"number of unique cohesive elements %s, total number of cohesive elements %s",
len(cohesive_set),len(self.cohesive_cells)
)
if len(cohesive_set) != len(self.cohesive_cells):
raise RuntimeError("there are duplicate cohesive cells!")
self.log.info("mesh seems ok")
else:
self.log.info("mesh has no cohesive cells")
return self
```
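Taken together, the methods above form a small pipeline: partition the mesh, compute the partition interfaces, duplicate and renumber the shared interface vertices, and insert cohesive elements between partitions. The snippet below is a minimal usage sketch rather than official documentation: it assumes `pyhesive.Mesh`, `partition_mesh`, `insert_elements`, and `verify_cohesive_mesh` behave as exercised by the test fixtures in the next file, and that a partitioning backend is available. The geometry is the two-tetrahedron mesh from the `tetDouble` fixture.
```python
import meshio
import numpy as np
import pyhesive

# two tetrahedra sharing the face (0,2,4) -- same geometry as the tetDouble fixture
points = np.array([
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
    [1.0, 1.0, 0.0],
    [0.0, 1.0, 0.0],
    [0.5, 0.5, 0.5],
])
cells = [("tetra", np.array([[0, 1, 2, 4], [0, 2, 3, 4]]))]

pyh = pyhesive.Mesh(meshio.Mesh(points, cells))
pyh.partition_mesh(-1)        # -1 is used by the fixtures; assumed to mean one partition per cell
pyh.insert_elements()         # duplicates interface vertices and creates the cohesive cells
pyh.verify_cohesive_mesh()    # raises if duplicate cohesive cells were generated

print(pyh.cohesive_cells.cells)  # connectivity of the newly inserted cohesive elements
print(pyh.coords.shape)          # coordinates now include the duplicated interface vertices
```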
#### File: pyhesive/test/fixtures.py
```python
import os
import collections
import meshio
import numpy as np
import scipy as scp
import pyhesive
import pyhesive.test.common as common
import copy
import pytest
data_set_named_tuple = collections.namedtuple("DataSet",["mesh","adjacency","closure","data"])
data_set_named_tuple.__new__.__defaults__ = (None,)*len(data_set_named_tuple._fields)
class DataSet(data_set_named_tuple):
__slots__ = ()
def meshlist():
return ["tetSingle","tetDouble","hexSingle","hexDouble","hexQuad","hexOctet"]
def find_data(name,mesh,partition_list=[0]):
filename = os.path.join(common.bin_dir,name)+".pkl"
try:
data = common.load_obj(filename)
except FileNotFoundError:
data = {
"name" : name,
"mesh" : mesh,
"partition_data" : dict()
}
for part in partition_list:
pyh = copy.deepcopy(pyhesive.Mesh(mesh))
pyh.partition_mesh(part)
data["partition_data"][part] = {
        # must deep copy since insert_elements() is not guaranteed to create the
        # intermediate structures
"pyhesive_mesh" : copy.deepcopy(pyh.insert_elements()),
"partition_vertex_map" : pyh._Mesh__get_partition_vertex_map(),
"partition_interfaces" : pyh._Mesh__get_partition_interface_list(),
}
del pyh
common.store_obj(filename,data)
return data
# @pytest.fixture
# def mesh(request):
# print(vars(request).keys())
# if isinstance(request.param,DataSet):
# return request.param.mesh
# elif isinstance(request.param,meshio.Mesh):
# return request.param
# elif isinstance(request.param,string):
# return request.getfixturevalue(request.param).mesh
# else:
# raise RuntimeError
@pytest.fixture
def empty():
mesh = meshio.Mesh(np.empty((0,3)),[])
return DataSet(mesh=mesh)
@pytest.fixture
def tetSingle():
mesh = meshio.Mesh(
np.array([[0.0,0.0,0.0],
[1.0,0.0,0.0],
[1.0,1.0,0.0],
[1.0,0.0,1.0]]),
[("tetra",np.array([[0,1,2,3]]))]
)
adjacency = (scp.sparse.lil_matrix(np.array([[4]])),
scp.sparse.lil_matrix(np.ones((4,4))))
closure = ({0: []},[0],[[(2,1,0),(2,0,3),(2,3,1),(0,1,3)]])
data = find_data("tetSingle",mesh)
return DataSet(mesh=mesh,adjacency=adjacency,closure=closure,data=data)
@pytest.fixture
def tetDouble():
mesh = meshio.Mesh(
np.array([
[0.0,0.0,0.0],
[1.0,0.0,0.0],
[1.0,1.0,0.0],
[0.0,1.0,0.0],
[0.5,0.5,0.5],
]),
[("tetra",np.array([
[0,1,2,4],
[0,2,3,4]]))]
)
adjacency = (
scp.sparse.lil_matrix(np.array([[4,3],[3,4]])),
scp.sparse.lil_matrix(
np.array([
[2,1,2,1,2],
[1,1,1,0,1],
[2,1,2,1,2],
[1,0,1,1,1],
[2,1,2,1,2]
])
)
)
closure = ({0:[1],1:[0]},[0,1],[[(2,1,0),(2,4,1),(0,1,4)],[(3,2,0),(3,0,4),(3,4,2)]])
data = find_data("tetDouble",mesh,[0,-1])
return DataSet(mesh=mesh,adjacency=adjacency,closure=closure,data=data)
@pytest.fixture
def hexSingle():
r"""
7 - - - - - - 6
| \ | \
| \ | \
| 4 - - - - - - 5
| | | |
3 - - | - - - 2 |
\ | \ |
\ | \ |
0 - - - - - - 1
"""
mesh = meshio.Mesh(
np.array([
[0.0,0.0,0.0],
[1.0,0.0,0.0],
[1.0,1.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0],
[1.0,0.0,1.0],
[1.0,1.0,1.0],
[0.0,1.0,1.0],
]),
[("hexahedron",np.array([[0,1,2,3,4,5,6,7]]))]
)
adjacency = (scp.sparse.lil_matrix(np.array([[8]])),scp.sparse.lil_matrix(np.ones((8,8))))
closure = ({0:[]},[0],[[(0,4,7,3),(0,1,5,4),(0,3,2,1),(6,7,4,5),(2,3,7,6),(2,6,5,1)]])
data = find_data("hexSingle",mesh)
return DataSet(mesh=mesh,adjacency=adjacency,closure=closure,data=data)
@pytest.fixture
def hexDouble():
r"""
9 - - - - - - 10 - - - - - - 11
| \ | \ | \
| \ | \ | \
| 6 - - - - - - 7 - - - - - - 8
| | | | | |
3 - - | - - - 4 - - | - - - 5 |
\ | \ | \ |
\ | \ | \ |
0 - - - - - - 1 - - - - - - 2
"""
mesh = meshio.Mesh(
np.array([
[0.0,0.0,0.0],
[1.0,0.0,0.0],
[2.0,0.0,0.0],
[0.0,1.0,0.0],
[1.0,1.0,0.0],
[2.0,1.0,0.0],
[0.0,0.0,1.0],
[1.0,0.0,1.0],
[2.0,0.0,1.0],
[0.0,1.0,1.0],
[1.0,1.0,1.0],
[2.0,1.0,1.0],
]),
[("hexahedron",np.array([[0,1,4,3,6,7,10,9],[1,2,5,4,7,8,11,10]]))]
)
adjacency = (scp.sparse.lil_matrix(np.array([[8,4],[4,8]])),
scp.sparse.lil_matrix(np.array([[1,1,0,1,1,0,1,1,0,1,1,0],
[1,2,1,1,2,1,1,2,1,1,2,1],
[0,1,1,0,1,1,0,1,1,0,1,1],
[1,1,0,1,1,0,1,1,0,1,1,0],
[1,2,1,1,2,1,1,2,1,1,2,1],
[0,1,1,0,1,1,0,1,1,0,1,1],
[1,1,0,1,1,0,1,1,0,1,1,0],
[1,2,1,1,2,1,1,2,1,1,2,1],
[0,1,1,0,1,1,0,1,1,0,1,1],
[1,1,0,1,1,0,1,1,0,1,1,0],
[1,2,1,1,2,1,1,2,1,1,2,1],
[0,1,1,0,1,1,0,1,1,0,1,1]])))
closure = ({0:[1],1:[0]},[0,1],
[[(0,6,9,3),(0,1,7,6),(0,3,4,1),(10,9,6,7),(4,3,9,10)],
[(1,2,8,7),(1,4,5,2),(11,10,7,8),(5,4,10,11),(5,11,8,2)]])
data = find_data("hexDouble",mesh,[0,-1])
return DataSet(mesh=mesh,adjacency=adjacency,closure=closure,data=data)
@pytest.fixture
def hexQuad():
r"""
15- - - - - - 16 - - - - - - 17
| \ | \ | \
| \ | \ | \
| 12- - - - - - 13 - - - - - - 14
| | \ | | \ | | \
6 - - | - \ - 7 - - | - \ - 8 | \
\ | 9- - - - - - 10 - - - - - - 11
\ | | \ | | \ | |
3 - - | - - - 4 - - | - - - 5 |
\ | \ | \ |
\ | \ | \ |
0 - - - - - - 1 - - - - - - 2
"""
mesh_points = np.array([
# plane z = 0
[0.0,0.0,0.0],
[1.0,0.0,0.0],
[2.0,0.0,0.0],
[0.0,1.0,0.0],
[1.0,1.0,0.0],
[2.0,1.0,0.0],
[0.0,2.0,0.0],
[1.0,2.0,0.0],
[2.0,2.0,0.0],
# plane z = 1
[0.0,0.0,1.0],
[1.0,0.0,1.0],
[2.0,0.0,1.0],
[0.0,1.0,1.0],
[1.0,1.0,1.0],
[2.0,1.0,1.0],
[0.0,2.0,1.0],
[1.0,2.0,1.0],
[2.0,2.0,1.0],
])
ref_mesh_points = np.vstack((
[np.ravel(x) for x in np.mgrid[0:3,0:3,0:2]]
)).T.astype(mesh_points.dtype)
assert len(ref_mesh_points) == len(mesh_points)
assert not len(np.setdiff1d(ref_mesh_points,mesh_points))
mesh = meshio.Mesh(
mesh_points,
[("hexahedron",np.array([
[0,1,4,3,9,10,13,12],
[1,2,5,4,10,11,14,13],
[3,4,7,6,12,13,16,15],
[4,5,8,7,13,14,17,16]
]))]
)
adjacency = (
scp.sparse.lil_matrix(
np.array([
[8,4,4,2],
[4,8,2,4],
[4,2,8,4],
[2,4,4,8]
])
),
scp.sparse.lil_matrix(
np.array([
[1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0],
[1,2,1,1,2,1,0,0,0,1,2,1,1,2,1,0,0,0],
[0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0],
[1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1,0],
[1,2,1,2,4,2,1,2,1,1,2,1,2,4,2,1,2,1],
[0,1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1],
[0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0],
[0,0,0,1,2,1,1,2,1,0,0,0,1,2,1,1,2,1],
[0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1],
[1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0],
[1,2,1,1,2,1,0,0,0,1,2,1,1,2,1,0,0,0],
[0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0],
[1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1,0],
[1,2,1,2,4,2,1,2,1,1,2,1,2,4,2,1,2,1],
[0,1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1],
[0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0],
[0,0,0,1,2,1,1,2,1,0,0,0,1,2,1,1,2,1],
[0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1]
])
)
)
closure = (
{
0 : [1,2],
1 : [0,3],
2 : [0,3],
3 : [1,2]
},
[0,1,2,3],
[[(0,9 ,12,3 ),(0 ,1 ,10,9 ),(0 ,3 ,4 ,1 ),(13,12,9 ,10)],
[(1,2 ,11,10),(1 ,4 ,5 ,2 ),(14,13,10,11),(5 ,14,11,2 )],
[(3,12,15,6 ),(3 ,6 ,7 ,4 ),(16,15,12,13),(7 ,6 ,15,16)],
[(4,7 ,8 ,5 ),(17,16,13,14),(8 ,7 ,16,17),(8 ,17,14,5 )]]
)
data = find_data("hexQuad",mesh,[0,2,3,-1])
return DataSet(mesh=mesh,adjacency=adjacency,closure=closure,data=data)
@pytest.fixture
def hexOctet():
r"""
24 - - - - - -25 - - - - - - 26
| \ | \ | \
| \ | \ | \
| 21 - - - - - -22 - - - - - - 23
| | \ | | \ | | \
15 - -| - \ - 16 - -|- - \- - 17 | \
| \ | 18 - - - - - - 19 - - - - - 20
| \ | | | \ | | | \ | |
| 12 - -| - - - 13 - -|- - - - 14 |
| | \ | | | \ | | | \ |
6 - - | - \ | 7 - - | - \ |- 8 | \ |
\ | 9 - - - - - - 10 - - - - - -11
\ | | \ | | \ | |
3 - - | - - - 4 - - | - - - 5 |
\ | \ | \ |
\ | \ | \ |
0 - - - - - - 1 - - - - - - 2
"""
mesh_points = np.array([
# plane z = 0
[0.0,0.0,0.0],
[1.0,0.0,0.0],
[2.0,0.0,0.0],
[0.0,1.0,0.0],
[1.0,1.0,0.0],
[2.0,1.0,0.0],
[0.0,2.0,0.0],
[1.0,2.0,0.0],
[2.0,2.0,0.0],
# plane z = 1
[0.0,0.0,1.0],
[1.0,0.0,1.0],
[2.0,0.0,1.0],
[0.0,1.0,1.0],
[1.0,1.0,1.0],
[2.0,1.0,1.0],
[0.0,2.0,1.0],
[1.0,2.0,1.0],
[2.0,2.0,1.0],
# plane z = 2
[0.0,0.0,2.0],
[1.0,0.0,2.0],
[2.0,0.0,2.0],
[0.0,1.0,2.0],
[1.0,1.0,2.0],
[2.0,1.0,2.0],
[0.0,2.0,2.0],
[1.0,2.0,2.0],
[2.0,2.0,2.0],
])
ref_mesh_points = np.vstack((
[np.ravel(x) for x in np.mgrid[0:3,0:3,0:3]]
)).T.astype(mesh_points.dtype)
assert len(ref_mesh_points) == len(mesh_points)
assert not len(np.setdiff1d(ref_mesh_points,mesh_points))
mesh = meshio.Mesh(
mesh_points,
[("hexahedron",np.array([
[0,1,4,3,9,10,13,12],
[1,2,5,4,10,11,14,13],
[3,4,7,6,12,13,16,15],
[4,5,8,7,13,14,17,16],
[9,10,13,12,18,19,22,21],
[10,11,14,13,19,20,23,22],
[12,13,16,15,21,22,25,24],
[13,14,17,16,22,23,26,25],
]))]
)
adjacency = (
scp.sparse.lil_matrix(
np.array([
[8,4,4,2,4,2,2,1],
[4,8,2,4,2,4,1,2],
[4,2,8,4,2,1,4,2],
[2,4,4,8,1,2,2,4],
[4,2,2,1,8,4,4,2],
[2,4,1,2,4,8,2,4],
[2,1,4,2,4,2,8,4],
[1,2,2,4,2,4,4,8]
])
),
scp.sparse.lil_matrix(
np.array([
[1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,2,1,1,2,1,0,0,0,1,2,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0],
[1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1,0,0,0,0,0,0,0,0,0,0],
[1,2,1,2,4,2,1,2,1,1,2,1,2,4,2,1,2,1,0,0,0,0,0,0,0,0,0],
[0,1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1,0,0,0,0,0,0,0,0,0],
[0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,2,1,1,2,1,0,0,0,1,2,1,1,2,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,0,0,0,0],
[1,1,0,1,1,0,0,0,0,2,2,0,2,2,0,0,0,0,1,1,0,1,1,0,0,0,0],
[1,2,1,1,2,1,0,0,0,2,4,2,2,4,2,0,0,0,1,2,1,1,2,1,0,0,0],
[0,1,1,0,1,1,0,0,0,0,2,2,0,2,2,0,0,0,0,1,1,0,1,1,0,0,0],
[1,1,0,2,2,0,1,1,0,2,2,0,4,4,0,2,2,0,1,1,0,2,2,0,1,1,0],
[1,2,1,2,4,2,1,2,1,2,4,2,4,8,4,2,4,2,1,2,1,2,4,2,1,2,1],
[0,1,1,0,2,2,0,1,1,0,2,2,0,4,4,0,2,2,0,1,1,0,2,2,0,1,1],
[0,0,0,1,1,0,1,1,0,0,0,0,2,2,0,2,2,0,0,0,0,1,1,0,1,1,0],
[0,0,0,1,2,1,1,2,1,0,0,0,2,4,2,2,4,2,0,0,0,1,2,1,1,2,1],
[0,0,0,0,1,1,0,1,1,0,0,0,0,2,2,0,2,2,0,0,0,0,1,1,0,1,1],
[0,0,0,0,0,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,2,1,1,2,1,0,0,0,1,2,1,1,2,1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0],
[0,0,0,0,0,0,0,0,0,1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1,0],
[0,0,0,0,0,0,0,0,0,1,2,1,2,4,2,1,2,1,1,2,1,2,4,2,1,2,1],
[0,0,0,0,0,0,0,0,0,0,1,1,0,2,2,0,1,1,0,1,1,0,2,2,0,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1,2,1,1,2,1,0,0,0,1,2,1,1,2,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1]
])
)
)
closure = (
{
0 : [1,2,4],
1 : [0,3,5],
2 : [0,3,6],
3 : [1,2,7],
4 : [0,5,6],
5 : [1,4,7],
6 : [2,4,7],
7 : [3,5,6]
},
[0,1,2,3,4,5,6,7],
[[(0 ,9 ,12,3 ),(0 ,1 ,10,9 ),(0 ,3 ,4 ,1 )],
[(1 ,2 ,11,10),(1 ,4 ,5 ,2 ),(5 ,14,11,2 )],
[(3 ,12,15,6 ),(3 ,6 ,7 ,4 ),(7 ,6 ,15,16)],
[(4 ,7 ,8 ,5 ),(8 ,7 ,16,17),(8 ,17,14,5 )],
[(9 ,18,21,12),(9 ,10,19,18),(22,21,18,19)],
[(10,11,20,19),(23,22,19,20),(14,23,20,11)],
[(12,21,24,15),(25,24,21,22),(16,15,24,25)],
[(26,25,22,23),(17,16,25,26),(17,26,23,14)]]
)
data = find_data("hexOctet",mesh,[0,2,4,-1])
return DataSet(mesh=mesh,adjacency=adjacency,closure=closure,data=data)
def get_fixture(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def fixt(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def mesh(request):
return get_fixture(request).mesh
@pytest.fixture
def pyhmesh(request):
yield pyhesive.Mesh(get_fixture(request).mesh)
@pytest.fixture
def adjacency(request):
yield get_fixture(request).adjacency
@pytest.fixture
def closure(request):
yield get_fixture(request).closure
@pytest.fixture
def partition_data(request):
yield get_fixture(request).data["partition_data"]
``` |
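Each of the parameterized fixtures above pulls its dataset out of `request.param`, so they are intended to be driven through pytest's indirect parametrization. A hedged sketch of a consuming test is shown below; it assumes the fixtures are registered with pytest (for example via a `conftest.py` that declares this module as a plugin) and relies only on the fixture names and method signatures defined above.
```python
import numpy as np
import pytest
import pyhesive

from pyhesive.test.fixtures import meshlist  # plain helper defined above

# drive the indirect fixtures with every mesh name from meshlist(); each name is passed
# as request.param and resolved by get_fixture() into the corresponding DataSet
@pytest.mark.parametrize("mesh,adjacency", [(m, m) for m in meshlist()], indirect=True)
def test_adjacency_matrix(mesh, adjacency):
    c2c, v2v = pyhesive.Mesh(mesh).compute_adjacency_matrix(v2v=True)
    expected_c2c, expected_v2v = adjacency
    assert np.array_equal(c2c.toarray(), expected_c2c.toarray())
    assert np.array_equal(v2v.toarray(), expected_v2v.toarray())
```
Parametrizing over fixture names rather than fixture values keeps the expensive `find_data` pickling work inside the fixtures themselves, so each dataset is built (or loaded from its cached `.pkl`) only when a test actually requests it.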